hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d831ab35f04940ce86d10f3b0746a51ead7fcdfc | 1,600 | py | Python | ehr_ml/extract.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 4 | 2021-03-12T21:41:37.000Z | 2021-06-25T16:49:52.000Z | ehr_ml/extract.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 22 | 2020-11-19T00:04:27.000Z | 2022-03-02T18:16:08.000Z | ehr_ml/extract.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 2 | 2021-05-12T13:11:46.000Z | 2021-10-15T18:30:14.000Z | from __future__ import annotations
import argparse
import bisect
import os
from .extension.extract import extract_omop
def extract_omop_program() -> None:
    """Command-line entry point that drives the OMOP v5 extraction.

    Parses the source/target locations plus CSV parsing options from
    sys.argv and hands them to the native ``extract_omop`` routine.
    """
    parser = argparse.ArgumentParser(
        description="An extraction tool for OMOP v5 sources"
    )

    # Required positional arguments, in the order extract_omop expects them.
    positionals = [
        ("omop_source", "Path of the folder to the omop source"),
        ("umls_location", "The location of the umls directory"),
        ("gem_location", "The location of the gem directory"),
        ("rxnorm_location", "The location of the rxnorm directory"),
        ("target_location", "The place to store the extract"),
    ]
    for arg_name, help_text in positionals:
        parser.add_argument(arg_name, type=str, help=help_text)

    parser.add_argument(
        "--delimiter",
        type=str,
        default=",",
        help="The delimiter used in the raw OMOP source",
    )
    # Complementary flags writing to the same destination; quoting is on
    # by default (see set_defaults below).
    parser.add_argument(
        "--ignore_quotes",
        dest="use_quotes",
        action="store_false",
        help="Ignore quotes while parsing",
    )
    parser.add_argument(
        "--use_quotes",
        dest="use_quotes",
        action="store_true",
        help="Use quotes while parsing",
    )
    parser.set_defaults(use_quotes=True)

    args = parser.parse_args()
    print(args)

    extract_omop(
        args.omop_source,
        args.umls_location,
        args.gem_location,
        args.rxnorm_location,
        args.target_location,
        args.delimiter,
        args.use_quotes,
    )
| 22.535211 | 78 | 0.620625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 490 | 0.30625 |
d8334d756e7256b282d32ab1967e513fc5ca0140 | 390 | py | Python | src/chains/migrations/0027_chain_description.py | tharsis/safe-config-service | 5335fd006d05fba5b13b477daca9f6ef6d64b818 | [
"MIT"
] | 8 | 2021-07-27T13:21:27.000Z | 2022-02-12T22:46:26.000Z | src/chains/migrations/0027_chain_description.py | protofire/safe-config-service | 6a9a48f5e33950cd5f4f7a66c5e36f4d3b0f2bfa | [
"MIT"
] | 203 | 2021-04-28T08:23:29.000Z | 2022-03-29T15:50:27.000Z | src/chains/migrations/0027_chain_description.py | protofire/safe-config-service | 6a9a48f5e33950cd5f4f7a66c5e36f4d3b0f2bfa | [
"MIT"
] | 23 | 2021-06-25T07:22:31.000Z | 2022-03-29T02:24:46.000Z | # Generated by Django 3.2.7 on 2021-09-16 12:38
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the optional ``description``
    # text field (max 255 chars, may be left blank) to the Chain model.

    dependencies = [
        ("chains", "0026_chain_l2"),
    ]

    operations = [
        migrations.AddField(
            model_name="chain",
            name="description",
            field=models.CharField(blank=True, max_length=255),
        ),
    ]
| 20.526316 | 63 | 0.594872 | 297 | 0.761538 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.230769 |
d8346003220ff71079a13d30e7158a1ac6606e62 | 1,288 | py | Python | leases/services/invoice/berth.py | City-of-Helsinki/berth-reservations | a3b1a8c2176f132505527acdf6da3a62199401db | [
"MIT"
] | 3 | 2020-10-13T07:58:48.000Z | 2020-12-22T09:41:50.000Z | leases/services/invoice/berth.py | City-of-Helsinki/berth-reservations | a3b1a8c2176f132505527acdf6da3a62199401db | [
"MIT"
] | 422 | 2018-10-25T10:57:05.000Z | 2022-03-30T05:47:14.000Z | leases/services/invoice/berth.py | City-of-Helsinki/berth-reservations | a3b1a8c2176f132505527acdf6da3a62199401db | [
"MIT"
] | 1 | 2020-04-03T07:38:03.000Z | 2020-04-03T07:38:03.000Z | from datetime import date
from django.db.models import QuerySet
from payments.enums import OrderStatus
from payments.models import BerthProduct, Order
from ...models import BerthLease
from ...utils import calculate_season_end_date, calculate_season_start_date
from .base import BaseInvoicingService
class BerthInvoicingService(BaseInvoicingService):
    """Invoicing service for berth leases.

    Provides the product lookup and lease/order queries the base
    invoicing workflow needs, scoped to the computed boating season.
    """

    def __init__(self, *args, **kwargs):
        # Idiomatic Python 3 zero-argument super() (same MRO behavior as
        # the old explicit form).
        super().__init__(*args, **kwargs)
        self.season_start = calculate_season_start_date()
        self.season_end = calculate_season_end_date()

    @staticmethod
    def get_product(lease: BerthLease) -> BerthProduct:
        """Return the berth product whose price range covers the lease's berth.

        The berth product is determined by the width of the berth of the lease.
        """
        return BerthProduct.objects.get_in_range(width=lease.berth.berth_type.width)

    @staticmethod
    def get_valid_leases(season_start: date) -> QuerySet:
        """Return leases eligible for renewal for the given season start."""
        return BerthLease.objects.get_renewable_leases(season_start=season_start)

    @staticmethod
    def get_failed_orders(season_start: date) -> QuerySet:
        """Return ERROR-state orders for leases starting in the season's year."""
        leases = BerthLease.objects.filter(
            start_date__year=season_start.year
        ).values_list("id")
        return Order.objects.filter(
            _lease_object_id__in=leases, status=OrderStatus.ERROR
        )
| 34.810811 | 84 | 0.743789 | 983 | 0.763199 | 0 | 0 | 693 | 0.538043 | 0 | 0 | 76 | 0.059006 |
d835dbb8e8cead5aede4c3f6961c1867eb695b3c | 2,561 | py | Python | scripts/addons/animation_nodes/nodes/boolean/compare.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | 2 | 2020-04-16T22:12:40.000Z | 2022-01-22T17:18:45.000Z | scripts/addons/animation_nodes/nodes/boolean/compare.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | null | null | null | scripts/addons/animation_nodes/nodes/boolean/compare.py | Tilapiatsu/blender-custom_conf | 05592fedf74e4b7075a6228b8448a5cda10f7753 | [
"MIT"
] | 2 | 2019-05-16T04:01:09.000Z | 2020-08-25T11:42:26.000Z | import bpy
from bpy.props import *
from ... base_types import AnimationNode, DataTypeSelectorSocket
compare_types = ["A = B", "A != B", "A < B", "A <= B", "A > B", "A >= B", "A is B","A is None"]
compare_types_items = [(t, t, "") for t in compare_types]
numericLabelTypes = ["Integer", "Float"]
class CompareNode(bpy.types.Node, AnimationNode):
    """Animation node comparing two inputs of a user-selectable type.

    Supports (in)equality, ordering, identity and None checks; the output
    socket is a single Boolean named "Result".
    """
    bl_idname = "an_CompareNode"
    bl_label = "Compare"
    dynamicLabelType = "HIDDEN_ONLY"

    assignedType: DataTypeSelectorSocket.newProperty(default = "Integer")

    compareType: EnumProperty(name = "Compare Type",
        items = compare_types_items, update = AnimationNode.refresh)

    def create(self):
        # "A is None" needs only the single input socket A.
        self.newInput(DataTypeSelectorSocket("A", "a", "assignedType"))
        if self.compareType != "A is None":
            self.newInput(DataTypeSelectorSocket("B", "b", "assignedType"))
        self.newOutput("Boolean", "Result", "result")

    def draw(self, layout):
        layout.prop(self, "compareType", text = "Type")

    def drawLabel(self):
        # For unlinked numeric sockets, substitute the literal socket
        # values into the label (e.g. "3.0 < 5.0").
        label = self.compareType
        if self.assignedType in numericLabelTypes and len(self.inputs) == 2:
            if getattr(self.socketA, "isUnlinked", False):
                label = label.replace("A", str(round(self.socketA.value, 4)))
            if getattr(self.socketB, "isUnlinked", False):
                label = label.replace("B", str(round(self.socketB.value, 4)))
        return label

    def drawAdvanced(self, layout):
        self.invokeSelector(layout, "DATA_TYPE", "assignType",
            text = "Change Type", icon = "TRIA_RIGHT")

    def getExecutionCode(self, required):
        # Ordering comparisons are wrapped in try/except because the
        # assigned type may not support them; failures evaluate to False.
        type = self.compareType
        if type == "A = B": return "result = a == b"
        if type == "A != B": return "result = a != b"
        if type == "A < B": return "try: result = a < b \nexcept: result = False"
        if type == "A <= B": return "try: result = a <= b \nexcept: result = False"
        if type == "A > B": return "try: result = a > b \nexcept: result = False"
        if type == "A >= B": return "try: result = a >= b \nexcept: result = False"
        if type == "A is B": return "result = a is b"
        if type == "A is None": return "result = a is None"
        return "result = False"

    def assignType(self, dataType):
        if self.assignedType != dataType:
            self.assignedType = dataType
            self.refresh()

    @property
    def socketA(self):
        return self.inputs.get("A")

    @property
    def socketB(self):
        return self.inputs.get("B")
| 38.223881 | 95 | 0.593128 | 2,262 | 0.883249 | 0 | 0 | 136 | 0.053104 | 0 | 0 | 659 | 0.257321 |
d835ed38f90e0e8ac78ae36e960a76975c5bb157 | 460 | py | Python | main.py | bqia0/CycleGAN | 8be914de7f75de91d2c43c4745e4292d138ff591 | [
"MIT"
] | null | null | null | main.py | bqia0/CycleGAN | 8be914de7f75de91d2c43c4745e4292d138ff591 | [
"MIT"
] | null | null | null | main.py | bqia0/CycleGAN | 8be914de7f75de91d2c43c4745e4292d138ff591 | [
"MIT"
] | null | null | null | import os
import options
import networks
import utils
from argparse import ArgumentParser
def main():
    """CLI entry point: dispatch CycleGAN training or testing from flags."""
    args = options.get_args()
    model = networks.CycleGAN(args)

    if args.train == args.test:
        # Neither (or both) modes requested — nothing sensible to do.
        print("What are we even doing here?")
    elif args.train:
        print("Training")
        print(args)
        model.train(args)
    elif args.test:
        print("Testing")
        model.test(args)


if __name__ == "__main__":
    main()
d836f930db46a6c36122556877e26353c466c74c | 1,457 | py | Python | memecrypt/utilities.py | Sh3llcod3/memecrypt | d1a7a0e8ebf8ca9c4a055587f7287a9b05aaf9d0 | [
"MIT"
] | 1 | 2019-06-22T10:15:11.000Z | 2019-06-22T10:15:11.000Z | memecrypt/utilities.py | Sh3llcod3/memecrypt | d1a7a0e8ebf8ca9c4a055587f7287a9b05aaf9d0 | [
"MIT"
] | 2 | 2020-06-08T17:44:56.000Z | 2020-10-04T00:12:30.000Z | memecrypt/utilities.py | Sh3llcod3/Memecrypt | d1a7a0e8ebf8ca9c4a055587f7287a9b05aaf9d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
try:
import base64
import binascii
import codecs
import random
except ImportError as import_fail:
print(f"Import error: {import_fail}")
print("Please install this module.")
raise SystemExit(1)
class utils(object):
    """Stateless encoding/decoding helpers used throughout memecrypt.

    All helpers are exposed as static methods so they can be called on
    the class (the original usage) or on an instance.
    """

    @staticmethod
    def enc_utf(input_str):
        """Encode a text string to UTF-8 bytes."""
        return input_str.encode("utf-8")

    @staticmethod
    def dec_utf(input_str):
        """Decode UTF-8 bytes back to a text string."""
        return input_str.decode("utf-8")

    @staticmethod
    def enc_hex(input_str):
        """Return the hexadecimal representation of a text string."""
        return utils.dec_utf(
            binascii.hexlify(
                utils.enc_utf(input_str)
            )
        )

    @staticmethod
    def dec_hex(input_str):
        """Decode a hexadecimal string back to text.

        Bug fix: this previously called ``hexlify`` (making it identical
        to ``enc_hex``), so hex decoding never worked.
        """
        return utils.dec_utf(
            binascii.unhexlify(
                utils.enc_utf(input_str)
            )
        )

    @staticmethod
    def enc_b64(input_str):
        """Base64-encode a text string, returning text."""
        return utils.dec_utf(
            base64.b64encode(
                utils.enc_utf(input_str)
            )
        )

    @staticmethod
    def dec_b64(input_str):
        """Base64-decode a text string, returning text."""
        return utils.dec_utf(
            base64.b64decode(
                utils.enc_utf(input_str)
            )
        )

    @staticmethod
    def xor_str(input_val1, input_val2):
        """XOR two strings character-wise (truncated to the shorter one)."""
        xored_str = str()
        for i in zip(input_val1, input_val2):
            xored_str += chr(ord(i[0]) ^ ord(i[1]))
        return xored_str

    @staticmethod
    def rot13(input_str):
        """Apply the ROT13 substitution cipher."""
        return codecs.encode(input_str, "rot13")

    @staticmethod
    def rand_seed(seed_value):
        """Seed the module-level PRNG (for reproducible choices)."""
        random.seed(seed_value)

    @staticmethod
    def rand_choc(input_values):
        """Return a random element of *input_values*."""
        return random.choice(input_values)
| 22.075758 | 51 | 0.569664 | 1,184 | 0.812629 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.085793 |
d8391e7552c953147563413d12fda2c7673ac749 | 8,908 | py | Python | challenges/CNMP/poller/for-testing/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 473 | 2016-08-01T12:48:16.000Z | 2022-03-09T18:13:14.000Z | challenges/CNMP/poller/for-testing/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 71 | 2016-08-01T03:33:44.000Z | 2022-03-09T18:37:04.000Z | challenges/CNMP/poller/for-testing/machine.py | pingjuiliao/cb-multios | 64ededd0b87030eda7c40c4388a4ad8283712d8e | [
"MIT"
] | 121 | 2016-08-01T04:07:53.000Z | 2022-03-07T11:08:09.000Z | #!/usr/bin/env python
#
# Copyright (C) 2014 Narf Industries <info@narfindustries.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from generator.actions import Actions
from random import choice, randint
from os.path import dirname, abspath, join
import imp
import random
import string
import sys
CNMP_DIR = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(join(CNMP_DIR, "support"))
from jokes import Jokes
def random_alpha(a, b):
    """Return a random string of ASCII letters with length in [a, b].

    Uses ``string.ascii_letters`` instead of the Python-2-only
    ``string.letters`` (removed in Python 3; also locale-independent).
    """
    return ''.join(choice(string.ascii_letters) for _ in range(randint(a, b)))
def random_string(a, b):
    """Return a random alphanumeric string with length in [a, b].

    Uses ``string.ascii_letters`` instead of the Python-2-only
    ``string.letters`` (removed in Python 3; also locale-independent).
    """
    chars = string.ascii_letters + string.digits
    return ''.join(choice(chars) for _ in range(randint(a, b)))
def random_digits(a, b):
    """Return a random string of decimal digits with length in [a, b]."""
    length = randint(a, b)
    return ''.join(choice(string.digits) for _ in range(length))
class CNMP(Actions):
    """Poller state machine for the Chuck Norris (joke) Management Protocol.

    Each action writes a command terminated by a BEL byte ('\\x07') and
    reads the CB's expected reply; ``self.state['jokes']`` mirrors the
    CB's joke database so expected responses can be predicted.  Actions
    that return -1 end the poll (the CB closes the connection after that
    exchange).
    """
    MAX_JOKES=256
    MAX_JOKE_STRING_LEN=512
    JOKE_ID_CHAR_LEN=11
    CMD_CHAR_LEN=20

    def _load_jokes(self):
        # Seed the local mirror of the CB's initial joke database.
        self.state['jokes'] = Jokes().get_jokes()

    def start(self):
        self._load_jokes()
        self.read(delim="\n",
            expect="This is the Chuck Norris (joke) Management Protocol (CNMP).\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="We know you need a break, so take some time and amuse yourself. " \
                "If you have a good Chuck Norris joke, you can add it to our listing.\n",
            expect_format='asciic')

    def menu(self):
        self.read(delim="\n",
            expect="Enter one of these commands:\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  LIST - list all jokes\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  ADD - upload your own joke\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  COUNT - find out how many jokes we have to offer you\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  SHOW - show a joke\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  MENU - display this menu\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  HELP - you gotta be kididin' me!\n",
            expect_format='asciic')
        self.read(delim="\n",
            expect="  QUIT - take off eh?\n",
            expect_format='asciic')

    def root_prompt(self):
        self.read(delim='> ', expect='> ', expect_format='asciic')

    def empty(self):
        # Bare command terminator: CB ignores an empty command.
        self.write("\x07")

    def quit(self):
        self.write("QUIT\x07")
        expect_str = "Thanks for the laughs. Come back soon!\n"
        self.read(delim='\n', expect=expect_str, expect_format='asciic')
        return -1

    def help(self):
        self.write("HELP\x07")
        expect_str = "Is our menu so extensive that you cannot remember all of the commands? " \
            "Here, let me jog all 6 of your neurons:\n"
        self.read(delim='\n', expect=expect_str, expect_format='asciic')

    def list_jokes(self):
        self.write("LIST\x07")
        # joke is list of lines with format "joke_id: joke\n"
        for idx, joke in enumerate(self.state['jokes'], 0):
            self.read(delim='\n',
                expect="{0}: {1}\n".format(idx, joke),
                expect_format='asciic')

    def count(self):
        self.write("COUNT\x07")
        # read count string
        expect_str = "Current CNMP joke count is {0}. Enjoy!\n".format(len(self.state['jokes']))
        self.read(delim='\n', expect=expect_str, expect_format='asciic')

    def menu_cmd(self):
        self.write("MENU\x07")

    def bad_cmd_die(self):
        # submit a bad command; will exit connection
        # bad command: (self.CMD_CHAR_LEN chars w/o trailing '\x07')
        # have to stay w/in CMD_CHAR_LEN else poller will fail attempting to
        # write the extra bytes that the CB will never read
        cmd = random_string(self.CMD_CHAR_LEN, self.CMD_CHAR_LEN)
        expect_str = "\nAre you kidding me? What is that garbage? " \
            "I thought my instructions were pretty straight forward. " \
            "Try again.\n"
        self.write('{0}'.format(cmd))
        self.read(delim='.\n', expect=expect_str, expect_format='asciic')
        return -1

    def add_msg(self):
        expect_str = ""
        self.write("ADD\x07")
        self.read(delim="\n", expect="So, you think you have a good Chuck Norris joke? "\
            "Give me the joke string already....\n", expect_format='asciic')
        self.read(delim='> ', expect='ADD> ', expect_format='asciic')
        # submit a bad message; will exit connection
        # bad message: (> MAX_JOKE_STRING_LEN chars, i.e. MAX_JOKE_STRING_LEN*'A' w/o '\x07')
        if self.chance(0.0001):
            joke = random_string(self.MAX_JOKE_STRING_LEN, self.MAX_JOKE_STRING_LEN)
            expect_str = "\nAre you kidding me? What is that garbage? " \
                "I thought my instructions were pretty straight forward. " \
                "Try again.\n"
            self.write('{0}'.format(joke))
            self.read(delim='.\n', expect=expect_str, expect_format='asciic')
            return -1
        # submit a bad joke
        # bad joke (MAX_JOKE_STRING_LEN-1 chars, i.e (MAX_JOKE_STRING_LEN-1)*'A' + \x07)
        elif self.chance(0.001):
            joke = random_string(self.MAX_JOKE_STRING_LEN - 1, self.MAX_JOKE_STRING_LEN - 1)
            expect_str = "\nThat joke you gave me is crap! " \
                "For that, you get to start over!\n"
            self.write('{0}\x07'.format(joke))
            self.read(delim='!\n', expect=expect_str, expect_format='asciic')
        else:
            joke = random_string(1, self.MAX_JOKE_STRING_LEN - 2)
            self.write('{0}\x07'.format(joke))
            joke_count = len(self.state['jokes'])
            if joke_count < self.MAX_JOKES:
                # add a joke
                expect_str = "Joke added. Thanks for sharing! Your joke is ID: {0}\n".format(joke_count)
                self.state['jokes'].append(joke)
            else:
                # joke db is full
                expect_str = "Lordy, lordy, I've had enough Chuck Norris jokes. Go away.\n"
            self.read(delim='\n', expect=expect_str, expect_format='asciic')

    def show(self):
        expect_str = "Give me the ID of the joke you want to read. Or better yet, " \
            "enter RANDOM and I'll choose one for you.\n"
        self.write("SHOW\x07")
        self.read(delim="\n", expect=expect_str, expect_format="asciic")

    def show_prompt(self):
        expect_str = 'SHOW> '
        self.read(delim='> ', expect=expect_str, expect_format='asciic')

    def show_msg_good(self):
        expect_str = ""
        expect_format='asciic'
        joke_id = ""
        # find the easteregg joke
        if self.chance(0.001):
            joke_id = "1337"
            expect_str = "Congratulations, you are the only person in the past, present " \
                "or future to beat Chuck Norris.\n"
        else:
            # choose a RANDOM joke
            if self.chance(0.2):
                joke_id = "RANDOM"
                expect_str = "[0-9]+: (.*)\n"
                expect_format='pcre'
            else:
                # id form 0 to joke_count - 1
                joke_count = len(self.state['jokes'])
                joke_id = str(randint(0, joke_count - 1))
                expect_str = "{0}: {1}\n".format(joke_id, self.state['jokes'][int(joke_id)])
        self.write("{0}\x07".format(joke_id))
        self.read(delim='\n', expect=expect_str, expect_format=expect_format)

    def show_msg_bad(self):
        # submit a bad joke id, w/ len < self.JOKE_ID_CHAR_LEN
        joke_id = ""
        if self.chance(0.33):
            # numeric < 0
            joke_id = "-1{0}".format(random_digits(1,self.JOKE_ID_CHAR_LEN - 5))
        elif self.chance(0.33):
            #not numeric that is not "RANDOM", w/ len < self.JOKE_ID_CHAR_LEN
            joke_id = random_alpha(1, self.JOKE_ID_CHAR_LEN - 1)
            if joke_id == "RANDOM":
                joke_id = random_alpha(1, self.JOKE_ID_CHAR_LEN - 1)
        # submit a bad joke id w/ len = self.JOKE_ID_CHAR_LEN w/o '\x07'
        # will exit connection
        elif self.chance(0.0001):
            joke_id = "1{0}".format(random_digits(self.JOKE_ID_CHAR_LEN-1, self.JOKE_ID_CHAR_LEN-1))
            self.write("{0}".format(joke_id))
            expect_str = "\nAre you kidding me? What is that garbage? " \
                "I thought my instructions were pretty straight forward. Try again.\n"
            self.read(delim='.\n', expect=expect_str, expect_format='asciic')
            return -1
        else:
            # number >= len(jokedb), not 1337, w/ len < self.JOKE_ID_CHAR_LEN
            joke_id = len(self.state['jokes']) + 1338
        self.write("{0}\x07".format(joke_id))
        expect_str = "\nWhat do you take me for? That ID is bogus! Try again.\n"
        self.read(delim='.\n', expect=expect_str, expect_format='asciic')
| 35.349206 | 92 | 0.683767 | 7,116 | 0.798833 | 0 | 0 | 0 | 0 | 0 | 0 | 4,228 | 0.47463 |
d8393bc95348a0c0e174efe5e2ff9ad2700f4359 | 480 | py | Python | S1/TP4/ex2.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | [
"MIT"
] | null | null | null | S1/TP4/ex2.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | [
"MIT"
] | null | null | null | S1/TP4/ex2.py | HerbeMalveillante/ecole | bebbc73cd678c58c9cd40389ea1cf229a0200308 | [
"MIT"
] | null | null | null | import random
def saisieSomme():
entree = None
while entree not in range(2, 13):
entree = int(input("Entrez un nombre entre 2 et 12 inclus : "))
return entree
def sommeDeuxDes():
de1 = random.randint(1, 6)
de2 = random.randint(1, 6)
print("valeur du dé 1 :", de1, "| valeur du dé 2 :", de2)
print("Somme :", de1 + de2)
return de1 + de2
def jouer():
print("Gagné !" if saisieSomme() == sommeDeuxDes() else "Perdu !")
jouer()
| 17.142857 | 71 | 0.59375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.227743 |
d83a25d19aad9bcd24d479a3205739bae3d518dc | 1,228 | py | Python | Day4/rock.py | viditvarshney/100DaysOfCode | eec82c98087093f1aec1cb21acab82368ae785a3 | [
"MIT"
] | null | null | null | Day4/rock.py | viditvarshney/100DaysOfCode | eec82c98087093f1aec1cb21acab82368ae785a3 | [
"MIT"
] | null | null | null | Day4/rock.py | viditvarshney/100DaysOfCode | eec82c98087093f1aec1cb21acab82368ae785a3 | [
"MIT"
] | null | null | null | import random
import time
print("\n\nWelcome..\n")
time.sleep(2)
print('''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
''')
print('''
_______
---' ____)____
______)
_______)
_______)
---.__________)
''')
print('''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
''')
print("\n\n RULES\n")
print('''
Rock wins against scissors.
Scissors win against paper.
Paper wins against rock.\n''')
# d = {0: 'rock', 1: 'paper', 2: 'scissor'}
while True:
while True:
my_turn = int(
input("Choose one among 0: rock, 1: paper or 2: scissor? "))
if my_turn == 0 or my_turn == 1 or my_turn == 2:
break
else:
print("Invalid input.")
continue
computer_turn = random.randint(0, 2)
win_dict = {
0: "SCISSORS",
1: "PAPER",
2: "ROCK",
}
my_win_option = win_dict[my_turn]
if my_turn == computer_turn:
print("It's a draw..")
elif my_turn == my_win_option:
print("You wins")
else:
print("Computer Wins")
if input("New Game? ('Yes') or ('No'): ").casefold() == 'no':
break
| 18.892308 | 72 | 0.509772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 619 | 0.504072 |
d83b93f39c486d9317c825bf51df461bf2aebcdf | 440 | py | Python | django_request_context/middleware.py | schecterdamien/django-request-context | db1100b611012835fd8528c2ba4848ed87e10f80 | [
"MIT"
] | 13 | 2018-11-01T08:10:54.000Z | 2021-07-09T07:42:18.000Z | django_request_context/middleware.py | schecterdamien/django-request-context | db1100b611012835fd8528c2ba4848ed87e10f80 | [
"MIT"
] | 1 | 2019-07-28T16:49:05.000Z | 2021-10-02T04:37:20.000Z | django_request_context/middleware.py | schecterdamien/django-request-context | db1100b611012835fd8528c2ba4848ed87e10f80 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .globals import request_context
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
MiddlewareMixin = object
class RequestContextMiddleware(MiddlewareMixin):
    """Populates the global ``request_context`` for the lifetime of each
    request and clears it once the response has been produced."""

    def process_request(self, request):
        # Seed the shared context from the incoming request.
        request_context.init_by_request(request)

    def process_response(self, request, response):
        # Always reset so state never leaks into the next request.
        request_context.clear()
        return response
| 23.157895 | 56 | 0.729545 | 246 | 0.559091 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.086364 |
d83cb6ca0077fd801c02a5b6138c1e95f1102fa5 | 1,724 | py | Python | venv/Lib/site-packages/zmq/tests/test_win32_shim.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | venv/Lib/site-packages/zmq/tests/test_win32_shim.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/Lib/site-packages/zmq/tests/test_win32_shim.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z | from __future__ import print_function
import os
import time
import sys
from functools import wraps
from pytest import mark
from zmq.tests import BaseZMQTestCase
from zmq.utils.win32 import allow_interrupt
def count_calls(f):
    """Wrap *f* so every invocation (including failing ones) increments
    the wrapper's ``__calls__`` attribute."""
    @wraps(f)
    def counted(*args, **kwds):
        try:
            return f(*args, **kwds)
        finally:
            # finally: count the call even when f raises
            counted.__calls__ += 1
    counted.__calls__ = 0
    return counted
@mark.new_console
class TestWindowsConsoleControlHandler(BaseZMQTestCase):
    """Verify that ``allow_interrupt`` routes a Windows CTRL-C console
    event to the supplied callback (surfacing as KeyboardInterrupt)."""

    @mark.new_console
    @mark.skipif(not sys.platform.startswith('win'), reason='Windows only test')
    def test_handler(self):
        @count_calls
        def interrupt_polling():
            print('Caught CTRL-C!')

        from ctypes import windll
        from ctypes.wintypes import BOOL, DWORD

        kernel32 = windll.LoadLibrary('kernel32')

        # <http://msdn.microsoft.com/en-us/library/ms683155.aspx>
        GenerateConsoleCtrlEvent = kernel32.GenerateConsoleCtrlEvent
        GenerateConsoleCtrlEvent.argtypes = (DWORD, DWORD)
        GenerateConsoleCtrlEvent.restype = BOOL

        # Simulate CTRL-C event while handler is active.
        try:
            with allow_interrupt(interrupt_polling) as context:
                result = GenerateConsoleCtrlEvent(0, 0)
                # Sleep so that we give time to the handler to
                # capture the Ctrl-C event.
                time.sleep(0.5)
        except KeyboardInterrupt:
            pass
        else:
            if result == 0:
                raise WindowsError()
            else:
                self.fail('Expecting `KeyboardInterrupt` exception!')

        # Make sure our handler was called.
        self.assertEqual(interrupt_polling.__calls__, 1)
| 27.806452 | 80 | 0.632251 | 1,304 | 0.756381 | 0 | 0 | 1,452 | 0.842227 | 0 | 0 | 305 | 0.176914 |
dc1a0d7e80f8d8d5c7583da835fc29466459e738 | 1,150 | py | Python | examples/docs_snippets/docs_snippets_tests/concepts_tests/partitions_schedules_sensors_tests/test_schedules.py | Andrew-Crosby/dagster | e646625a687dc656bdd855d88b868de957b37b62 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/concepts_tests/partitions_schedules_sensors_tests/test_schedules.py | Andrew-Crosby/dagster | e646625a687dc656bdd855d88b868de957b37b62 | [
"Apache-2.0"
] | null | null | null | examples/docs_snippets/docs_snippets_tests/concepts_tests/partitions_schedules_sensors_tests/test_schedules.py | Andrew-Crosby/dagster | e646625a687dc656bdd855d88b868de957b37b62 | [
"Apache-2.0"
] | null | null | null | import pytest
from dagster import ModeDefinition, build_schedule_context, pipeline, solid, validate_run_config
from docs_snippets.concepts.partitions_schedules_sensors.schedules.schedule_examples import (
my_daily_schedule,
my_hourly_schedule,
my_modified_preset_schedule,
my_monthly_schedule,
my_preset_schedule,
my_weekly_schedule,
test_hourly_schedule,
)
def test_schedule_testing_example():
    """Smoke-test the hourly-schedule testing example from the docs snippet."""
    test_hourly_schedule()
@pytest.mark.parametrize(
    "schedule_to_test",
    [
        my_hourly_schedule,
        my_daily_schedule,
        my_weekly_schedule,
        my_monthly_schedule,
        my_preset_schedule,
        my_modified_preset_schedule,
    ],
)
def test_schedule_examples(schedule_to_test):
    """Every documented schedule must emit run requests whose run config
    validates against a minimal compatible pipeline."""
    # Minimal solid/pipeline pair matching the config shape the schedules emit.
    @solid(config_schema={"date": str})
    def process_data_for_date(_):
        pass

    @pipeline(mode_defs=[ModeDefinition("basic")])
    def pipeline_for_test():
        process_data_for_date()

    schedule_data = schedule_to_test.evaluate_tick(build_schedule_context())
    for run_request in schedule_data.run_requests:
        assert validate_run_config(pipeline_for_test, run_request.run_config)
| 27.380952 | 96 | 0.752174 | 0 | 0 | 0 | 0 | 695 | 0.604348 | 0 | 0 | 31 | 0.026957 |
dc1b62f0b9101903170d1d98ac96b6de03cffe34 | 17,803 | py | Python | apps/documents/api/v1/views.py | LopsanAMO/Despacho | 0c6be0289caa9eab851fd5fa9077a82c81ddbd95 | [
"MIT"
] | null | null | null | apps/documents/api/v1/views.py | LopsanAMO/Despacho | 0c6be0289caa9eab851fd5fa9077a82c81ddbd95 | [
"MIT"
] | 4 | 2020-06-05T17:33:16.000Z | 2021-06-01T21:56:31.000Z | apps/documents/api/v1/views.py | LopsanAMO/Despacho | 0c6be0289caa9eab851fd5fa9077a82c81ddbd95 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.parsers import MultiPartParser, FormParser
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.decorators import api_view, permission_classes
from .serializers import (
AllUserClientSerializer, ClientSerializer, ClientFolderSerializer,
DocumentDetailSerializer, FolderSerializer, DocumentInfoSerializer,
ClientSimpleSerializer, FolderSimpleSerializer
)
from documents.models import UserClient, Document, FolderClient, Log
from users.permissions import IsAdminDelete
from utils.helpers import RequestInfo, LargeResultsSetPagination
@api_view(['GET'])
def user_by_name(request):
    """Search clients whose slug contains the slugified ``name`` GET param.

    Always responds HTTP 200 with ``{"results": [...], "count": N}``;
    results are ordered by client name.
    """
    from django.shortcuts import HttpResponse
    from django.template import defaultfilters
    name = defaultfilters.slugify(request.GET.get('name'))
    try:
        serializer = ClientSerializer(
            UserClient.objects.filter(slug__icontains=name).order_by('name'),
            many=True)
        return HttpResponse(
            json.dumps({
                "results": serializer.data,
                "count": len(serializer.data)}),
            content_type='application/json',
            status=200
        )
    except Exception:
        # NOTE(review): deliberately broad — any query/serialization error
        # degrades to "no matches" with a 200 status rather than a 500.
        return HttpResponse(
            json.dumps({"results": [], "count": 0}),
            content_type='application/json',
            status=200
        )
@api_view(['GET'])
def document_by_name(request):
    """Search documents whose slug contains the slugified ``name`` GET param.

    Always responds HTTP 200 with ``{"results": [...], "count": N}``;
    any failure degrades to an empty result set.
    """
    from django.shortcuts import HttpResponse
    from django.template import defaultfilters
    slug_fragment = defaultfilters.slugify(request.GET.get('name'))
    try:
        matches = Document.objects.filter(
            slug__icontains=slug_fragment
        ).order_by('name')
        payload = DocumentDetailSerializer(matches, many=True).data
        body = json.dumps({"results": payload, "count": len(payload)})
        return HttpResponse(
            body,
            content_type='application/json',
            status=200
        )
    except Exception:
        return HttpResponse(
            json.dumps({"results": [], "count": 0}),
            content_type='application/json',
            status=200
        )
class UserListAPIView(generics.ListAPIView):
    """UserListAPIView

    Args:
        :param order: (str) newer or older
        :param limit: (int) limit pagination per page, default 10
    """
    authentication_class = (JSONWebTokenAuthentication,)
    queryset = UserClient.objects.all()
    serializer_class = AllUserClientSerializer
    pagination_class = LargeResultsSetPagination

    def get_queryset(self):
        # Default to newest-first when no ``order`` param is supplied.
        requested = self.request.query_params.get('order')
        order = 'newer' if requested is None else requested
        sort_field = '-created' if order == 'newer' else 'created'
        return self.queryset.order_by(sort_field)
class ClientListAPIView(generics.ListAPIView):
    """List clients alphabetically by name, or by creation time on request.

    Args:
        :param order: (str) newer or older; when omitted, the default
            alphabetical (``name``) ordering is kept.
        :param limit: (int) limit pagination per page, default 10
    """
    authentication_class = (JSONWebTokenAuthentication,)
    queryset = UserClient.objects.all().order_by('name')
    serializer_class = ClientSerializer
    pagination_class = LargeResultsSetPagination

    def get_queryset(self):
        order = self.request.query_params.get('order')
        if order is None:
            # Bug fix: a missing ``order`` param previously fell through to
            # the 'older' branch, silently discarding the alphabetical
            # default ordering of the base queryset.
            return self.queryset
        if order == 'newer':
            return self.queryset.order_by('-created')
        return self.queryset.order_by('created')
class ClientFolderListAPIView(generics.ListAPIView):
    """List clients (with their folders), optionally filtered by client slug.

    Query params:
        name: slug of the single client to return
    """
    authentication_class = (JSONWebTokenAuthentication,)
    serializer_class = ClientFolderSerializer

    def get_queryset(self):
        queryset = UserClient.objects.all()
        name = self.request.query_params.get('name')
        if name is not None:
            # Fix: dropped the original's no-op ``else: queryset = queryset``.
            queryset = queryset.filter(slug=name)
        return queryset
class DocumentListAPIView(generics.ListAPIView):
    """List documents, optionally restricted to a single folder.

    Query params:
        folder: slug of the folder whose documents are listed
    """
    authentication_class = (JSONWebTokenAuthentication,)
    serializer_class = DocumentDetailSerializer
    queryset = Document.objects.all()

    def get_queryset(self):
        folder_slug = self.request.query_params.get('folder')
        if folder_slug is not None:
            return self.queryset.filter(
                folder=FolderClient.objects.get(slug=folder_slug)
            )
        # BUG FIX: the original else-branch read the unassigned local
        # ``queryset`` (UnboundLocalError). Fall back to the full queryset.
        return self.queryset
class UserClientDetailAPIView(APIView):
    """Update (PUT) or delete (DELETE) a client and everything it owns."""
    permission_classes = (IsAdminDelete, )

    def get_object(self, pk):
        """Return the UserClient with primary key ``pk``.

        On a missing client, returns the exception message string instead of
        raising; callers distinguish the two via isinstance checks.
        """
        try:
            return UserClient.objects.get(pk=pk)
        except UserClient.DoesNotExist as e:
            return e.args[0]

    def put(self, request, pk):
        """Update a client's information and log the modification."""
        # Fix: removed a leftover ``import pudb; pudb.set_trace()`` debugger
        # call that froze every PUT request in production.
        req_inf = RequestInfo()
        user_client = self.get_object(pk)
        if isinstance(user_client, UserClient):
            serializer = ClientSerializer(user_client, data=request.data)
            if serializer.is_valid():
                try:
                    serializer.save()
                    log = Log.objects.create(
                        action=Log.NOTIFICATION_TYPE.get_value(
                            'update_client'),
                        user=request.user,
                        description='Modificacion de cliente {} - {}'.format(
                            serializer.instance.id, serializer.instance.name
                        )
                    )
                    log.save()
                    return req_inf.status_200()
                except Exception as e:
                    return req_inf.status_400(e.args[0])
            return req_inf.status_400(serializer.errors)
        else:
            return req_inf.status_404(user_client)

    def delete(self, request, pk=None):
        """Delete a client by slug ``name`` along with its folders' documents.

        Query params:
            name: client slug (required)
        """
        req_inf = RequestInfo()
        name = request.query_params.get('name', None)
        if name is None:
            return req_inf.status_400('Nombre de cliente requerido')
        try:
            client = UserClient.objects.get(slug=name)
            folders = FolderClient.objects.filter(user=client)
            last_id = client.id
            last_name = client.name
            # Delete stored files before their DB rows so no orphan files remain.
            for folder in folders:
                documents = Document.objects.filter(folder=folder)
                for doc in documents:
                    doc.document.delete()
                    doc.delete()
            client.delete()
            log = Log.objects.create(
                action=Log.NOTIFICATION_TYPE.get_value(
                    'delete_client'),
                user=request.user,
                description='Eliminacion de cliente {} - {}'.format(
                    last_id, last_name)
            )
            log.save()
            return req_inf.status_200()
        except Exception as e:
            return req_inf.status_400(e.args[0])
class UserClientAPIView(APIView):
    """Lookup (GET) a client by slug, or create (POST) a new client."""

    def get(self, request):
        """Return the id/basic info of the client whose slug is ``name``."""
        req_inf = RequestInfo()
        name = request.GET.get('name', None)
        try:
            serializer = ClientSimpleSerializer(
                UserClient.objects.get(slug=name))
            return Response(serializer.data)
        except Exception as e:
            return req_inf.status_400(e.args[0])

    def post(self, request):
        """Create a client and log the creation.

        Body params:
            name: the name of the client
        """
        req_inf = RequestInfo()
        serializer = ClientSerializer(data=request.data)
        if serializer.is_valid():
            try:
                serializer.save()
                log = Log.objects.create(
                    action=Log.NOTIFICATION_TYPE.get_value(
                        'create_client'),
                    user=request.user,
                    description='Creacion de cliente {} - {}'.format(
                        serializer.instance.id, serializer.instance.name
                    )
                )
                log.save()
                return req_inf.status_200()
            except Exception as e:
                # BUG FIX: was ``req_inf.status(e.args[0])`` — RequestInfo is
                # used everywhere else via status_200/status_400/status_404,
                # so this branch raised instead of answering 400.
                return req_inf.status_400(e.args[0])
        else:
            return req_inf.status_400(serializer.errors)
class FolderAPIView(APIView):
    """Lookup (GET) a folder by slug, or create (POST) a new folder."""

    def get(self, request):
        """Return the id/basic info of the folder whose slug is ``name``.

        Query params:
            name: folder slug name (required)
        """
        req_inf = RequestInfo()
        name = request.GET.get('name', None)
        if name is None:
            return req_inf.status_400('nombre de folder requerido')
        try:
            folder = FolderClient.objects.get(slug=name)
            return Response(FolderSimpleSerializer(folder).data)
        except Exception as e:
            return req_inf.status_400(e.args[0])

    def post(self, request):
        """Create a folder and log the creation.

        Body params:
            name: the name of the folder
            user: owner user id
        """
        req_inf = RequestInfo()
        serializer = FolderSerializer(data=request.data)
        if not serializer.is_valid():
            return req_inf.status_400(serializer.errors)
        try:
            serializer.save()
            entry = Log.objects.create(
                action=Log.NOTIFICATION_TYPE.get_value(
                    'create_folder'),
                user=request.user,
                description='Creacion de folder {} - {}'.format(
                    serializer.instance.id, serializer.instance.name
                )
            )
            entry.save()
            return req_inf.status_200()
        except Exception as e:
            return req_inf.status_400(e.args[0])
class FolderClientAPIView(APIView):
    """Update (PUT) or delete (DELETE) a folder and its documents."""
    permission_classes = (IsAdminDelete, )

    def get_object(self, pk):
        """Return the FolderClient with primary key ``pk``.

        On a missing folder, returns the exception message string instead of
        raising; callers distinguish the two via isinstance checks.
        """
        try:
            return FolderClient.objects.get(pk=pk)
        except FolderClient.DoesNotExist as e:
            return e.args[0]

    def put(self, request, pk=None):
        """Update a folder's information and log the modification."""
        req_inf = RequestInfo()
        folder_client = self.get_object(pk)
        if isinstance(folder_client, FolderClient):
            serializer = FolderSerializer(folder_client, data=request.data)
            if serializer.is_valid():
                # Wrapped in try/except for consistency with the other detail
                # views, so save/log failures answer 400 instead of 500.
                try:
                    serializer.save()
                    log = Log.objects.create(
                        action=Log.NOTIFICATION_TYPE.get_value(
                            'update_folder'),
                        user=request.user,
                        description='Modificacion de folder {} - {}'.format(
                            serializer.instance.id, serializer.instance.name
                        )
                    )
                    log.save()
                    return req_inf.status_200()
                except Exception as e:
                    return req_inf.status_400(e.args[0])
            return req_inf.status_400(serializer.errors)
        else:
            return req_inf.status_404(folder_client)

    def delete(self, request, pk=None):
        """Delete a folder by slug ``name`` along with its documents.

        Query params:
            name: folder slug (required)
        """
        req_inf = RequestInfo()
        name = request.query_params.get('name', None)
        if name is None:
            return req_inf.status_400('Nombre de folder requerido')
        try:
            folder = FolderClient.objects.get(slug=name)
            documents = Document.objects.filter(folder=folder)
            last_id = folder.id
            last_name = folder.name
            for doc in documents:
                doc.document.delete()
                doc.delete()
            folder.delete()
            # BUG FIX: this used to log 'update_client' / "Modificacion de
            # cliente" for a folder deletion. NOTE(review): assumes
            # NOTIFICATION_TYPE defines 'delete_folder' (it defines
            # 'delete_client' and 'delete_document' used elsewhere) — confirm.
            log = Log.objects.create(
                action=Log.NOTIFICATION_TYPE.get_value(
                    'delete_folder'),
                user=request.user,
                description='Eliminacion de folder {} - {}'.format(
                    last_id, last_name
                )
            )
            log.save()
            return req_inf.status_200()
        except Exception as e:
            return req_inf.status_400(e.args[0])
class DocumentAPIView(APIView):
    """Upload endpoint for documents (multipart form data)."""
    parser_classes = (MultiPartParser, FormParser)

    def post(self, request):
        """Create a document and log the creation.

        Body params:
            name: the name of the document
            document: uploaded document file
            folder: folder id
        """
        req_inf = RequestInfo()
        serializer = DocumentInfoSerializer(data=request.data)
        if not serializer.is_valid():
            return req_inf.status_400(serializer.errors)
        try:
            serializer.save()
            entry = Log.objects.create(
                action=Log.NOTIFICATION_TYPE.get_value(
                    'create_document'),
                user=request.user,
                description='Creacion de Documento {} - {}'.format(
                    serializer.instance.id, serializer.instance.name
                )
            )
            entry.save()
            return req_inf.status_200()
        except Exception as e:
            return req_inf.status_400(e.args[0])
class DocumentDetailAPIView(APIView):
    """Update (PUT) or delete (DELETE) a single document."""
    permission_classes = (IsAdminDelete, )

    def get_object(self, pk):
        """Return the Document with primary key ``pk``.

        On a missing document, returns the exception message string instead
        of raising; callers distinguish the two via isinstance checks.
        """
        try:
            return Document.objects.get(pk=pk)
        except Document.DoesNotExist as e:
            return e.args[0]

    def put(self, request, pk):
        """Update a document's information and log the modification."""
        req_inf = RequestInfo()
        document_cls = self.get_object(pk)
        if isinstance(document_cls, Document):
            serializer = DocumentInfoSerializer(
                document_cls,
                data=request.data
            )
            if serializer.is_valid():
                try:
                    serializer.save()
                    log = Log.objects.create(
                        action=Log.NOTIFICATION_TYPE.get_value(
                            'update_document'),
                        user=request.user,
                        description='Modificacion de documento {} - {}'.format(
                            serializer.instance.id, serializer.instance.name
                        )
                    )
                    log.save()
                    return req_inf.status_200()
                except Exception as e:
                    return req_inf.status_400(e.args[0])
            return req_inf.status_400(serializer.errors)
        else:
            return req_inf.status_404(document_cls)

    def delete(self, request, pk=None):
        """Delete a document by slug ``name`` together with its stored file.

        Query params:
            name: document slug (required)
        """
        req_inf = RequestInfo()
        name = request.query_params.get('name', None)
        if name is None:
            return req_inf.status_400('Nombre de documento requerido')
        try:
            document = Document.objects.get(slug=name)
            document.document.delete()
            last_id = document.id
            last_name = document.name
            document.delete()
            log = Log.objects.create(
                action=Log.NOTIFICATION_TYPE.get_value(
                    'delete_document'),
                user=request.user,
                # BUG FIX: the original referenced the undefined name
                # ``las_id`` here, raising NameError and skipping the log.
                description='Eliminacion de documento {}- {}'.format(
                    last_id, last_name
                )
            )
            log.save()
            return req_inf.status_200()
        except Exception as e:
            return req_inf.status_400(e.args[0])
| 34.501938 | 79 | 0.548672 | 15,467 | 0.868786 | 0 | 0 | 1,538 | 0.08639 | 0 | 0 | 3,437 | 0.193057 |
dc1c34dd0e875151a5cb78c824921a11dc907308 | 279 | py | Python | samle_python.py | sarum90/langmapper | 010b7f8b075c45f3c43dcd7d478f6bfe192ccc01 | [
"MIT"
] | null | null | null | samle_python.py | sarum90/langmapper | 010b7f8b075c45f3c43dcd7d478f6bfe192ccc01 | [
"MIT"
] | null | null | null | samle_python.py | sarum90/langmapper | 010b7f8b075c45f3c43dcd7d478f6bfe192ccc01 | [
"MIT"
def process(record):
    """Map a raw record to a compact dict when it has enough surface ids.

    Returns None when 'idsurface' holds four or fewer space-separated tokens;
    missing or None longitude/latitude default to 0.0.
    """
    surface_ids = (record.get('idsurface', '') or '').split(' ')
    if len(surface_ids) <= 4:
        return None
    return {'language': record['language'],
            'longitude': float(record['longitude'] or 0),
            'latitude': float(record['latitude'] or 0),
            'idsurface': surface_ids}
| 34.875 | 56 | 0.555556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.326165 |
dc1c868b655eedce37f969499e3f8d088756e9cb | 297 | py | Python | aulas/sqldb.py | thiagonantunes/Estudos | 4f3238cf036cf8ae5dc4dbdb106a2a85ed134f55 | [
"MIT"
] | 1 | 2020-04-07T09:48:15.000Z | 2020-04-07T09:48:15.000Z | aulas/sqldb.py | thiagonantunes/Estudos | 4f3238cf036cf8ae5dc4dbdb106a2a85ed134f55 | [
"MIT"
] | null | null | null | aulas/sqldb.py | thiagonantunes/Estudos | 4f3238cf036cf8ae5dc4dbdb106a2a85ed134f55 | [
"MIT"
] | null | null | null | import mysql.connector
# Connect to the local MySQL "cadastro" database.
# SECURITY(review): credentials (root, empty password) are hardcoded in
# source — move them to environment variables before real use.
mydb = mysql.connector.connect(
    host ='127.0.0.1',
    port = 3306,
    user ='root',
    password = '',
    database="cadastro"
)
# Fetch and print the first three rows of the ``gafanhotos`` table.
cursor = mydb.cursor()
cursor.execute("SELECT * FROM gafanhotos LIMIT 3")
resultado = cursor.fetchall()
# NOTE(review): neither cursor nor connection is closed — consider close().
for x in resultado:
print(x) | 18.5625 | 50 | 0.653199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 63 | 0.212121 |
dc1d61af75ba3b0bf8f5b6092c2e0716f461d910 | 105 | py | Python | main.py | YunPC/branch-practice | c8977397c9e548b20d05d6edd8c20c3905a89f7c | [
"MIT"
] | null | null | null | main.py | YunPC/branch-practice | c8977397c9e548b20d05d6edd8c20c3905a89f7c | [
"MIT"
] | 1 | 2021-04-01T06:22:09.000Z | 2021-04-01T06:24:37.000Z | main.py | YunPC/branch-practice | c8977397c9e548b20d05d6edd8c20c3905a89f7c | [
"MIT"
] | null | null | null | # print function on main branch
# Build the numbers 1..10, replacing every multiple of 5 with 'main'.
result = []
for value in range(1, 11):
    result.append('main' if value % 5 == 0 else value)
print(result)
| 26.25 | 58 | 0.67619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.352381 |
dc1e4af97e1b6d9c66d2fb3cd58e20455d4fb72d | 648 | py | Python | billing/views.py | SiddhantNaik17/TheBashTeam_website | a4c3e023599fa9f1b0afa6485346b5b7b883e7f5 | [
"MIT"
] | null | null | null | billing/views.py | SiddhantNaik17/TheBashTeam_website | a4c3e023599fa9f1b0afa6485346b5b7b883e7f5 | [
"MIT"
] | null | null | null | billing/views.py | SiddhantNaik17/TheBashTeam_website | a4c3e023599fa9f1b0afa6485346b5b7b883e7f5 | [
"MIT"
] | 1 | 2020-11-21T16:03:30.000Z | 2020-11-21T16:03:30.000Z | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from billing.utils import initiate_transaction
# Paytm merchant credentials used to initiate transactions.
# SECURITY(review): secrets are hardcoded in source control — move them to
# settings/environment variables and rotate the exposed key.
PAYTM_MERCHANT_ID = 'SNeEfa79194346659805'
PAYTM_MERCHANT_KEY = 'T7V3cbVmVIN1#YK7'
def initiate(request):
    """Start a Paytm transaction for the order id stored in the session.

    Reads ``order_id`` from the session, obtains a transaction token via
    ``initiate_transaction`` and renders the payment page with the merchant
    id, an order id and the token.
    """
    order_id = request.session['order_id']
    response = initiate_transaction(order_id)
    context = {
        'mid': PAYTM_MERCHANT_ID,
        # NOTE(review): the order id passed to the template is doubled here —
        # presumably to derive a distinct gateway order id; confirm intent.
        'order_id': 2 * order_id,
        'txn_token': response['body']['txnToken'],
    }
    return render(request, 'billing/show_payments.html', context)
@csrf_exempt
def processing(request):
    # The payment gateway posts back to this endpoint (hence csrf_exempt);
    # show a "transaction in process" page while the payment completes.
    return render(request, 'billing/transaction_in_process.html')
| 27 | 65 | 0.739198 | 0 | 0 | 0 | 0 | 103 | 0.158951 | 0 | 0 | 157 | 0.242284 |
dc1ed9368be673800a00c49ccac8a2d18c339a55 | 622 | py | Python | requests__examples/yahoo_api__rate_currency.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 117 | 2015-12-18T07:18:27.000Z | 2022-03-28T00:25:54.000Z | requests__examples/yahoo_api__rate_currency.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 8 | 2018-10-03T09:38:46.000Z | 2021-12-13T19:51:09.000Z | requests__examples/yahoo_api__rate_currency.py | DazEB2/SimplePyScripts | 1dde0a42ba93fe89609855d6db8af1c63b1ab7cc | [
"CC-BY-4.0"
] | 28 | 2016-08-02T17:43:47.000Z | 2022-03-21T08:31:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# TODO: use http://www.cbr.ru/scripts/Root.asp?PrtId=SXML instead, or figure out the data returned by query.yahooapis.com
# (some of the query parameters below are unclear)
# TODO: build a console interface
# TODO: build a GUI
# TODO: build a server mode
import requests
rs = requests.get('https://query.yahooapis.com/v1/public/yql?q=select+*+from+yahoo.finance.xchange+where+pair+=+%22USDRUB,EURRUB%22&format=json&env=store%3A%2F%2Fdatatables.org%2Falltableswithkeys&callback=')
print(rs.json())
# Print each currency pair name with its current exchange rate.
for rate in rs.json()['query']['results']['rate']:
    print(rate['Name'], rate['Rate'])
| 32.736842 | 208 | 0.729904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 588 | 0.814404 |
dc22ebe19876ff70a14de4bf47ac789c3b50cffb | 2,329 | py | Python | pexecute/runner_wrapper.py | mikevromen/parallel-execute | e5009011a51eb62a2a14839e2a0e3b899fb9be43 | [
"MIT"
] | 66 | 2019-05-30T08:45:17.000Z | 2022-03-13T12:25:35.000Z | pexecute/runner_wrapper.py | mikevromen/parallel-execute | e5009011a51eb62a2a14839e2a0e3b899fb9be43 | [
"MIT"
] | 2 | 2019-10-24T10:24:00.000Z | 2019-11-18T00:21:04.000Z | pexecute/runner_wrapper.py | mikevromen/parallel-execute | e5009011a51eb62a2a14839e2a0e3b899fb9be43 | [
"MIT"
] | 6 | 2020-11-26T20:07:45.000Z | 2021-07-11T09:54:30.000Z | import abc
import logging
from datetime import datetime
from .log_adapter import adapt_log
LOGGER = logging.getLogger(__name__)
class RunnerWrapper(abc.ABC):
    """Abstract wrapper that executes a FuncRunner and records its outcome."""
    log = adapt_log(LOGGER, 'RunnerWrapper')

    def __init__(self, func_runner, runner_id, key, tracker, log_exception=True):
        """Initialize the wrapper.

        Args:
            func_runner (FuncRunner): FuncRunner instance to execute
            runner_id (int): identifier of this runner
            key (str): key under which this run's results are stored
            tracker (dict): shared dict collecting per-key run results
            log_exception (bool): log a traceback when the run raises
        """
        self.func_runner = func_runner
        self.id = runner_id
        self.key = key
        self.tracker = tracker
        self.log_exception = log_exception
        self.runner = None
        self.__reset_entry()

    def __str__(self):
        return f"<RunnerWrapper {self.key}[#{self.id}] {self.func_runner}>"

    def __reset_entry(self):
        # Start with an empty tracker slot for this key.
        self.tracker[self.key] = dict()

    def __record_result(self, started, finished, output, got_error, error):
        """Store timing, output and error state of the run in the tracker."""
        self.tracker[self.key] = {
            "started_time": started,
            "finished_time": finished,
            "execution_time": (finished - started).total_seconds(),
            "output": output,
            "got_error": got_error,
            "error": error
        }

    def is_tracker_updated(self):
        """Return True once a result has been recorded for this key."""
        return bool(self.tracker[self.key])

    def run(self):
        """Execute the wrapped FuncRunner, capturing output, errors and timing."""
        output = None
        error = None
        got_error = False
        started = datetime.now()
        try:
            output = self.func_runner.run()
        except Exception as exc:
            got_error = True
            error = str(exc)
            if self.log_exception:
                self.log.exception(f"Encountered an exception on {self} {exc}")
        finally:
            # Always record the outcome, even when the runner raised.
            self.__record_result(started, datetime.now(), output, got_error, error)

    def join(self):
        self.runner.join()

    @abc.abstractmethod
    def start(self):
        """ Starts runner thread """
        pass

    @abc.abstractmethod
    def is_running(self):
        """ Returns True if runner is active else False """
        pass
| 26.770115 | 87 | 0.592958 | 2,196 | 0.942894 | 0 | 0 | 210 | 0.090167 | 0 | 0 | 587 | 0.25204 |
dc2545649072034e58b37faa1c657f442101a9f5 | 46 | py | Python | pythonExercicios/desafio027.py | GuilhermeKAC/cursoemvideo-python | 7a76f07aa8d8ae462eab7624a0b3cbc7df1fb02e | [
"MIT"
] | 1 | 2021-07-18T17:53:32.000Z | 2021-07-18T17:53:32.000Z | pythonExercicios/desafio027.py | GuilhermeKAC/cursoemvideo-python | 7a76f07aa8d8ae462eab7624a0b3cbc7df1fb02e | [
"MIT"
] | null | null | null | pythonExercicios/desafio027.py | GuilhermeKAC/cursoemvideo-python | 7a76f07aa8d8ae462eab7624a0b3cbc7df1fb02e | [
"MIT"
] | null | null | null | nome = input('Dgigite seu nome completo: ')
| 11.5 | 43 | 0.673913 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.630435 |
dc2700f6e1bcfefcc738d3035b2d0f9af75c4bbe | 8,478 | py | Python | src/lib/data_collect/mushroomObserver/api/taxon.py | DCEN-tech/Mushroom_Py-cture_Recognition | 33a85ee401e0d812f53cab620c16466f2134a441 | [
"MIT"
] | null | null | null | src/lib/data_collect/mushroomObserver/api/taxon.py | DCEN-tech/Mushroom_Py-cture_Recognition | 33a85ee401e0d812f53cab620c16466f2134a441 | [
"MIT"
] | null | null | null | src/lib/data_collect/mushroomObserver/api/taxon.py | DCEN-tech/Mushroom_Py-cture_Recognition | 33a85ee401e0d812f53cab620c16466f2134a441 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Import Librairies
#
# Python
import requests
#
# User
from data_collect.mushroomObserver.api import api
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Constants
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
TAXON_TABLE_NAME = 'names'
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Classes
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CMO_API_GET_Taxon(api.CMO_API_GET):
   '''
   DESCRIPTION
      This class implements the code needed to realize Http GET commands on TAXONS through MushroomObserver API.
   '''

   def __init__(self, moAPIHttp_GET_taxon_req, moAPIHttp=None):
      '''
      DESCRIPTION
         Constructor for the class CMO_API_GET_Taxon.

      ARGUMENTS
         moAPIHttp_GET_taxon_req
            Default: no default
            The GET request to run on Taxons (i.e. it must carry the criteria
            used to filter the Taxons being requested).

         moAPIHttp
            Default: None
            An instance of the CMO_API_HTTP class (the class implementing
            Http commands through the MushroomObserver API).
            If None, an instance is created internally by the base-class
            constructor.
      '''
      # Fix: dropped the needless ``global TAXON_TABLE_NAME`` — reading a
      # module-level constant requires no global declaration.
      self.__tableName = TAXON_TABLE_NAME
      super().__init__(
           self.__tableName
         , moAPIHttp_GET_request = moAPIHttp_GET_taxon_req
         , moAPIHttp = moAPIHttp
      )
class CMO_API_GET_Taxon_Request(api.CMO_API_GET_REQUEST):
   '''
   DESCRIPTION
      Request object used to GET Taxons through the MushroomObserver API.
      Every constructor argument that is not None becomes a filter criterion.
   '''

   def __init__(self
               ,  id = None
               ,  name = None
               ,  textNameHas = None
               ,  rank = None
               ,  classificationHas = None
               ,  createdAt = None
               ,  hasClassification = None
               ,  hasDescription = None
               ,  hasNotes = None
               ,  hasSynonyms = None
               ,  includeSubtaxa = None
               ,  includeSynonyms = None
               ,  isDeprecated = None
               ,  location = None
               ,  misspellings = None
               ,  detail = 'low'
               ,  format = 'json'
   ):
      '''
      DESCRIPTION
         Build the filter used by CMO_API_GET_Taxon's get method: provide the
         criteria identifying the Taxons of interest (id, name, rank, ...).

      ARGUMENTS
         id / name / textNameHas / rank / ...:
            Optional filter criteria; a None value means "no filter on this
            field".

         detail
            Possible values: { 'low' | 'high' | None }
            Detail level used by the GET method to produce its result.

         format
            Possible values: { 'json' }
            Format used by the GET method to produce its result.
      '''
      super().__init__()
      # (criterion key, value) pairs in the same order the original if-chain
      # registered them; None values are skipped.
      optional_criteria = (
           ('id', id)
         , ('name', name)
         , ('text_name_has', textNameHas)
         , ('rank', rank)
         , ('classification_has', classificationHas)
         , ('created_at', createdAt)
         , ('has_classification', hasClassification)
         , ('has_description', hasDescription)
         , ('has_notes', hasNotes)
         , ('has_synonyms', hasSynonyms)
         , ('include_subtaxa', includeSubtaxa)
         , ('include_synonyms', includeSynonyms)
         , ('is_deprecated', isDeprecated)
         , ('location', location)
         , ('misspellings', misspellings)
      )
      for criterion, value in optional_criteria:
         if value is not None:
            self.set(key = criterion, value = value)
      self.set(key = 'detail', value = detail)
      self.set(key = 'format', value = format)
class CMO_API_Taxon_Data:
   '''
   DESCRIPTION
      Holds the data extracted from the result of an Http GET command on
      TAXON, flattened into three recordsets (taxons, parents, synonyms).
   '''

   def __init__(self, taxonHttpResult):
      # taxonHttpResult: CMO_API_HTTP_Result (or None → empty recordsets).
      self.__recsetTaxon = []
      self.__recsetTaxonParent = []
      self.__recsetTaxonSynonym = []
      if taxonHttpResult is None:
         return
      for taxon in taxonHttpResult.get_data():
         taxon_id = taxon.get('id')
         self.__recsetTaxon.append({
              'id'         : taxon_id
            , 'name'       : taxon.get('name')
            , 'rank'       : taxon.get('rank')
            , 'deprecated' : taxon.get('deprecated')
            , 'misspelled' : taxon.get('misspelled')
            , 'views'      : taxon.get('number_of_views')
         })
         parents = taxon.get('parents')
         if parents is not None:
            for parent in parents:
               if parent is None:
                  # Preserve a placeholder row for null parents.
                  row = {'taxon_id': None, 'parent_name': None,
                         'parent_rank': None}
               else:
                  row = {'taxon_id': taxon_id,
                         'parent_name': parent.get('name'),
                         'parent_rank': parent.get('rank')}
               self.__recsetTaxonParent.append(row)
         synonyms = taxon.get('synonyms')
         if synonyms is not None:
            for synonym in synonyms:
               if synonym is None:
                  # Preserve a placeholder row for null synonyms.
                  row = {'taxon_id': None, 'id': None,
                         'synonym_name': None, 'synonym_rank': None,
                         'synonym_deprecated': None, 'synonym_id': None}
               else:
                  row = {'taxon_id': taxon_id,
                         'id': synonym.get('id'),
                         'synonym_name': synonym.get('name'),
                         'synonym_rank': synonym.get('rank'),
                         'synonym_deprecated': synonym.get('deprecated'),
                         'synonym_id': synonym.get('synonym_id')}
               self.__recsetTaxonSynonym.append(row)

   def get_recsetTaxon(self):
      """Return the flat taxon recordset."""
      return self.__recsetTaxon

   def get_recsetTaxonParent(self):
      """Return the taxon-to-parent recordset."""
      return self.__recsetTaxonParent

   def get_recsetTaxonSynonym(self):
      """Return the taxon-to-synonym recordset."""
      return self.__recsetTaxonSynonym
| 33.377953 | 117 | 0.558033 | 8,077 | 0.952701 | 0 | 0 | 0 | 0 | 0 | 0 | 3,362 | 0.396556 |
dc27033e5b3226d69a85824ce542c315fefd87b4 | 2,636 | py | Python | hidrocomp/graphics/genpareto.py | clebsonpy/HydroComp | 9d17fa533e8a15c760030df5246ff531ddb4cb22 | [
"MIT"
] | 4 | 2020-05-14T20:03:49.000Z | 2020-05-22T19:56:43.000Z | hidrocomp/graphics/genpareto.py | clebsonpy/HydroComp | 9d17fa533e8a15c760030df5246ff531ddb4cb22 | [
"MIT"
] | 19 | 2019-06-27T18:12:27.000Z | 2020-04-28T13:28:03.000Z | hidrocomp/graphics/genpareto.py | clebsonpy/HydroComp | 9d17fa533e8a15c760030df5246ff531ddb4cb22 | [
"MIT"
] | null | null | null | import scipy.stats as stat
import pandas as pd
import plotly.graph_objs as go
from hidrocomp.graphics.distribution_build import DistributionBuild
class GenPareto(DistributionBuild):
    """Plotly figure helpers for a fitted Generalized Pareto distribution.

    Builds figure dicts for the cumulative (CDF) and density (PDF) curves of
    a GP distribution parameterized by shape/location/scale (attributes set
    by the DistributionBuild base class).
    """

    def __init__(self, title, shape, location, scale):
        super().__init__(title, shape, location, scale)

    def _build_figure(self, datas, y_col, kind, yaxis_title):
        """Assemble the (fig, data) pair shared by cumulative() and density().

        Extracted to remove the layout code duplicated in both plot methods.
        """
        data = [go.Scatter(x=datas['peaks'], y=datas[y_col],
                           name=self.title, line=dict(color='rgb(128, 128, 128)',
                                                      width=2))]
        bandxaxis = go.layout.XAxis(title="Vazão(m³/s)")
        bandyaxis = go.layout.YAxis(title=yaxis_title)
        # NOTE(review): 'Time New Roman' looks like a typo of 'Times New
        # Roman' — kept as-is to preserve current rendering; confirm.
        layout = dict(title="GP - %s: %s" % (kind, self.title),
                      showlegend=True,
                      width=945, height=827,
                      xaxis=bandxaxis,
                      yaxis=bandyaxis,
                      font=dict(family='Time New Roman', size=28, color='rgb(0,0,0)')
                      )
        return dict(data=data, layout=layout), data

    def cumulative(self):
        """Figure of the GP cumulative distribution over computed quantiles."""
        datas = self._data('cumulative')
        return self._build_figure(datas, 'Cumulative', 'Acumulada', "Probabilidade")

    def density(self):
        """Figure of the GP probability density over computed quantiles."""
        datas = self._data('density')
        return self._build_figure(datas, 'Density', 'Densidade', "")

    def _data_density(self):
        """PDF values evaluated at the quantiles of the cumulative table."""
        cumulative = self._data_cumulative()
        density = stat.genpareto.pdf(cumulative['peaks'].values, self.shape,
                                     loc=self.location, scale=self.scale)
        return pd.DataFrame({'peaks': cumulative['peaks'].values,
                             'Density': density})

    def _data_cumulative(self):
        """Quantiles for probabilities 0.001..0.999 in steps of 0.001."""
        probability = [i / 1000 for i in range(1, 1000)]
        quantiles = stat.genpareto.ppf(probability, self.shape,
                                       loc=self.location,
                                       scale=self.scale)
        return pd.DataFrame({'peaks': quantiles, 'Cumulative': probability})
| 34.233766 | 85 | 0.518968 | 2,489 | 0.942803 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.109091 |
dc274952ffbbbc7758469183c1112ecd840c0d35 | 18,853 | py | Python | plugins/misc.py | gorpo/manicomio_bot_heroku | aa8dc217468d076f26604a209b5798642217c789 | [
"MIT"
] | null | null | null | plugins/misc.py | gorpo/manicomio_bot_heroku | aa8dc217468d076f26604a209b5798642217c789 | [
"MIT"
] | null | null | null | plugins/misc.py | gorpo/manicomio_bot_heroku | aa8dc217468d076f26604a209b5798642217c789 | [
"MIT"
] | null | null | null |
import html
import re
import random
import amanobot
import aiohttp
from amanobot.exception import TelegramError
import time
from config import bot, sudoers, logs, bot_username
from utils import send_to_dogbin, send_to_hastebin
async def misc(msg):
    """Dispatch miscellaneous chat commands found in *msg* (an amanobot message dict).

    Handles echo/joke commands, /mark and /html re-rendering, /admins listing,
    /token inspection, /bug reports, dogbin/hastebin pastes, /request HTTP
    probing, self-kick ('ban') and several "retweet"-style reply commands.
    Most branches append a usage line to logs/grupos.txt and return True when
    a command was handled; unmatched text falls through (implicit None).
    """
    if msg.get('text'):
        # echo command: repeats the user's text back with an emphatic suffix
        if msg['text'].startswith('fala') or msg['text'].startswith('/echo')or msg['text'].startswith('echo') or msg['text'] == '/echo@' + bot_username:
            print('Usuario {} solicitou echo'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou echo --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            # NOTE(review): the log file is re-opened on every command; the
            # logging module would be cleaner — left as-is here.
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg.get('reply_to_message'):
                reply_id = msg['reply_to_message']['message_id']
            else:
                reply_id = None
            await bot.sendMessage(msg['chat']['id'],'{} pra caralho'.format(msg['text'][5:]),
                                  reply_to_message_id=reply_id)
            return True
        # 'owna' command: same echo pattern with a harsher suffix
        elif msg['text'].startswith('owna'):
            print('Usuario {} solicitou owna'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou owna --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg.get('reply_to_message'):
                reply_id = msg['reply_to_message']['message_id']
            else:
                reply_id = None
            await bot.sendMessage(msg['chat']['id'],'{} pra caralho esta porra filho da puta!'.format(msg['text'][5:]),
                                  reply_to_message_id=reply_id)
            return True
        # 'sla' command: echoes with a different suffix (no usage log here)
        elif msg['text'].startswith('sla') :
            if msg.get('reply_to_message'):
                reply_id = msg['reply_to_message']['message_id']
            else:
                reply_id = None
            await bot.sendMessage(msg['chat']['id'],'{} ta foda este lance.'.format(msg['text'][4:]),
                                  reply_to_message_id=reply_id)
            return True
        # /mark: re-send the remainder of the message rendered as Markdown
        elif msg['text'].startswith('/mark') or msg['text'].startswith('!mark') or msg['text'] == '/mark@' + bot_username:
            print('Usuario {} solicitou /mark'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /,ark --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg.get('reply_to_message'):
                reply_id = msg['reply_to_message']['message_id']
            else:
                reply_id = None
            await bot.sendMessage(msg['chat']['id'], msg['text'][6:], 'markdown',
                                  reply_to_message_id=reply_id)
            return True
        # /admins: list the administrators of the current group chat
        elif msg['text'] == '/admins' or msg['text'] == '/admin' or msg['text'] == 'admin' or msg['text'] == '/admin@' + bot_username:
            print('Usuario {} solicitou /admins'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /admin --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg['chat']['type'] == 'private':
                await bot.sendMessage(msg['chat']['id'], 'Este comando s? funciona em grupos ?\\_(?)_/?')
            else:
                adms = await bot.getChatAdministrators(msg['chat']['id'])
                names = 'Admins:\n\n'
                for num, user in enumerate(adms):
                    names += '{} - <a href="tg://user?id={}">{}</a>\n'.format(num + 1, user['user']['id'],
                                                                             html.escape(user['user']['first_name']))
                await bot.sendMessage(msg['chat']['id'], names, 'html',
                                      reply_to_message_id=msg['message_id'])
            return True
        # /token: validate a bot token and report that bot's identity
        elif msg['text'].startswith('/token') or msg['text'].startswith('!token') or msg['text'] == '/token@' + bot_username:
            print('Usuario {} solicitou /token'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /token --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            text = msg['text'][7:]
            try:
                # NOTE(review): getMe() is not awaited here — confirm this is
                # amanobot's synchronous API and not a missing `await`.
                bot_token = amanobot.Bot(text).getMe()
                bot_name = bot_token['first_name']
                bot_user = bot_token['username']
                bot_id = bot_token['id']
                await bot.sendMessage(msg['chat']['id'], f'''informacoes do bot:
Nome: {bot_name}
Username: @{bot_user}
ID: {bot_id}''',
                                      reply_to_message_id=msg['message_id'])
            except TelegramError:
                await bot.sendMessage(msg['chat']['id'], 'Token invalido.',
                                      reply_to_message_id=msg['message_id'])
            return True
        # /bug: forward a bug report to the log channel configured in `logs`
        elif msg['text'].startswith('/bug') or msg['text'].startswith('!bug') or msg['text'] == '/bug@' + bot_username:
            print('Usuario {} solicitou /bug'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /bug --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            text = msg['text'][5:]
            if text == '' or text == bot_username:
                await bot.sendMessage(msg['chat']['id'], '''*Uso:* `/bug <descrição do bug>` - _Reporta erro/bug para a equipe de desenvolvimento deste bot, so devem ser reportados bugs sobre este bot!_
obs.: Mal uso há possibilidade de ID\_ban''', 'markdown',
                                      reply_to_message_id=msg['message_id'])
            else:
                await bot.sendMessage(logs, f"""<a href="tg://user?id={msg['from']['id']}">{msg['from'][
                    'first_name']}</a> reportou um bug:
ID: <code>{msg['from']['id']}</code>
Mensagem: {text}""", 'HTML')
                await bot.sendMessage(msg['chat']['id'], 'O bug foi reportado com sucesso para a minha equipe!',
                                      reply_to_message_id=msg['message_id'])
            return True
        # /dogbin: paste the given text (or the replied-to text) to del.dog
        elif msg['text'].startswith('/dogbin') or msg['text'].startswith('!dogbin') or msg['text'] == '/dogbin@' + bot_username or msg['text'] == 'dogbin':
            print('Usuario {} solicitou /dogbin'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /dogbin --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            text = msg['text'][8:] or msg.get('reply_to_message', {}).get('text')
            if not text:
                await bot.sendMessage(msg['chat']['id'],
                                      '''*Uso:* `/dogbin <texto>` - _envia um texto para o del.dog._''',
                                      'markdown',
                                      reply_to_message_id=msg['message_id'])
            else:
                link = await send_to_dogbin(text)
                await bot.sendMessage(msg['chat']['id'], link, disable_web_page_preview=True,
                                      reply_to_message_id=msg['message_id'])
            return True
        # /hastebin: paste the given text (or the replied-to text) to hastebin
        elif msg['text'].startswith('/hastebin') or msg['text'].startswith('!hastebin') or msg['text'] == '/hastebin@' + bot_username or msg['text'] == 'hastebin' or msg['text'] == 'pastebin':
            print('Usuario {} solicitou /hastebin'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /hastebin --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            text = msg['text'][9:] or msg.get('reply_to_message', {}).get('text')
            if not text:
                await bot.sendMessage(msg['chat']['id'],
                                      '''*Uso:* `/hastebin <texto>` - _envia um texto para o hastebin._''',
                                      'markdown',
                                      reply_to_message_id=msg['message_id'])
            else:
                link = await send_to_hastebin(text)
                await bot.sendMessage(msg['chat']['id'], link, disable_web_page_preview=True,
                                      reply_to_message_id=msg['message_id'])
            return True
        # /html: re-send the remainder of the message rendered as HTML
        elif msg['text'].startswith('/html') or msg['text'].startswith('!html') or msg['text'] == '/html@' + bot_username or msg['text'] == 'html':
            print('Usuario {} solicitou /html'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou /html --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg.get('reply_to_message'):
                reply_id = msg['reply_to_message']['message_id']
            else:
                reply_id = None
            await bot.sendMessage(msg['chat']['id'], msg['text'][6:], 'html',
                                  reply_to_message_id=reply_id)
            return True
        # ban: kicks the *requester* themselves via unbanChatMember
        elif msg['text'] == 'ban' or msg['text'] == '/ban' or msg['text'] == '/gorpo@' + bot_username:
            print('Usuario {} solicitou ban'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou ban --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            try:
                await bot.unbanChatMember(msg['chat']['id'], msg['from']['id'])
            except TelegramError:
                await bot.sendMessage(msg['chat']['id'],
                                      'Nao deu pra te remover, voce.... deve ser um admin ou eu nao sou admin nesta bosta.',
                                      reply_to_message_id=msg['message_id'])
            return True
        # /request: fetch a URL and report status, headers and body
        elif msg['text'].startswith('/request') or msg['text'].startswith('!request') or msg['text'] == '/request@' + bot_username or msg['text'] == 'request':
            # Prepend http:// when the argument has no explicit scheme.
            if re.match(r'^(https?)://', msg['text'][9:]):
                text = msg['text'][9:]
            else:
                text = 'http://' + msg['text'][9:]
            try:
                async with aiohttp.ClientSession() as session:
                    r = await session.get(text)
            except Exception as e:
                return await bot.sendMessage(msg['chat']['id'], str(e),
                                             reply_to_message_id=msg['message_id'])
            # NOTE(review): the response body is read below *after* the
            # ClientSession context has exited — verify the reads still work,
            # or move them inside the `async with` block.
            headers = '<b>Status-Code:</b> <code>{}</code>\n'.format(str(r.status))
            headers += '\n'.join('<b>{}:</b> <code>{}</code>'.format(x, html.escape(r.headers[x])) for x in r.headers)
            rtext = await r.text()
            if len(rtext) > 3000:
                # Long bodies go to dogbin instead of the chat message.
                content = await r.read()
                res = await send_to_dogbin(content)
            else:
                res = '<code>' + html.escape(rtext) + '</code>'
            await bot.sendMessage(msg['chat']['id'], '<b>Headers:</b>\n{}\n\n<b>Conteudo:</b>\n{}'.format(headers, res),
                                  'html', reply_to_message_id=msg['message_id'])
            return True
        # suco: joke reply whose text depends on sudoer membership
        elif msg['text'] == 'suco':
            if msg['from']['id'] in sudoers:
                is_sudo = 'é gostozinho'
            else:
                is_sudo = 'tem gosto de bosta'
            await bot.sendMessage(msg['chat']['id'], is_sudo + '?',
                                  reply_to_message_id=msg['message_id'])
            return True
        # rt: "retweet" the replied-to message, quoting its author
        elif msg['text'].lower() == 'rt' and msg.get('reply_to_message'):
            print('Usuario {} solicitou rt'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou rt --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg['reply_to_message'].get('text'):
                text = msg['reply_to_message']['text']
            elif msg['reply_to_message'].get('caption'):
                text = msg['reply_to_message']['caption']
            else:
                text = None
            if text:
                if text.lower() != 'rt' or msg['text'] == '/rt@' + bot_username or msg['text'] == 'rtt':
                    # Skip messages that are already an rt quote (avoid nesting).
                    if not re.match('🔃 .* foi gado pra caralho do filho da puta do :\n\n👤 .*', text):
                        await bot.sendMessage(msg['chat']['id'], '''🔃 <b>{}</b> foi gado pra caralho concordando com o -->
👤 <b>{}</b>: <i>{}</i>'''.format(msg['from']['first_name'], msg['reply_to_message']['from']['first_name'],
                                 text),
                                              parse_mode='HTML',
                                              reply_to_message_id=msg['message_id'])
            return True
        #---------------------------------------------------------------------------------------------------------------
        # gay: joke quote-reply variant of rt
        elif msg['text'].lower() == 'gay' and msg.get('reply_to_message'):
            print('Usuario {} solicitou gay'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou gay --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg['reply_to_message'].get('text'):
                text = msg['reply_to_message']['text']
            elif msg['reply_to_message'].get('caption'):
                text = msg['reply_to_message']['caption']
            else:
                text = None
            if text:
                if text.lower() != 'rt' or msg['text'] == 'rtt':
                    if not re.match('🔃 .* chamou de gay e pode sofrer processo do :\n\n👤 .*', text):
                        await bot.sendMessage(msg['chat']['id'], '''<b>{} pode tomar um processo pois foi estupido para caralho xingando {} de gay, este viado e bicha loca do caralho só porque ele disse</b> <i>{}</i>'''.format(msg['from']['first_name'], msg['reply_to_message']['from']['first_name'],
                                 text),
                                              parse_mode='HTML',
                                              reply_to_message_id=msg['message_id'])
            return True
        #---------------------------------------------------------------------------------------------------------------
        # pau no cu / pnc: joke quote-reply variant of rt
        elif msg['text'].lower() == 'pau no cu' or msg['text'].lower() == 'pnc'and msg.get('reply_to_message'):
            print('Usuario {} solicitou pau no cu(rt)'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou pau no cu(rt) --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg['reply_to_message'].get('text'):
                text = msg['reply_to_message']['text']
            elif msg['reply_to_message'].get('caption'):
                text = msg['reply_to_message']['caption']
            else:
                text = None
            if text:
                if text.lower() != 'rt' or msg['text'] == 'rtt':
                    if not re.match('🔃 .* chamou de pau no cu e pode sofrer processo do :\n\n👤 .*', text):
                        await bot.sendMessage(msg['chat']['id'], '''<b>{} xingou e nao deixou baixo para {}, eu nao deixava e cagava o filho da puta na porrada so porque ele disse</b> <i>{}</i>'''.format(msg['from']['first_name'], msg['reply_to_message']['from']['first_name'],
                                 text),
                                              parse_mode='HTML',
                                              reply_to_message_id=msg['message_id'])
            return True
        # filho da puta: joke quote-reply variant of rt
        elif msg['text'].lower() == 'filho da puta' or msg['text'].lower() == 'pnc'and msg.get('reply_to_message'):
            print('Usuario {} solicitou filho da puta(rt)'.format(msg['from']['first_name']))
            log = '\nUsuario {} solicitou filho da puta(rt) --> Grupo: {} --> Data/hora:{}'.format(msg['from']['first_name'],msg['chat']['title'],time.ctime())
            arquivo = open('logs/grupos.txt','a')
            arquivo.write(log)
            arquivo.close()
            if msg['reply_to_message'].get('text'):
                text = msg['reply_to_message']['text']
            elif msg['reply_to_message'].get('caption'):
                text = msg['reply_to_message']['caption']
            else:
                text = None
            if text:
                if text.lower() != 'rt' or msg['text'] == 'rtt':
                    if not re.match('🔃 .* xingou a mãe do \n\n👤 .*', text):
                        await bot.sendMessage(msg['chat']['id'], '''<b>{} xingou a mãe do {}, poxa o cara só falou</b> <i>{}</i>'''.format(msg['from']['first_name'], msg['reply_to_message']['from']['first_name'],
                                 text),
                                              parse_mode='HTML',
                                              reply_to_message_id=msg['message_id'])
            return True
| 57.831288 | 302 | 0.481038 | 0 | 0 | 0 | 0 | 0 | 0 | 18,604 | 0.984808 | 6,844 | 0.362289 |
dc27e9112b2e7f6835e04b3442e472d51ccba89e | 685 | py | Python | Sprint1Lecture/Module2/demo1_retrievesElement.py | marianvinas/CS_Notes | b43010dda5617336d7295d08f66fa24dbf786144 | [
"MIT"
] | null | null | null | Sprint1Lecture/Module2/demo1_retrievesElement.py | marianvinas/CS_Notes | b43010dda5617336d7295d08f66fa24dbf786144 | [
"MIT"
] | null | null | null | Sprint1Lecture/Module2/demo1_retrievesElement.py | marianvinas/CS_Notes | b43010dda5617336d7295d08f66fa24dbf786144 | [
"MIT"
] | null | null | null | """
Challenge #1:
Write a function that retrieves the last n elements from a list.
Examples:
- last([1, 2, 3, 4, 5], 1) ➞ [5]
- last([4, 3, 9, 9, 7, 6], 3) ➞ [9, 7, 6]
- last([1, 2, 3, 4, 5], 7) ➞ "invalid"
- last([1, 2, 3, 4, 5], 0) ➞ []
Notes:
- Return "invalid" if n exceeds the length of the list.
- Return an empty list if n == 0.
"""
def last(arr, n):
    """Return the last *n* elements of *arr*.

    Returns the string 'invalid' when n exceeds len(arr), and an
    empty list when n == 0 (see the challenge examples above).
    """
    size = len(arr)
    if n > size:
        return 'invalid'
    # Negative slicing covers every valid n > 0; n == 0 is special-cased.
    return arr[size - n:] if n else []
# Quick manual checks mirroring the examples documented above.
print(last([1, 2, 3, 4, 5], 1)) #5
print(last([4, 3, 9, 9, 7, 6], 3)) #[9, 7, 6]
print(last([1, 2, 3, 4, 5], 7)) #invalid
print(last([1, 2, 3, 4, 5], 0)) #empty []
dc28a79d86f42844bf4b707c20265a7e843acfab | 25,407 | py | Python | PyEngine3D/OpenGLContext/Texture.py | ubuntunux/PyEngine3D | e5542b5b185e8b9b56fc4669a6f22eb06c386c4f | [
"BSD-2-Clause"
] | 121 | 2017-06-07T19:42:30.000Z | 2022-03-31T04:42:29.000Z | PyEngine3D/OpenGLContext/Texture.py | MatthewPChapdelaine/PyEngine3D | e5542b5b185e8b9b56fc4669a6f22eb06c386c4f | [
"BSD-2-Clause"
] | 16 | 2015-12-21T16:57:55.000Z | 2017-03-06T15:22:37.000Z | PyEngine3D/OpenGLContext/Texture.py | ubuntunux/GuineaPig | f32852ecbfa3ebdbba00afc466719fc78e37361c | [
"BSD-2-Clause"
] | 16 | 2018-01-15T03:12:13.000Z | 2022-03-31T04:42:41.000Z | import traceback
import copy
import gc
from ctypes import c_void_p
import itertools
import array
import math
import numpy as np
from OpenGL.GL import *
from PyEngine3D.Common import logger
from PyEngine3D.Utilities import Singleton, GetClassName, Attributes, Profiler
from PyEngine3D.OpenGLContext import OpenGLContext
def get_numpy_dtype(data_type):
    """Map an OpenGL data-type enum to the matching numpy dtype.

    :param data_type: an OpenGL type constant such as GL_FLOAT.
    :return: the corresponding numpy scalar type; falls back to np.uint8
        (with an error log) for unknown enums, matching the old behavior.

    Note: the original if/elif chain tested GL_UNSIGNED_BYTE twice; the
    duplicate (dead) branch is removed by using a single lookup table.
    """
    dtype_map = {
        GL_BYTE: np.int8,
        GL_UNSIGNED_BYTE: np.uint8,
        GL_SHORT: np.int16,
        GL_UNSIGNED_SHORT: np.uint16,
        GL_INT: np.int32,
        GL_UNSIGNED_INT: np.uint32,
        GL_UNSIGNED_INT64: np.uint64,
        GL_FLOAT: np.float32,
        GL_DOUBLE: np.float64,
    }
    dtype = dtype_map.get(data_type)
    if dtype is None:
        logger.error('Cannot convert to numpy dtype. UNKOWN DATA TYPE(%s)', data_type)
        return np.uint8
    return dtype
def get_internal_format(str_image_mode):
    """Return the sized OpenGL internal format for a PIL-style image mode.

    Unknown modes are logged and fall back to GL_RGBA8.
    """
    if str_image_mode == "RGBA":
        return GL_RGBA8
    if str_image_mode == "RGB":
        return GL_RGB8
    if str_image_mode in ("L", "P", "R"):
        return GL_R8
    logger.error("get_internal_format::unknown image mode ( %s )" % str_image_mode)
    return GL_RGBA8
def get_texture_format(str_image_mode):
    """Return the OpenGL pixel-transfer format for a PIL-style image mode.

    Unknown modes are logged and fall back to GL_RGBA.
    """
    if str_image_mode == "RGBA":
        # R,G,B,A order. GL_BGRA is faster than GL_RGBA
        return GL_RGBA  # GL_BGRA
    if str_image_mode == "RGB":
        return GL_RGB
    if str_image_mode in ("L", "P", "R"):
        return GL_RED
    logger.error("get_texture_format::unknown image mode ( %s )" % str_image_mode)
    return GL_RGBA
def get_image_mode(texture_internal_format):
    """Return the PIL-style image mode string for an OpenGL texture format.

    Unknown formats are logged and fall back to "RGBA".
    """
    if texture_internal_format in (GL_RGBA, GL_BGRA):
        return "RGBA"
    if texture_internal_format in (GL_RGB, GL_BGR):
        return "RGB"
    if texture_internal_format == GL_RG:
        return "RG"
    if texture_internal_format in (GL_R8, GL_R16F, GL_RED, GL_DEPTH_STENCIL, GL_DEPTH_COMPONENT):
        return "R"
    if texture_internal_format == GL_LUMINANCE:
        return "L"
    logger.error("get_image_mode::unknown image format ( %s )" % texture_internal_format)
    return "RGBA"
def CreateTexture(**texture_datas):
    """Instantiate a texture from keyword data.

    'texture_type' may be a texture class or its name as a string (as written
    by Texture.get_texture_info()); defaults to Texture2D. Returns None when
    'texture_type' is explicitly None.
    """
    texture_class = texture_datas.get('texture_type', Texture2D)
    if texture_class is None:
        return None
    if isinstance(texture_class, str):
        # Resolve the class by name instead of eval(): the saved value is a
        # plain class name, and eval() would execute arbitrary expressions
        # coming from loaded resource files. Unknown names raise KeyError
        # (previously a NameError from eval).
        texture_class = globals()[texture_class]
    return texture_class(**texture_datas)
class Texture:
    """Base wrapper around an OpenGL texture object.

    Holds the common texture state (size, formats, filtering, wrapping) and
    the generic operations (bind, mipmaps, readback, serialization).
    Subclasses override ``target`` and ``create_texture`` per texture target.
    """
    target = GL_TEXTURE_2D
    default_wrap = GL_REPEAT
    # When True, subclasses allocate immutable storage via glTexStorage*.
    use_glTexStorage = False
    def __init__(self, **texture_data):
        """Initialise default state, then build the GL object via create_texture()."""
        self.name = texture_data.get('name')
        self.attachment = False
        self.image_mode = "RGBA"
        self.internal_format = GL_RGBA8
        self.texture_format = GL_RGBA
        self.sRGB = False
        self.clear_color = None
        self.multisample_count = 0
        self.width = 0
        self.height = 0
        self.depth = 1
        self.data_type = GL_UNSIGNED_BYTE
        self.min_filter = GL_LINEAR_MIPMAP_LINEAR
        self.mag_filter = GL_LINEAR
        self.enable_mipmap = False
        self.wrap = self.default_wrap
        self.wrap_s = self.default_wrap
        self.wrap_t = self.default_wrap
        self.wrap_r = self.default_wrap
        self.buffer = -1
        self.sampler_handle = -1
        self.attribute = Attributes()
        self.create_texture(**texture_data)
    def create_texture(self, **texture_data):
        """Derive the final format/filter/wrap state from *texture_data*.

        Subclasses call this first, then allocate the actual GL texture.
        Missing formats are inferred from the image mode and vice versa.
        """
        if self.buffer != -1:
            self.delete()
        self.attachment = False
        self.image_mode = texture_data.get('image_mode')
        self.internal_format = texture_data.get('internal_format')
        self.texture_format = texture_data.get('texture_format')
        self.sRGB = texture_data.get('sRGB', False)
        self.clear_color = texture_data.get('clear_color')
        self.multisample_count = 0
        if self.internal_format is None and self.image_mode:
            self.internal_format = get_internal_format(self.image_mode)
        if self.texture_format is None and self.image_mode:
            self.texture_format = get_texture_format(self.image_mode)
        if self.image_mode is None and self.texture_format:
            self.image_mode = get_image_mode(self.texture_format)
        # Convert to sRGB
        if self.sRGB:
            if self.internal_format == GL_RGB:
                self.internal_format = GL_SRGB8
            elif self.internal_format == GL_RGBA:
                self.internal_format = GL_SRGB8_ALPHA8
        # Promote unsized formats to their sized equivalents.
        if GL_RGBA == self.internal_format:
            self.internal_format = GL_RGBA8
        if GL_RGB == self.internal_format:
            self.internal_format = GL_RGB8
        self.width = int(texture_data.get('width', 0))
        self.height = int(texture_data.get('height', 0))
        self.depth = int(max(1, texture_data.get('depth', 1)))
        self.data_type = texture_data.get('data_type', GL_UNSIGNED_BYTE)
        self.min_filter = texture_data.get('min_filter', GL_LINEAR_MIPMAP_LINEAR)
        self.mag_filter = texture_data.get('mag_filter', GL_LINEAR)  # GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR, GL_NEAREST
        mipmap_filters = (GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_NEAREST,
                          GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST_MIPMAP_NEAREST)
        self.enable_mipmap = self.min_filter in mipmap_filters
        # Multisample textures cannot have mipmaps.
        if self.target == GL_TEXTURE_2D_MULTISAMPLE:
            self.enable_mipmap = False
        self.wrap = texture_data.get('wrap', self.default_wrap)  # GL_REPEAT, GL_CLAMP
        self.wrap_s = texture_data.get('wrap_s')
        self.wrap_t = texture_data.get('wrap_t')
        self.wrap_r = texture_data.get('wrap_r')
        self.buffer = -1
        self.sampler_handle = -1
        # texture parameter overwrite
        # self.sampler_handle = glGenSamplers(1)
        # glSamplerParameteri(self.sampler_handle, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        # glBindSampler(0, self.sampler_handle)
        logger.info("Create %s : %s %dx%dx%d %s mipmap(%s)." % (
            GetClassName(self), self.name, self.width, self.height, self.depth, str(self.internal_format),
            'Enable' if self.enable_mipmap else 'Disable'))
        self.attribute = Attributes()
    def __del__(self):
        pass
    def delete(self):
        """Free the GL texture object and mark this wrapper as invalid."""
        logger.info("Delete %s : %s" % (GetClassName(self), self.name))
        glDeleteTextures([self.buffer, ])
        self.buffer = -1
    def get_texture_info(self):
        """Return the constructor kwargs needed to recreate this texture (no pixels)."""
        return dict(
            texture_type=self.__class__.__name__,
            width=self.width,
            height=self.height,
            depth=self.depth,
            image_mode=self.image_mode,
            internal_format=self.internal_format,
            texture_format=self.texture_format,
            data_type=self.data_type,
            min_filter=self.min_filter,
            mag_filter=self.mag_filter,
            wrap=self.wrap,
            wrap_s=self.wrap_s,
            wrap_t=self.wrap_t,
            wrap_r=self.wrap_r,
        )
    def get_save_data(self):
        """Return get_texture_info() plus the pixel data (when readable)."""
        save_data = self.get_texture_info()
        data = self.get_image_data()
        if data is not None:
            save_data['data'] = data
        return save_data
    def get_mipmap_size(self, level=0):
        """Return the (width, height) of the given mipmap level (min 1x1)."""
        if 0 < level:
            divider = 2.0 ** level
            width = max(1, int(self.width / divider))
            height = max(1, int(self.height / divider))
            return width, height
        return self.width, self.height
    def get_image_data(self, level=0):
        """Read back the texture pixels of *level* as a numpy array.

        Only 2D, 2D-array and 3D targets are readable; other targets return
        None. Tries glGetTexImage first and falls back to per-layer
        glReadPixels through a temporary framebuffer when that fails.
        """
        if self.target not in (GL_TEXTURE_2D, GL_TEXTURE_2D_ARRAY, GL_TEXTURE_3D):
            return None
        level = min(level, self.get_mipmap_count())
        dtype = get_numpy_dtype(self.data_type)
        try:
            glBindTexture(self.target, self.buffer)
            data = OpenGLContext.glGetTexImage(self.target, level, self.texture_format, self.data_type)
            # convert to numpy array
            if type(data) is bytes:
                # NOTE(review): np.fromstring is deprecated; np.frombuffer is
                # the modern equivalent — left unchanged here.
                data = np.fromstring(data, dtype=dtype)
            else:
                data = np.array(data, dtype=dtype)
            glBindTexture(self.target, 0)
            return data
        except:
            # Fallback path: attach each layer to a scratch FBO and read pixels.
            logger.error(traceback.format_exc())
            logger.error('%s failed to get image data.' % self.name)
            logger.info('Try to glReadPixels.')
            glBindTexture(self.target, self.buffer)
            fb = glGenFramebuffers(1)
            glBindFramebuffer(GL_FRAMEBUFFER, fb)
            data = []
            for layer in range(self.depth):
                if GL_TEXTURE_2D == self.target:
                    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.buffer, level)
                elif GL_TEXTURE_3D == self.target:
                    glFramebufferTexture3D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_3D, self.buffer, level, layer)
                elif GL_TEXTURE_2D_ARRAY == self.target:
                    glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, self.buffer, level, layer)
                glReadBuffer(GL_COLOR_ATTACHMENT0)
                width, height = self.get_mipmap_size(level)
                pixels = glReadPixels(0, 0, width, height, self.texture_format, self.data_type)
                # convert to numpy array
                if type(pixels) is bytes:
                    pixels = np.fromstring(pixels, dtype=dtype)
                data.append(pixels)
            data = np.array(data, dtype=dtype)
            glBindTexture(self.target, 0)
            glBindFramebuffer(GL_FRAMEBUFFER, 0)
            glDeleteFramebuffers(1, [fb, ])
            return data
    def get_mipmap_count(self):
        """Return the number of mipmap levels for the largest dimension."""
        factor = max(max(self.width, self.height), self.depth)
        return math.floor(math.log2(factor)) + 1
    def generate_mipmap(self):
        """Regenerate the mipmap chain (no-op with a warning when disabled)."""
        if self.enable_mipmap:
            glBindTexture(self.target, self.buffer)
            glGenerateMipmap(self.target)
        else:
            logger.warn('%s disable to generate mipmap.' % self.name)
    def texure_wrap(self, wrap):
        # NOTE: method name keeps its original (misspelled) public spelling.
        glTexParameteri(self.target, GL_TEXTURE_WRAP_S, wrap)
        glTexParameteri(self.target, GL_TEXTURE_WRAP_T, wrap)
        glTexParameteri(self.target, GL_TEXTURE_WRAP_R, wrap)
    def bind_texture(self, wrap=None):
        """Bind this texture to its target, optionally overriding wrap modes."""
        if self.buffer == -1:
            logger.warn("%s texture is invalid." % self.name)
            return
        glBindTexture(self.target, self.buffer)
        if wrap is not None:
            self.texure_wrap(wrap)
    def bind_image(self, image_unit, level=0, access=GL_READ_WRITE):
        """Bind one mip level as an image unit for compute/image load-store."""
        if self.buffer == -1:
            logger.warn("%s texture is invalid." % self.name)
            return
        # flag : GL_READ_WRITE, GL_WRITE_ONLY, GL_READ_ONLY
        glBindImageTexture(image_unit, self.buffer, level, GL_FALSE, 0, access, self.internal_format)
    def is_attached(self):
        """True when this texture is currently attached to a framebuffer."""
        return self.attachment
    def set_attachment(self, attachment):
        self.attachment = attachment
    def get_attribute(self):
        """Populate and return the editor-facing attribute set."""
        self.attribute.set_attribute("name", self.name)
        self.attribute.set_attribute("target", self.target)
        self.attribute.set_attribute("width", self.width)
        self.attribute.set_attribute("height", self.height)
        self.attribute.set_attribute("depth", self.depth)
        self.attribute.set_attribute("image_mode", self.image_mode)
        self.attribute.set_attribute("internal_format", self.internal_format)
        self.attribute.set_attribute("texture_format", self.texture_format)
        self.attribute.set_attribute("data_type", self.data_type)
        self.attribute.set_attribute("min_filter", self.min_filter)
        self.attribute.set_attribute("mag_filter", self.mag_filter)
        self.attribute.set_attribute("multisample_count", self.multisample_count)
        self.attribute.set_attribute("wrap", self.wrap)
        self.attribute.set_attribute("wrap_s", self.wrap_s)
        self.attribute.set_attribute("wrap_t", self.wrap_t)
        self.attribute.set_attribute("wrap_r", self.wrap_r)
        return self.attribute
    def set_attribute(self, attribute_name, attribute_value, item_info_history, attribute_index):
        """Apply an editor attribute change; wrap changes are pushed to GL immediately.

        NOTE(review): the attribute value is eval()'d — assumed to come only
        from the trusted in-app editor; verify before exposing elsewhere.
        """
        if hasattr(self, attribute_name) and "" != attribute_value:
            setattr(self, attribute_name, eval(attribute_value))
        if 'wrap' in attribute_name:
            glBindTexture(self.target, self.buffer)
            glTexParameteri(self.target, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
            glTexParameteri(self.target, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
            glTexParameteri(self.target, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
            glBindTexture(self.target, 0)
        return self.attribute
class Texture2D(Texture):
    """Plain 2D texture; uploads optional pixel data and sets filter/wrap state."""
    target = GL_TEXTURE_2D
    def create_texture(self, **texture_data):
        """Allocate the GL_TEXTURE_2D object (immutable storage when enabled)."""
        Texture.create_texture(self, **texture_data)
        data = texture_data.get('data')
        self.buffer = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D, self.buffer)
        if self.use_glTexStorage:
            # Immutable storage path: allocate all mip levels, then upload.
            glTexStorage2D(GL_TEXTURE_2D,
                           self.get_mipmap_count(),
                           self.internal_format,
                           self.width, self.height)
            if data is not None:
                glTexSubImage2D(GL_TEXTURE_2D,
                                0,
                                0, 0,
                                self.width, self.height,
                                self.texture_format,
                                self.data_type,
                                data)
        else:
            glTexImage2D(GL_TEXTURE_2D,
                         0,
                         self.internal_format,
                         self.width,
                         self.height,
                         0,
                         self.texture_format,
                         self.data_type,
                         data)
        if self.enable_mipmap:
            glGenerateMipmap(GL_TEXTURE_2D)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, self.min_filter)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
        if self.clear_color is not None:
            glClearTexImage(self.buffer, 0, self.texture_format, self.data_type, self.clear_color)
        glBindTexture(GL_TEXTURE_2D, 0)
class Texture2DArray(Texture):
    """Layered 2D texture array; ``depth`` is the number of layers."""
    target = GL_TEXTURE_2D_ARRAY
    def create_texture(self, **texture_data):
        """Allocate the GL_TEXTURE_2D_ARRAY object (immutable storage when enabled)."""
        Texture.create_texture(self, **texture_data)
        data = texture_data.get('data')
        self.buffer = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D_ARRAY, self.buffer)
        if self.use_glTexStorage:
            glTexStorage3D(GL_TEXTURE_2D_ARRAY,
                           self.get_mipmap_count(),
                           self.internal_format,
                           self.width, self.height, self.depth)
            if data is not None:
                glTexSubImage3D(GL_TEXTURE_2D_ARRAY,
                                0,
                                0, 0, 0,
                                self.width, self.height, self.depth,
                                self.texture_format,
                                self.data_type,
                                data)
        else:
            glTexImage3D(GL_TEXTURE_2D_ARRAY,
                         0,
                         self.internal_format,
                         self.width,
                         self.height,
                         self.depth,
                         0,
                         self.texture_format,
                         self.data_type,
                         data)
        if self.enable_mipmap:
            glGenerateMipmap(GL_TEXTURE_2D_ARRAY)
        glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
        glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
        glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, self.min_filter)
        glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, self.mag_filter)
        glBindTexture(GL_TEXTURE_2D_ARRAY, 0)
class Texture3D(Texture):
    """Volume texture; ``depth`` is the number of Z slices."""
    target = GL_TEXTURE_3D
    def create_texture(self, **texture_data):
        """Allocate the GL_TEXTURE_3D object (immutable storage when enabled)."""
        Texture.create_texture(self, **texture_data)
        data = texture_data.get('data')
        self.buffer = glGenTextures(1)
        glBindTexture(GL_TEXTURE_3D, self.buffer)
        if self.use_glTexStorage:
            glTexStorage3D(GL_TEXTURE_3D,
                           self.get_mipmap_count(),
                           self.internal_format,
                           self.width, self.height, self.depth)
            if data is not None:
                glTexSubImage3D(GL_TEXTURE_3D,
                                0,
                                0, 0, 0,
                                self.width, self.height, self.depth,
                                self.texture_format,
                                self.data_type,
                                data)
        else:
            glTexImage3D(GL_TEXTURE_3D,
                         0,
                         self.internal_format,
                         self.width,
                         self.height,
                         self.depth,
                         0,
                         self.texture_format,
                         self.data_type,
                         data)
        if self.enable_mipmap:
            glGenerateMipmap(GL_TEXTURE_3D)
        # 3D textures also wrap along R (the depth axis).
        glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
        glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
        glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
        glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, self.min_filter)
        glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, self.mag_filter)
        glBindTexture(GL_TEXTURE_3D, 0)
class Texture2DMultiSample(Texture):
    """Multisampled 2D texture (render target); no mipmaps, no pixel upload."""
    target = GL_TEXTURE_2D_MULTISAMPLE
    def create_texture(self, **texture_data):
        """Allocate the multisample storage; sample count is rounded down to a multiple of 4."""
        Texture.create_texture(self, **texture_data)
        multisample_count = texture_data.get('multisample_count', 4)
        self.multisample_count = multisample_count - (multisample_count % 4)
        self.buffer = glGenTextures(1)
        glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, self.buffer)
        if self.use_glTexStorage:
            glTexStorage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
                                      self.multisample_count,
                                      self.internal_format,
                                      self.width,
                                      self.height,
                                      GL_TRUE)
        else:
            glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE,
                                    self.multisample_count,
                                    self.internal_format,
                                    self.width,
                                    self.height,
                                    GL_TRUE)
        glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0)
class TextureCube(Texture):
    """Cube-map texture assembled from six per-face Texture2D objects.

    Each face texture is either supplied via texture_data or created as a
    render target sharing this cube map's format and size.
    """
    target = GL_TEXTURE_CUBE_MAP
    default_wrap = GL_REPEAT
    def __init__(self, **texture_data):
        # Declared before Texture.__init__ because create_texture() fills them.
        self.texture_positive_x = None
        self.texture_negative_x = None
        self.texture_positive_y = None
        self.texture_negative_y = None
        self.texture_positive_z = None
        self.texture_negative_z = None
        Texture.__init__(self, **texture_data)
    def create_texture(self, **texture_data):
        """Create/collect the six face textures and upload them into the cube map."""
        Texture.create_texture(self, **texture_data)
        # If texture2d is None then create render target.
        face_texture_datas = copy.copy(texture_data)
        face_texture_datas.pop('name')
        face_texture_datas['texture_type'] = Texture2D
        self.texture_positive_x = texture_data.get('texture_positive_x', CreateTexture(name=self.name + "_right", **face_texture_datas))
        self.texture_negative_x = texture_data.get('texture_negative_x', CreateTexture(name=self.name + "_left", **face_texture_datas))
        self.texture_positive_y = texture_data.get('texture_positive_y', CreateTexture(name=self.name + "_top", **face_texture_datas))
        self.texture_negative_y = texture_data.get('texture_negative_y', CreateTexture(name=self.name + "_bottom", **face_texture_datas))
        self.texture_positive_z = texture_data.get('texture_positive_z', CreateTexture(name=self.name + "_front", **face_texture_datas))
        self.texture_negative_z = texture_data.get('texture_negative_z', CreateTexture(name=self.name + "_back", **face_texture_datas))
        self.buffer = glGenTextures(1)
        glBindTexture(GL_TEXTURE_CUBE_MAP, self.buffer)
        if self.use_glTexStorage:
            glTexStorage2D(GL_TEXTURE_CUBE_MAP, self.get_mipmap_count(), self.internal_format, self.width, self.height)
            self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x)  # Right
            self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x)  # Left
            self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y)  # Top
            self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y)  # Bottom
            self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z)  # Front
            self.createTexSubImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z)  # Back
        else:
            self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X, self.texture_positive_x)  # Right
            self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_X, self.texture_negative_x)  # Left
            self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Y, self.texture_positive_y)  # Top
            self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, self.texture_negative_y)  # Bottom
            self.createTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_Z, self.texture_positive_z)  # Front
            self.createTexImage2D(GL_TEXTURE_CUBE_MAP_NEGATIVE_Z, self.texture_negative_z)  # Back
        if self.enable_mipmap:
            glGenerateMipmap(GL_TEXTURE_CUBE_MAP)
        glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, self.wrap_s or self.wrap)
        glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, self.wrap_t or self.wrap)
        glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, self.wrap_r or self.wrap)
        glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, self.min_filter)
        glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, self.mag_filter)
        glBindTexture(GL_TEXTURE_CUBE_MAP, 0)
    @staticmethod
    def createTexImage2D(target_face, texture):
        """Upload one face via glTexImage2D (mutable-storage path)."""
        glTexImage2D(target_face,
                     0,
                     texture.internal_format,
                     texture.width,
                     texture.height,
                     0,
                     texture.texture_format,
                     texture.data_type,
                     texture.get_image_data())
    @staticmethod
    def createTexSubImage2D(target_face, texture):
        """Upload one face via glTexSubImage2D (immutable-storage path)."""
        glTexSubImage2D(target_face,
                        0,
                        0, 0,
                        texture.width, texture.height,
                        texture.texture_format,
                        texture.data_type,
                        texture.get_image_data())
    def delete(self):
        """Delete the cube map and all six face textures."""
        super(TextureCube, self).delete()
        self.texture_positive_x.delete()
        self.texture_negative_x.delete()
        self.texture_positive_y.delete()
        self.texture_negative_y.delete()
        self.texture_positive_z.delete()
        self.texture_negative_z.delete()
    def get_save_data(self, get_image_data=True):
        """Serialize like Texture.get_save_data plus the six face texture names."""
        save_data = Texture.get_save_data(self)
        save_data['texture_positive_x'] = self.texture_positive_x.name
        save_data['texture_negative_x'] = self.texture_negative_x.name
        save_data['texture_positive_y'] = self.texture_positive_y.name
        save_data['texture_negative_y'] = self.texture_negative_y.name
        save_data['texture_positive_z'] = self.texture_positive_z.name
        save_data['texture_negative_z'] = self.texture_negative_z.name
        return save_data
    def get_attribute(self):
        """Extend the base attribute set with the six face texture names."""
        Texture.get_attribute(self)
        self.attribute.set_attribute("texture_positive_x", self.texture_positive_x.name)
        self.attribute.set_attribute("texture_negative_x", self.texture_negative_x.name)
        self.attribute.set_attribute("texture_positive_y", self.texture_positive_y.name)
        self.attribute.set_attribute("texture_negative_y", self.texture_negative_y.name)
        self.attribute.set_attribute("texture_positive_z", self.texture_positive_z.name)
        self.attribute.set_attribute("texture_negative_z", self.texture_negative_z.name)
        return self.attribute
| 40.328571 | 137 | 0.628016 | 22,650 | 0.891487 | 0 | 0 | 748 | 0.029441 | 0 | 0 | 1,838 | 0.072342 |
dc28ce1acae4aa8251138dbfcf61874cd8adf79c | 2,480 | py | Python | flask_app/server.py | lychengr3x/twitter-sentiment-service | a3bfb76d3882c34c6b64ee52bf15051085b04548 | [
"MIT"
] | null | null | null | flask_app/server.py | lychengr3x/twitter-sentiment-service | a3bfb76d3882c34c6b64ee52bf15051085b04548 | [
"MIT"
] | null | null | null | flask_app/server.py | lychengr3x/twitter-sentiment-service | a3bfb76d3882c34c6b64ee52bf15051085b04548 | [
"MIT"
] | 1 | 2021-08-08T23:06:08.000Z | 2021-08-08T23:06:08.000Z | """
A server that responds with two pages, one showing the most recent
100 tweets for given user and the other showing the people that follow
that given user (sorted by the number of followers those users have).
For authentication purposes, the server takes a commandline argument
that indicates the file containing Twitter data in a CSV file format:
consumer_key, consumer_secret, access_token, access_token_secret
For example, I pass in my secrets via file name:
/Users/parrt/Dropbox/licenses/twitter.csv
Please keep in mind the limits imposed by the twitter API:
https://dev.twitter.com/rest/public/rate-limits
For example, you can only do 15 follower list fetches per
15 minute window, but you can do 900 user timeline fetches.
Reference:
* https://github.com/parrt/msds692/blob/master/hw/code/sentiment/server.py
"""
import sys
from flask import Flask, render_template
from tweetie import *
from colour import Color
# Single Flask application object; the route decorators below register on it.
app = Flask(__name__)
def add_color(tweets):
    """Attach a "color" key to every tweet dict, graded by sentiment.

    The color runs from pure red (sentiment score -1.0) to pure green
    (score 1.0), using 100 gradient steps from colour.Color. The score
    is mapped to an index in 0..99, clamped at both ends. Tweets are
    modified in place. This lives in the server script because it is a
    display concern, not a tweet-collection concern.
    """
    gradient = list(Color("red").range_to(Color("green"), 100))
    for tweet in tweets:
        # Map score in [-1, 1] onto a gradient index in [0, 99].
        idx = int((tweet["score"] + 1) / 2 * 99)
        tweet["color"] = gradient[max(0, min(99, idx))]
@app.route("/")
def home():
    """Landing page: a plain greeting."""
    greeting = "Hello, world!"
    return greeting
@app.route("/sentiment/<name>")
def tweets(name):
    """Display a screen name's tweets, color-coded by sentiment score."""
    fetched_tweets = fetch_tweets(api, name)
    # Guard clauses: unknown user, then user with no tweets.
    if not fetched_tweets:
        return f"User {name} does not exist!"
    if not fetched_tweets["count"]:
        return f"No tweets for {name}!"
    add_color(fetched_tweets["tweets"])
    return render_template("./tweets.html", fetched_tweets=fetched_tweets)
# The Twitter credentials CSV path is the last command-line argument
# (see the module docstring for the expected file format).
twitter_auth_filename = sys.argv[-1]
# Authenticate once at import time so every request handler reuses the client.
api = authenticate(twitter_auth_filename)
if __name__ == "__main__":
    # Debug mode: auto-reload and in-browser tracebacks; not for production.
    app.run(debug=True)
| 34.929577 | 82 | 0.711694 | 0 | 0 | 0 | 0 | 526 | 0.212097 | 0 | 0 | 1,710 | 0.689516 |
dc29012448d242df445844af191ebc8db01de799 | 4,625 | py | Python | samples/entity_management.py | czahedi/dialogflow-python-client-v2 | d9150d1def0a7262dc496b2f1313e02e7ae1a0b6 | [
"Apache-2.0"
] | 3 | 2020-07-09T18:52:40.000Z | 2020-07-13T08:46:44.000Z | samples/entity_management.py | czahedi/dialogflow-python-client-v2 | d9150d1def0a7262dc496b2f1313e02e7ae1a0b6 | [
"Apache-2.0"
] | 4 | 2021-03-11T01:15:14.000Z | 2022-02-27T08:02:18.000Z | python-micro-service-master/.venv/lib/site-packages/samples/entity_management.py | hiepvo01/AnimeRecommendationSystem | 662531fc72134caedcd8e1dee7fefd3bdb0017a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DialogFlow API Entity Python sample showing how to manage entities.
Examples:
python entity_management.py -h
python entity_management.py --project-id PROJECT_ID \
list --entity-type-id e57238e2-e692-44ea-9216-6be1b2332e2a
python entity_management.py --project-id PROJECT_ID \
create new_room --synonyms basement cellar \
--entity-type-id e57238e2-e692-44ea-9216-6be1b2332e2a
python entity_management.py --project-id PROJECT_ID \
delete new_room \
--entity-type-id e57238e2-e692-44ea-9216-6be1b2332e2a
"""
import argparse
def list_entities(project_id, entity_type_id):
    """Print every entity (value and synonyms) of the given entity type."""
    import dialogflow_v2 as dialogflow
    client = dialogflow.EntityTypesClient()
    entity_type_name = client.entity_type_path(project_id, entity_type_id)
    for entity in client.get_entity_type(entity_type_name).entities:
        print('Entity value: {}'.format(entity.value))
        print('Entity synonyms: {}\n'.format(entity.synonyms))
# [START dialogflow_create_entity]
def create_entity(project_id, entity_type_id, entity_value, synonyms):
    """Create an entity of the given entity type."""
    import dialogflow_v2 as dialogflow
    client = dialogflow.EntityTypesClient()
    # Synonyms must be exactly [entity_value] when the entity_type's
    # kind is KIND_LIST, hence the fallback.
    effective_synonyms = synonyms or [entity_value]
    entity_type_name = client.entity_type_path(project_id, entity_type_id)
    new_entity = dialogflow.types.EntityType.Entity()
    new_entity.value = entity_value
    new_entity.synonyms.extend(effective_synonyms)
    response = client.batch_create_entities(entity_type_name, [new_entity])
    print('Entity created: {}'.format(response))
# [END dialogflow_create_entity]
# [START dialogflow_delete_entity]
def delete_entity(project_id, entity_type_id, entity_value):
    """Delete entity with the given entity type and entity value."""
    import dialogflow_v2 as dialogflow
    client = dialogflow.EntityTypesClient()
    entity_type_name = client.entity_type_path(project_id, entity_type_id)
    client.batch_delete_entities(entity_type_name, [entity_value])
# [END dialogflow_delete_entity]
if __name__ == '__main__':
    # Top-level parser: --project-id is required by every subcommand.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        '--project-id',
        help='Project/agent id. Required.',
        required=True)
    # One subcommand per operation; args.command selects the handler below.
    subparsers = parser.add_subparsers(dest='command')
    list_parser = subparsers.add_parser(
        'list', help=list_entities.__doc__)
    list_parser.add_argument(
        '--entity-type-id',
        help='The id of the entity_type.')
    create_parser = subparsers.add_parser(
        'create', help=create_entity.__doc__)
    create_parser.add_argument(
        'entity_value',
        help='The entity value to be added.')
    create_parser.add_argument(
        '--entity-type-id',
        help='The id of the entity_type to which to add an entity.',
        required=True)
    create_parser.add_argument(
        '--synonyms',
        nargs='*',
        help='The synonyms that will map to the provided entity value.',
        default=[])
    delete_parser = subparsers.add_parser(
        'delete', help=delete_entity.__doc__)
    delete_parser.add_argument(
        '--entity-type-id',
        help='The id of the entity_type.',
        required=True)
    delete_parser.add_argument(
        'entity_value',
        help='The value of the entity to delete.')
    args = parser.parse_args()
    # Dispatch to the chosen subcommand's implementation.
    if args.command == 'list':
        list_entities(args.project_id, args.entity_type_id)
    elif args.command == 'create':
        create_entity(
            args.project_id, args.entity_type_id, args.entity_value,
            args.synonyms)
    elif args.command == 'delete':
        delete_entity(
            args.project_id, args.entity_type_id, args.entity_value)
| 33.273381 | 74 | 0.711568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,940 | 0.419459 |
dc291fddc0cc25877fc33ea2cf2e1c41cfdab71b | 2,578 | py | Python | tests/model/library/api/test_casestep_resource.py | Mozilla-GitHub-Standards/2a028a7541b867ed4d376d6d9a172a6885fe44030078c12a2b9428efabf22ab7 | 63b1c5eacfe35d5b80cc3b07e7c67145531ba343 | [
"BSD-2-Clause"
] | null | null | null | tests/model/library/api/test_casestep_resource.py | Mozilla-GitHub-Standards/2a028a7541b867ed4d376d6d9a172a6885fe44030078c12a2b9428efabf22ab7 | 63b1c5eacfe35d5b80cc3b07e7c67145531ba343 | [
"BSD-2-Clause"
] | null | null | null | tests/model/library/api/test_casestep_resource.py | Mozilla-GitHub-Standards/2a028a7541b867ed4d376d6d9a172a6885fe44030078c12a2b9428efabf22ab7 | 63b1c5eacfe35d5b80cc3b07e7c67145531ba343 | [
"BSD-2-Clause"
] | null | null | null | """
Tests for CaseStepResource api.
"""
from tests.case.api.crud import ApiCrudCases
import logging
mozlogger = logging.getLogger('moztrap.test')
class CaseStepResourceTest(ApiCrudCases):
    """CRUD API tests for the casestep resource."""
    @property
    def factory(self):
        """The model factory for this object."""
        return self.F.CaseStepFactory()
    @property
    def resource_name(self):
        """Name of the API resource under test."""
        return "casestep"
    @property
    def permission(self):
        """Permission required for Create, Update, and Delete."""
        return "library.manage_cases"
    @property
    def new_object_data(self):
        """Build a dict of field names and auto-generated unique values.

        The result can be sent as the payload of a POST message.
        """
        modifiers = (self.datetime, self.resource_name)
        self.caseversion_fixture = self.F.CaseVersionFactory.create()
        caseversion_uri = unicode(
            self.get_detail_url("caseversion", str(self.caseversion_fixture.id)))
        return {
            u"caseversion": caseversion_uri,
            u"number": 1,
            u"instruction": u"instruction 1 %s" % self.datetime,
            u"expected": u"expected 1 %s" % self.datetime,
        }
    def backend_object(self, id):
        """Fetch the object from the backend so its database values can be
        queried for validation.
        """
        return self.model.CaseStep.everything.get(id=id)
    def backend_data(self, backend_obj):
        """Return the object's current database values as a unicode dict.

        The shape matches the API detail response for the object, so it can
        be used to verify API output.
        """
        def detail(resource, obj_id):
            # Unicode detail URI for a resource instance.
            return unicode(self.get_detail_url(resource, str(obj_id)))
        return {
            u"id": backend_obj.id,
            u"caseversion": detail("caseversion", backend_obj.caseversion.id),
            u"instruction": unicode(backend_obj.instruction),
            u"expected": unicode(backend_obj.expected),
            u"number": backend_obj.number,
            u"resource_uri": detail(self.resource_name, backend_obj.id),
        }
    @property
    def read_create_fields(self):
        """caseversion is read-only."""
        return ["caseversion"]
    # overrides from crud.py
    # additional test cases, if any
    # validation cases
| 27.72043 | 86 | 0.617533 | 2,426 | 0.94104 | 0 | 0 | 1,207 | 0.468192 | 0 | 0 | 1,137 | 0.44104 |
dc2a2a350d569d14803aa0f7b78638b2a20f6c96 | 817 | py | Python | match_shull21.py | drvdputt/dust_fuse_h2 | 3bff87d1cb475abd20f4426e18412379aa3ca991 | [
"BSD-3-Clause"
] | null | null | null | match_shull21.py | drvdputt/dust_fuse_h2 | 3bff87d1cb475abd20f4426e18412379aa3ca991 | [
"BSD-3-Clause"
] | null | null | null | match_shull21.py | drvdputt/dust_fuse_h2 | 3bff87d1cb475abd20f4426e18412379aa3ca991 | [
"BSD-3-Clause"
] | null | null | null | """Find stars that are both in our sample and in Shull+21"""
import numpy as np
import get_data
from matplotlib import pyplot as plt
# Load our merged measurements table and the Shull+2021 comparison table.
data = get_data.get_merged_table()
shull = get_data.get_shull2021()
# Star names present in both samples.
matches = [name for name in data["Name"] if name in shull["Name"]]
print(len(matches), " matches found")
print(matches)
# Row subsets restricted to the matched stars; 'hiref' colors the plots.
data_comp = data[np.isin(data["Name"], matches)]
refs = data_comp['hiref']
shull_comp = shull[np.isin(shull["Name"], matches)]
def compare_shull(param):
    """Scatter our value of *param* against Shull+21's for the matched stars.

    A black y = x line marks perfect agreement; points are colored by each
    star's ``hiref`` value.
    """
    plt.figure()
    theirs = shull_comp[param]
    ours = data_comp[param]
    plt.plot(theirs, theirs, color="k")
    plt.scatter(theirs, ours, c=refs)
    plt.colorbar()
    plt.xlabel("shull")
    plt.ylabel("ours")
    plt.title(param)
# compare_shull("nhtot")
# Plot ours-vs-Shull comparisons for the selected columns, then display.
compare_shull("EBV")
compare_shull("fh2")
compare_shull("nhi")
compare_shull("nh2")
plt.show()
| 22.081081 | 66 | 0.689106 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.204406 |
dc2ab9c6adb9ff531bcc2889483477c21aa458cb | 7,941 | py | Python | plot/include/states_lib.py | ABRG-Models/Wilson2018EvoGene | cf198f162621e09f44cb96ee991ffe50cf9d96bc | [
"CC-BY-4.0"
] | 1 | 2020-04-19T16:57:02.000Z | 2020-04-19T16:57:02.000Z | plot/include/states_lib.py | ABRG-Models/AttractorScaffolding | cf198f162621e09f44cb96ee991ffe50cf9d96bc | [
"CC-BY-4.0"
] | null | null | null | plot/include/states_lib.py | ABRG-Models/AttractorScaffolding | cf198f162621e09f44cb96ee991ffe50cf9d96bc | [
"CC-BY-4.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from collections import Iterable
mrkr1 = 12        # outer marker size for state nodes
mrkr1_inner = 8   # inner (overlay) marker size drawn on top of the node
fs = 18           # font size for node labels and attractor annotations
def flatten(lis):
    """Yield the leaves of an arbitrarily nested iterable, depth-first.

    Strings are treated as leaves rather than iterated character by
    character.
    """
    for item in lis:
        if not isinstance(item, Iterable) or isinstance(item, str):
            yield item
        else:
            yield from flatten(item)
# FUNCTION TO DRAW TREES
def tree (base, graph, cycle, bias, visits, print_states_hex, docolour):
    """Recursively draw the predecessor tree rooted at state `base`.

    Parents of `base` (states mapping onto it, read from graph[base][0])
    that are not part of `cycle` each get radius amp+1 and an equal angular
    slice of [min_ang, max_ang]; their polar/cartesian coordinates are
    written back into graph[b][2]. Each parent is drawn as a colored node
    with an arrow toward `base`, then the function recurses on it.

    NOTE: cycle members are removed from graph[base][0] in place, and
    `visits` is extended with every parent drawn.

    States 21, 10, 16 and 0 get special markers and text labels —
    presumably the designated initial/final states of interest (the
    commented "initial/final ant/post" notes elsewhere in this module
    suggest so; confirm).
    `bias` is the vertical offset of the current attractor's sub-figure;
    `docolour`=False switches to a black/white marker scheme.
    """
    # find parents
    parents = graph[base][0]
    for each in cycle:
        if each in parents:
            parents.remove(each)
    # add parents to visits
    for a in parents:
        visits.append(a)
    greycol = (0.4,0.4,0.4)
    # add co-ordinates to graph array
    l = len(parents)
    count = 0
    amp = graph[base][2][0]
    min_ang = graph[base][2][1]
    max_ang = graph[base][2][2]
    for b in parents:
        graph[b][2][0] = amp + 1
        graph[b][2][1] = min_ang + count*(max_ang-min_ang)/l
        graph[b][2][2] = min_ang + (count+1)*(max_ang-min_ang)/l
        count = count + 1
    # draw those on the branches
    for c in parents:
        mid = (graph[c][2][1] + graph[c][2][2])/2
        xco = graph[c][2][0]*np.cos(np.radians(mid))
        yco = graph[c][2][0]*np.sin(np.radians(mid)) + bias
        graph[c][2][3] = xco
        graph[c][2][4] = yco
        colo = plt.cm.hsv(c/32)
        if c%2==0:
            colo2 = plt.cm.flag(c/32.0)
        else:
            colo2 = plt.cm.prism(c/32.0)
        if docolour == False:
            colo = 'k'
            colo2 = 'k'
        #print ('Printing marker for c={0}'.format(c))
        plt.plot(xco, yco, 'o', markersize=mrkr1, color=colo)
        text_labels=0
        if c==21 or c==10 or c==16 or c==0:
            text_labels=1
        if text_labels:
            if print_states_hex:
                tt = plt.text(xco+0.25,yco+0.4, '{:02X}'.format(c), ha='center', fontsize=fs)
            else:
                tt = plt.text(xco+0.25,yco+0.4, '{:d}'.format(c), ha='center', fontsize=fs)
            tt.set_bbox(dict(boxstyle='round,pad=0.0', edgecolor='none', facecolor='white', alpha=0.6))
        if c==21 or c==10:
            selmarker = 'v'
            if docolour == False:
                colo2 = 'w'
        elif c==16 or c==0:
            #print ('Printing star for c={0}'.format(c)) # Note in one case, star is red and BG circle is red.
            selmarker = '*'
            if docolour == False:
                selmarker = 'o'
                colo2 = 'w'
            else:
                if (c==0):
                    print ('printing selmarker for c={0} with BLUE star'.format(c))
                    colo2='b'
        else:
            selmarker = 'o'
        plt.plot (xco, yco, marker=selmarker, markersize=mrkr1_inner, color=colo2)
        plt.arrow(xco, yco, graph[base][2][3]-xco, graph[base][2][4]-yco, overhang=0, length_includes_head=True, head_width=0.15, head_length=0.5, fc=greycol, ec=greycol)
    for z in parents:
        tree (z, graph, parents, bias, visits, print_states_hex, docolour)
def plot_states (net, ax, print_states_hex=False, kequalsn=True, docolour=True):
    """Draw the full 32-state transition diagram of a 5-gene network.

    `net` is indexed as a flat sequence of '0'/'1' characters: one
    2^k-entry truth-table section per gene (sections of 32 entries for
    k=n, 16 for k=n-1 — see the offsets +32/+64/... below). For each of
    the 2^5 states the successor state is computed by rotating/slicing
    the state bits into each gene's section; the resulting `targets`
    array defines the transition graph. Limit cycles (attractors) are
    found by iterating until a repeat, each is drawn as a circle of
    nodes stacked vertically (`bias` offsets successive attractors),
    and the basin trees feeding each cycle are drawn via tree().
    Finally the axis limits are padded.

    Parameters: `ax` is the matplotlib axes circles are added to;
    `print_states_hex` labels highlighted states in hex instead of
    decimal; `docolour`=False uses a black/white scheme.
    """
    # Find where each state leads
    targets = []
    for i in range(2**5):
        state = np.binary_repr(i,5)
        # k=n
        if kequalsn:
            effect = net[int(state,2)] + net[int(state[1:]+state[0:1],2) + 32] + net[int(state[2:]+state[0:2],2) + 64] + net[int(state[3:]+state[0:3],2)+96] + net[int(state[4:]+state[0:4],2)+128]
        else:
            # k=n-1
            effect = net[int(state[1:],2)] + net[int(state[:1]+state[2:],2)+16] + net[int(state[:2]+state[3:],2) + 32] + net[int(state[:3]+state[4],2)+48] + net[int(state[:4],2)+64]
        # in decimal form
        targets.append(int(effect[4]) + 2*int(effect[3]) + 4*int(effect[2]) + 8*int(effect[1]) + 16*int(effect[0]))
    # graph[n] gives the parent nodes, child nodes and co-ordinates for the nth node.
    # graph[n][2][0] gives polar amplitude, [1] is min angle, [2] is max angle, [3] is x, [4] is y
    graph = [[[],[],[0,0,0,0,0]] for x in range(1024)]
    targets = [int(z) for z in targets]
    for y in range(32):
        graph[y][1] = targets[y] # add child
        graph[targets[y]][0].append(y) # add parent
    visits = []
    greycol = (0.4,0.4,0.4)
    plt.xticks([])
    plt.yticks([])
    bases = []
    # Follow transitions from every state until a state repeats; the tail of
    # the visit list from the first repeat onward is that state's limit cycle.
    for x in range(len(targets)):
        visits = []
        while not x in visits:
            visits.append(x)
            x = targets[x]
        base = visits[visits.index(x):]
        # It's awkward to format the list of bases in hex, so it's not implemented
        if not base[0] in list(flatten(bases)):
            bases.append(base)
    for base in bases:
        # find co-ordinates of base nodes
        tot = len(base)
        count = 0
        for x in base:
            graph[x][2][0] = 1
            graph[x][2][1] = count*180/tot
            graph[x][2][2] = (count+1)*180/tot
            count = count + 1
        # find max y-co for bias for next tree
        bias = graph[0][2][4]
        for node in graph:
            if node[2][4]>bias:
                bias = node[2][4]
        bias = bias + 2
        # draw those on the LC
        tt = plt.text(0+0.7,bias-2+0.5,base, ha='center', fontsize=fs)
        tt.set_bbox(dict(boxstyle='round,pad=0.0', edgecolor='none', facecolor='white', alpha=0.6))
        circle = plt.Circle ((0,bias), 1, color=greycol, fill=False)
        ax.add_artist(circle)
        for x in base:
            mid = (graph[x][2][1] + graph[x][2][2])/2.
            graph[x][2][3] = graph[x][2][0]*np.cos(np.radians(mid))
            graph[x][2][4] = graph[x][2][0]*np.sin(np.radians(mid)) + bias
            colo = plt.cm.hsv(x/32)
            if x%2==0:
                colo2 = plt.cm.flag(x/32.0)
            else:
                colo2 = plt.cm.prism(x/32.0)
            #plt.plot(graph[x][2][3], graph[x][2][4], 'o', color=(0,0,0), markersize=mrkr1)
            #print ('Printing marker for c={0}'.format(x))
            if docolour == True:
                plt.plot(graph[x][2][3], graph[x][2][4], 'o', color=colo, markersize=mrkr1)
            else:
                plt.plot(graph[x][2][3], graph[x][2][4], 'o', color='k', markersize=mrkr1)
            if docolour == False:
                colo2 = 'k'
            if x==21 or x==10:
                selmarker = 'v'
                if docolour == False:
                    colo2 = 'w'
            elif x==16 or x==0:
                selmarker = '*'
                if docolour == False:
                    selmarker = 'o'
                    colo2 = 'w'
                else:
                    if x==0:
                        print ('printing selmarker for x={0} with BLUE star'.format(x))
                        colo2='b' # special case
            else:
                selmarker = 'o'
            plt.plot(graph[x][2][3], graph[x][2][4], marker=selmarker, color=colo2, markersize=mrkr1_inner)
        for x in base:
            tree (x, graph, base, bias, visits, print_states_hex, docolour)
    # do it again for the next set
    # find max y and x to get axis right
    max_x = graph[0][2][3]
    max_y = graph[0][2][4]
    min_x = max_x
    for node in graph:
        if node[2][4] > max_y:
            max_y = node[2][4]
        if node[2][3] > max_x:
            max_x = node[2][3]
    #plt.plot(graph[21][2][3], graph[21][2][4],'v',color='k', markersize=mrkr1-2) # final ant
    #plt.plot(graph[10][2][3], graph[10][2][4],'v',color='w', markersize=mrkr1-2) # final post
    #plt.plot(graph[16][2][3], graph[16][2][4],'*',color='k', markersize=mrkr1-2) # initial ant
    #plt.plot(graph[0][2][3], graph[0][2][4],'*',color='w', markersize=mrkr1-2) # initial post
    # Modify use of the area inside the graph
    ymin,ymax = plt.ylim()
    plt.ylim(ymin-4,ymax+1)
    xmin,xmax = plt.xlim()
    plt.xlim(xmin-0,xmax+0)
| 34.081545 | 195 | 0.513411 | 0 | 0 | 204 | 0.025689 | 0 | 0 | 0 | 0 | 1,585 | 0.199597 |
dc2c4dce1a1e4e38ae1c081fcc1c569c6a7af185 | 687 | py | Python | tests/forms/test_dm_boolean_field.py | ramya-chan/digitalmarketplace-utils | 63bcdda22d89c19dcdda3844db34a0dbd9a7cb40 | [
"MIT"
] | 3 | 2015-07-24T21:23:36.000Z | 2020-11-28T03:42:16.000Z | tests/forms/test_dm_boolean_field.py | ramya-chan/digitalmarketplace-utils | 63bcdda22d89c19dcdda3844db34a0dbd9a7cb40 | [
"MIT"
] | 350 | 2015-05-05T09:24:39.000Z | 2021-07-27T15:02:35.000Z | tests/forms/test_dm_boolean_field.py | ramya-chan/digitalmarketplace-utils | 63bcdda22d89c19dcdda3844db34a0dbd9a7cb40 | [
"MIT"
] | 16 | 2015-06-13T15:37:13.000Z | 2021-04-10T18:03:18.000Z | import pytest
import wtforms
from dmutils.forms.fields import DMBooleanField
from dmutils.forms.widgets import DMSelectionButtonBase
class BooleanForm(wtforms.Form):
    """Minimal form with a single DMBooleanField, shared by the tests below."""
    field = DMBooleanField()
@pytest.fixture
def form():
    """A fresh BooleanForm (no form data bound) for each test."""
    boolean_form = BooleanForm()
    return boolean_form
def test_value_is_a_list(form):
    value = form.field.value
    assert isinstance(value, list)
def test_value_is_empty_list_if_there_is_no_selection(form):
    value = form.field.value
    assert value == []
def test_can_be_used_with_a_different_kind_of_selection_button():
    # Local form class to avoid shadowing the module-level BooleanForm.
    class CustomButtonForm(wtforms.Form):
        field = DMBooleanField(widget=DMSelectionButtonBase(type="boolean"))
    custom_form = CustomButtonForm()
    assert custom_form.field.widget.type == "boolean"
| 20.818182 | 76 | 0.770015 | 170 | 0.247453 | 0 | 0 | 52 | 0.075691 | 0 | 0 | 18 | 0.026201 |
dc2c4fc807cf79ade7c992fbbf09a7b0b6750697 | 89 | py | Python | Slide21/calculadora_python.py | yuutognr/MiniCursoPython2020 | fcb94de63dfd954377377187ac283e7275655f45 | [
"Apache-2.0"
] | 2 | 2021-07-16T20:12:53.000Z | 2021-09-29T00:08:09.000Z | Slide21/calculadora_python.py | yuutognr/MiniCursoPython2020 | fcb94de63dfd954377377187ac283e7275655f45 | [
"Apache-2.0"
] | null | null | null | Slide21/calculadora_python.py | yuutognr/MiniCursoPython2020 | fcb94de63dfd954377377187ac283e7275655f45 | [
"Apache-2.0"
] | null | null | null | a = 2 ** 50
# Derived values: all share the same 2**50 base (defined as `a` above).
b = a * 3
c = b - 1000
d = 400 / a + 50
print(a, b, c, d)
dc2cc9df06b6a9b5960974cd590cc42590a881e2 | 302 | py | Python | onadata/libs/models/base_model.py | gushil/kobocat | 5ce27ed5fbf969b2ce68e8a59dd97ced74686711 | [
"BSD-2-Clause"
] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/libs/models/base_model.py | gushil/kobocat | 5ce27ed5fbf969b2ce68e8a59dd97ced74686711 | [
"BSD-2-Clause"
] | 48 | 2019-03-18T09:26:31.000Z | 2019-05-27T08:12:03.000Z | onadata/libs/models/base_model.py | gushil/kobocat | 5ce27ed5fbf969b2ce68e8a59dd97ced74686711 | [
"BSD-2-Clause"
] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z | from django.db import models
class BaseModel(models.Model):
    """Abstract base for project models, adding an in-place ``reload``."""
    class Meta:
        abstract = True
    def reload(self):
        """Re-fetch this row from the database and refresh this instance."""
        fresh = type(self).objects.get(pk=self.pk)
        # Replace the stale state wholesale with the freshly loaded one.
        self.__dict__.clear()
        self.__dict__.update(fresh.__dict__)
| 23.230769 | 57 | 0.652318 | 270 | 0.89404 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.10596 |
dc2e9fb0590446355b2dfc864c3c9f80ebe3266f | 7,025 | py | Python | vikitext/get_text.py | CristinaGHolgado/vikitext | f280153a6c6d94163635e6679ba2073c0a1e2500 | [
"MIT"
] | null | null | null | vikitext/get_text.py | CristinaGHolgado/vikitext | f280153a6c6d94163635e6679ba2073c0a1e2500 | [
"MIT"
] | null | null | null | vikitext/get_text.py | CristinaGHolgado/vikitext | f280153a6c6d94163635e6679ba2073c0a1e2500 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from bs4 import BeautifulSoup, SoupStrainer
import bs4
import requests
import csv
import pandas as pd
import os
import re
"""
Module 3 : retrieve text from each article & basic preprocess
"""
ignore_sents = ['Les associations Vikidia', 'Répondre au sondage', 'Aller à :',
'Récupérée de « https', 'Accédez aux articles', 'Catégorie :',
'Le contenu est disponible sous licence', 'Sur d’autres projets',
'Imprimer / exporter', 'Créer un livre', 'Vikidia, l’encyclopédie',
'Attention, à ne pas confondre', 'Cet article est à compléter', '(Aide)',
'Pages liées', 'À propos de Vikidia', 'Pages liées', 'Espaces de noms',
'PageDiscussion', 'LireModifierModifier', 'AccueilPages par thèmes',
'Article au hasardBanque d’imagesLe SavantDemander un article',
'Modifications récentesCommunautéBavardagesAide', 'Vikidia a besoin de toi',
'Pour rester indépendant, nous refusons' ,'Tu peux soutenir Vikidia',
'Tu peux également financer gratuitement', 'Dernière modification de cette page',
'Non connectéDiscussion', 'Notes et références', '↑ Table', 'Voir aussi[',
'Portail de la littérature —', 'Pour aller plus loin[', 'Portail des sciences —',
'Vikiliens[', 'Lien interne[', 'Lien externe[', '• modifier',
'Soit vous avez mal écrit le titre', "L'article a peut-être été supprimé",
"Il peut aussi avoir été renommé sans création", 'Si vous avez récemment créé cet article',
'Créer le wikicode', 'dans les autres articles (aide)',
'Consultez la liste des articles dont le titre', "Cherchez d'autres pages de Wikipédia",
"Wikipédia ne possède pas d'article", 'Cet article est une ébauche',
'Vous pouvez partager vos connaissances en ']
ignore_single_items = ['Confidentialité', 'Avertissements', 'Version mobile', 'Plus', 'Chercher',
'Navigation', 'Contribuer', 'Espaces de noms', 'PageDiscussion',
'Variantes', 'Affichages', 'Menu de navigation', 'Outils personnels',
'Vikidia', 'Wikipédia', 'Outils', 'Notes pour les rédacteurs :',
'Commons (images et médias)', 'Wikivoyage (guides de voyage)',
'Wikidata (base de données)']
ignore_single_items_wiki = ['Aller au contenu', 'Rechercher', 'Imprimer / exporter', 'Modifier les liens',
'Outils personnels', 'Menu de navigation', 'Navigation', 'Contribuer', 'Outils', 'Espaces de noms', 'Variantes',
'Affichages', 'Liens externes', 'Politique de confidentialité', "À propos de Wikipédia",
"Contact", "Développeurs", 'Statistiques', 'Déclaration sur les témoins (cookies)',
'Précédé par', 'Suivi par', 'Références', 'modifier', 'Lien externe', 'Voir aussi']
ignore_sents_wiki = ['Aragonés | ', 'Un article de Wikipédia', 'AccueilPortails', 'Débuter sur Wikipédia', 'Dans d’autres projets',
'Pour les articles homonymes, voir', 'Wikimedia Commons', 'Afficher / masquer', 'EsperantoEspañol',
'EnglishEspañol', 'EnglishEsperanto', 'Vous lisez un « bon article »', 'Bahasa Indonesia', 'La dernière modification de cette page', 'Dans d’autres projets', 'Wikimedia CommonsWikiquote', 'ArticleDiscussion',
'— Wikipédia', 'Non connectéDiscussion', 'Pages liéesSuivi des pages', 'Créer un livre',
'LireModifier', 'Ce document provient de «', 'Catégories cachées : Pages', "Droit d'auteur : les",
"Voyez les conditions d’utilisation pour plus", 'marque déposée de la Wikimedia Foundation',
'organisation de bienfaisance régie par le paragraphe', 'Cette section est vide, insuffisamment',
'(voir la liste des auteurs)', '(comparer avec la version actuelle)', 'Pour toute information complémentaire,',
'/Articles liés']
def content(f, outname):
    """Retrieve the text of each article pair and save a cleaned corpus.

    Parameters
    ----------
    f : str
        TSV file of articles with columns TITLE | URL | URL_WIKIPEDIA |
        URL_VIKIDIA.
    outname : str
        Basename (without extension) of the output file written to corpus/.
    """
    # exist_ok replaces the race-prone exists()/makedirs() pair.
    os.makedirs('corpus', exist_ok=True)
    df_content = pd.read_csv(f, sep='\t', encoding='utf-8', quoting=csv.QUOTE_NONE)
    print(f'Columns content in input file : TITLE | URL | URL_WIKIPEDIA | URL_VIKIDIA\n')
    print("**This can take a while")
    print("Extracting article text content from Vikidia ...")
    df_content['vikidia_text'] = df_content['URL_VIKIDIA'].apply(lambda x: BeautifulSoup(requests.get(x).text, features="lxml").text.strip())
    print("Extracting article text content from Wikipedia ...")
    df_content['wikipedia_text'] = df_content['URL_WIKIPEDIA'].apply(lambda x: BeautifulSoup(requests.get(x).text, features="lxml").text.strip())
    def clean(col):
        '''Basic preprocessing specific to wiki page dumps (sentence split,
        boilerplate removal, light text normalization).'''
        df_content[col] = df_content[col].apply(lambda x: re.sub(r'\n+', '__sent__', x).strip()) # remove succeeding line breaks
        df_content[col] = df_content[col].apply(lambda x: re.sub(r'\[.+?\]', '', x)) # remove items inside brackets
        df_content[col] = df_content[col].apply(lambda x: [sent for sent in x.split("__sent__") if len(sent) > 3]) # Ignore sent in article text is len < 3
        df_content[col] = df_content[col].apply(lambda x: [s for s in x if not any(item in s for item in ignore_sents)])
        df_content[col] = df_content[col].apply(lambda x: [item for item in x if item.strip() not in ignore_single_items])
        df_content[col] = df_content[col].apply(lambda x: [s for s in x if not any(item in s for item in ignore_sents_wiki)])
        df_content[col] = df_content[col].apply(lambda x: [item for item in x if item.strip() not in ignore_single_items_wiki])
        # Guard against empty sentence lists before peeking at x[0].
        df_content[col] = df_content[col].apply(lambda x: x[1:] if x and 'langues' in x[0] else x[0:]) # ignore first item in list (12 langues, 34 langues...)
        if 'vikidia' in col:
            df_content[col] = df_content[col].apply(lambda x: x[1:]) # skip article title in position 0
        if 'wikipedia' in col:
            # len guard: a one-item list would raise IndexError on x[1].
            df_content[col] = df_content[col].apply(lambda x: x[1:] if len(x) > 1 and x[0] == x[1] else x) # remove titles at the beginning ([Acacia, Acacia, Article text...])
        df_content[col] = df_content[col].apply(lambda x: [y.strip() for y in x]) # remove spaces at the begnning of sent
        df_content[col] = df_content[col].apply(lambda x: [y for y in x if not y.startswith('Portail d')]) # ignore items in list if starts with
        df_content[col] = df_content[col].apply(lambda x: [re.sub(r"(\w+[a-z])+([A-ZÂÊÎÔÛÄËÏÖÜÀÆÇÉÈŒÙ]\w+|[0-9])", r"\1 \2", y) for y in x]) # split overlapping words (wordWord)
        df_content[col] = df_content[col].apply(lambda x: [y for y in x if not y.startswith("↑ ")])
        df_content[col] = df_content[col].apply(lambda x: [y.replace("\xa0"," ") for y in x])
        df_content[col] = df_content[col].apply(lambda x: [y for y in x if len(y.split()) > 3]) # ignore items in list that only contain 3 words e.g.: ['Ceinture de Kuiper', 'Cubewano', 'Plutino', 'Objet épars', ...]
        df_content[col] = df_content[col].apply(lambda x: [y for y in x if not y.startswith("v · m")])
    clean('vikidia_text')
    clean('wikipedia_text')
    output_name = os.path.join("corpus", outname + ".tsv")
    df_content.to_csv(output_name, sep='\t', encoding='utf-8', quoting=csv.QUOTE_NONE)
    print("File(s) saved in /corpus")
| 56.653226 | 214 | 0.692954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,348 | 0.607093 |
dc2fabb65e5e786f3ca6a824b28bbd0720153d1b | 4,411 | py | Python | graph_dataset.py | lvrcek/assembly_graph_utils | 00d1f502ec5742071e00121a2f0895709829d3c2 | [
"MIT"
] | null | null | null | graph_dataset.py | lvrcek/assembly_graph_utils | 00d1f502ec5742071e00121a2f0895709829d3c2 | [
"MIT"
] | null | null | null | graph_dataset.py | lvrcek/assembly_graph_utils | 00d1f502ec5742071e00121a2f0895709829d3c2 | [
"MIT"
] | null | null | null | import os
import pickle
import subprocess
import dgl
from dgl.data import DGLDataset
import graph_parser
class AssemblyGraphDataset(DGLDataset):
"""
A dataset to store the assembly graphs.
A class that inherits from the DGLDataset and extends the
functionality by adding additional attributes and processing the
graph data appropriately.
Attributes
----------
root : str
Root directory consisting of other directories where the raw
data can be found (reads in FASTQ format), and where all the
processing results are stored.
tmp_dir : str
Directory inside root where mid-results (output of the raven
assembler) is stored
info_dir : str
Directory where additional graph information is stored
raven_path : str
Path to the raven assembler
"""
def __init__(self, root, specs=None):
"""
Parameters
----------
root : str
Root directory consisting of other directories where the raw
data can be found (reads in FASTQ format), and where all the
processing results are stored.
"""
self.root = os.path.abspath(root)
self.specs = specs
if 'raw' not in os.listdir(self.root):
subprocess.run(f"mkdir 'raw'", shell=True, cwd=self.root)
if 'tmp' not in os.listdir(self.root):
subprocess.run(f"mkdir 'tmp'", shell=True, cwd=self.root)
if 'processed' not in os.listdir(self.root):
subprocess.run(f"mkdir 'processed'", shell=True, cwd=self.root)
if 'info' not in os.listdir(self.root):
subprocess.run(f"mkdir 'info'", shell=True, cwd=self.root)
raw_dir = os.path.join(self.root, 'raw')
save_dir = os.path.join(self.root, 'processed')
self.tmp_dir = os.path.join(self.root, 'tmp')
self.info_dir = os.path.join(self.root, 'info')
self.raven_path = os.path.abspath('vendor/raven_filter/build/bin/raven')
super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)
def has_cache(self):
"""Check if the raw data is already processed and stored."""
return len(os.listdir(self.save_dir)) > 0
# return len(os.listdir(self.save_dir)) == len(os.listdir(self.raw_dir))
def __len__(self):
return len(os.listdir(self.save_dir))
def __getitem__(self, idx):
(graph,), _ = dgl.load_graphs(os.path.join(self.save_dir, str(idx) + '.dgl'))
return idx, graph
def process(self):
"""Process the raw data and save it on the disk."""
if self.specs is None:
threads = 32
filter = 0.99
out = 'assembly.fasta'
else:
threads = self.specs['threads']
filter = self.specs['filter']
out = self.specs['out']
graphia_dir = os.path.join(self.root, 'graphia')
if not os.path.isdir(graphia_dir):
os.mkdir(graphia_dir)
with open(f'{self.root}/dataset_log.txt', 'w') as f:
for cnt, fastq in enumerate(os.listdir(self.raw_dir)):
print(cnt, fastq)
reads_path = os.path.abspath(os.path.join(self.raw_dir, fastq))
print(reads_path)
subprocess.run(f'{self.raven_path} --filter {filter} --weaken -t{threads} -p0 {reads_path} > {out}', shell=True, cwd=self.tmp_dir)
for j in range(1, 2):
print(f'graph {j}')
# processed_path = os.path.join(self.save_dir, f'd{cnt}_g{j}.dgl')
processed_path = os.path.join(self.save_dir, f'{cnt}.dgl')
graph, pred, succ, reads, edges = graph_parser.from_csv(os.path.join(self.tmp_dir, f'graph_{j}.csv'), reads_path)
dgl.save_graphs(processed_path, graph)
pickle.dump(pred, open(f'{self.info_dir}/{cnt}_pred.pkl', 'wb'))
pickle.dump(succ, open(f'{self.info_dir}/{cnt}_succ.pkl', 'wb'))
pickle.dump(reads, open(f'{self.info_dir}/{cnt}_reads.pkl', 'wb'))
pickle.dump(edges, open(f'{self.info_dir}/{cnt}_edges.pkl', 'wb'))
graphia_path = os.path.join(graphia_dir, f'{cnt}_graph.txt')
graph_parser.print_pairwise(graph, graphia_path)
f.write(f'{cnt} - {fastq}\n')
| 40.842593 | 146 | 0.59533 | 4,301 | 0.975062 | 0 | 0 | 0 | 0 | 0 | 0 | 1,775 | 0.402403 |
dc3087b1cd6e043ab247cf9a5e6b80511711cc17 | 1,297 | py | Python | cwa_qr/poster.py | MaZderMind/cwa-qr | 60799315f1508483025e1e57000a5116dea78449 | [
"MIT"
] | 16 | 2021-04-22T07:12:18.000Z | 2022-02-07T04:54:54.000Z | cwa_qr/poster.py | MaZderMind/cwa-qr | 60799315f1508483025e1e57000a5116dea78449 | [
"MIT"
] | 10 | 2021-04-22T15:33:23.000Z | 2022-03-06T10:54:07.000Z | cwa_qr/poster.py | MaZderMind/cwa-qr | 60799315f1508483025e1e57000a5116dea78449 | [
"MIT"
] | 7 | 2021-04-22T12:37:20.000Z | 2021-08-09T05:47:54.000Z | import io
import os
from svgutils import transform as svg_utils
import qrcode.image.svg
from cwa_qr import generate_qr_code, CwaEventDescription
class CwaPoster(object):
POSTER_PORTRAIT = 'portrait'
POSTER_LANDSCAPE = 'landscape'
TRANSLATIONS = {
POSTER_PORTRAIT: {
'file': 'poster/portrait.svg',
'x': 80,
'y': 60,
'scale': 6
},
POSTER_LANDSCAPE: {
'file': 'poster/landscape.svg',
'x': 42,
'y': 120,
'scale': 4.8
}
}
def generate_poster(event_description: CwaEventDescription, template: CwaPoster) -> svg_utils.SVGFigure:
qr = generate_qr_code(event_description)
svg = qr.make_image(image_factory=qrcode.image.svg.SvgPathImage)
svg_bytes = io.BytesIO()
svg.save(svg_bytes)
poster = svg_utils.fromfile('{}/{}'.format(
os.path.dirname(os.path.abspath(__file__)),
CwaPoster.TRANSLATIONS[template]['file']
))
overlay = svg_utils.fromstring(svg_bytes.getvalue().decode('UTF-8')).getroot()
overlay.moveto(
CwaPoster.TRANSLATIONS[template]['x'],
CwaPoster.TRANSLATIONS[template]['y'],
CwaPoster.TRANSLATIONS[template]['scale']
)
poster.append([overlay])
return poster
| 27.020833 | 104 | 0.625289 | 416 | 0.32074 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.104086 |
dc31324d3932732b9cf6cec61f3b7fc77261b755 | 335 | py | Python | nvchecker/api.py | ypsilik/nvchecker | 9d2d47ed15929c0698f4c9d34fa018d52413d3b3 | [
"MIT"
] | null | null | null | nvchecker/api.py | ypsilik/nvchecker | 9d2d47ed15929c0698f4c9d34fa018d52413d3b3 | [
"MIT"
] | null | null | null | nvchecker/api.py | ypsilik/nvchecker | 9d2d47ed15929c0698f4c9d34fa018d52413d3b3 | [
"MIT"
] | null | null | null | # MIT licensed
# Copyright (c) 2020 lilydjwg <lilydjwg@gmail.com>, et al.
from .httpclient import session, TemporaryError, HTTPError
from .util import (
Entry, BaseWorker, RawResult, VersionResult,
AsyncCache, KeyManager, GetVersionError,
)
from .sortversion import sort_version_keys
from .ctxvars import tries, proxy, user_agent
| 30.454545 | 58 | 0.78806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.214925 |
dc32be1783d77b9191091cbcb6f95353fc69befc | 15,298 | py | Python | examples/psi4_interface/ccsd.py | maxscheurer/pdaggerq | e9fef3466e0d0170afc3094ab79e603200e78dfb | [
"Apache-2.0"
] | 37 | 2020-09-17T19:29:18.000Z | 2022-03-03T16:29:16.000Z | examples/psi4_interface/ccsd.py | maxscheurer/pdaggerq | e9fef3466e0d0170afc3094ab79e603200e78dfb | [
"Apache-2.0"
] | 7 | 2021-02-28T19:22:12.000Z | 2022-02-22T15:17:47.000Z | examples/psi4_interface/ccsd.py | maxscheurer/pdaggerq | e9fef3466e0d0170afc3094ab79e603200e78dfb | [
"Apache-2.0"
] | 6 | 2021-02-16T22:34:29.000Z | 2021-12-04T19:37:23.000Z | # pdaggerq - A code for bringing strings of creation / annihilation operators to normal order.
# Copyright (C) 2020 A. Eugene DePrince III
#
# This file is part of the pdaggerq package.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
spin-orbital CCSD amplitude equations
"""
import numpy as np
from numpy import einsum
def ccsd_energy(t1, t2, f, g, o, v):
# < 0 | e(-T) H e(T) | 0> :
# 1.0000 f(i,i)
energy = 1.000000000000000 * einsum('ii', f[o, o])
# 1.0000 f(i,a)*t1(a,i)
energy += 1.000000000000000 * einsum('ia,ai', f[o, v], t1)
# -0.5000 <j,i||j,i>
energy += -0.500000000000000 * einsum('jiji', g[o, o, o, o])
# 0.2500 <j,i||a,b>*t2(a,b,j,i)
energy += 0.250000000000000 * einsum('jiab,abji', g[o, o, v, v], t2)
# -0.5000 <j,i||a,b>*t1(a,i)*t1(b,j)
energy += -0.500000000000000 * einsum('jiab,ai,bj', g[o, o, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
return energy
def singles_residual(t1, t2, f, g, o, v):
# < 0 | m* e e(-T) H e(T) | 0> :
# 1.0000 f(e,m)
singles_res = 1.000000000000000 * einsum('em->em', f[v, o])
# -1.0000 f(i,m)*t1(e,i)
singles_res += -1.000000000000000 * einsum('im,ei->em', f[o, o], t1)
# 1.0000 f(e,a)*t1(a,m)
singles_res += 1.000000000000000 * einsum('ea,am->em', f[v, v], t1)
# -1.0000 f(i,a)*t2(a,e,m,i)
singles_res += -1.000000000000000 * einsum('ia,aemi->em', f[o, v], t2)
# -1.0000 f(i,a)*t1(a,m)*t1(e,i)
singles_res += -1.000000000000000 * einsum('ia,am,ei->em', f[o, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <i,e||a,m>*t1(a,i)
singles_res += 1.000000000000000 * einsum('ieam,ai->em', g[o, v, v, o], t1)
# -0.5000 <j,i||a,m>*t2(a,e,j,i)
singles_res += -0.500000000000000 * einsum('jiam,aeji->em', g[o, o, v, o], t2)
# -0.5000 <i,e||a,b>*t2(a,b,m,i)
singles_res += -0.500000000000000 * einsum('ieab,abmi->em', g[o, v, v, v], t2)
# 1.0000 <j,i||a,b>*t1(a,i)*t2(b,e,m,j)
singles_res += 1.000000000000000 * einsum('jiab,ai,bemj->em', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 0.5000 <j,i||a,b>*t1(a,m)*t2(b,e,j,i)
singles_res += 0.500000000000000 * einsum('jiab,am,beji->em', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 0.5000 <j,i||a,b>*t1(e,i)*t2(a,b,m,j)
singles_res += 0.500000000000000 * einsum('jiab,ei,abmj->em', g[o, o, v, v], t1, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 1.0000 <j,i||a,m>*t1(a,i)*t1(e,j)
singles_res += 1.000000000000000 * einsum('jiam,ai,ej->em', g[o, o, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <i,e||a,b>*t1(a,i)*t1(b,m)
singles_res += 1.000000000000000 * einsum('ieab,ai,bm->em', g[o, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 <j,i||a,b>*t1(a,i)*t1(b,m)*t1(e,j)
singles_res += 1.000000000000000 * einsum('jiab,ai,bm,ej->em', g[o, o, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
return singles_res
def doubles_residual(t1, t2, f, g, o, v):
# < 0 | m* n* f e e(-T) H e(T) | 0> :
# -1.0000 P(m,n)f(i,n)*t2(e,f,m,i)
contracted_intermediate = -1.000000000000000 * einsum('in,efmi->efmn', f[o, o], t2)
doubles_res = 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 1.0000 P(e,f)f(e,a)*t2(a,f,m,n)
contracted_intermediate = 1.000000000000000 * einsum('ea,afmn->efmn', f[v, v], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -1.0000 P(m,n)f(i,a)*t1(a,n)*t2(e,f,m,i)
contracted_intermediate = -1.000000000000000 * einsum('ia,an,efmi->efmn', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -1.0000 P(e,f)f(i,a)*t1(e,i)*t2(a,f,m,n)
contracted_intermediate = -1.000000000000000 * einsum('ia,ei,afmn->efmn', f[o, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# 1.0000 <e,f||m,n>
doubles_res += 1.000000000000000 * einsum('efmn->efmn', g[v, v, o, o])
# 1.0000 P(e,f)<i,e||m,n>*t1(f,i)
contracted_intermediate = 1.000000000000000 * einsum('iemn,fi->efmn', g[o, v, o, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# 1.0000 P(m,n)<e,f||a,n>*t1(a,m)
contracted_intermediate = 1.000000000000000 * einsum('efan,am->efmn', g[v, v, v, o], t1)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 0.5000 <j,i||m,n>*t2(e,f,j,i)
doubles_res += 0.500000000000000 * einsum('jimn,efji->efmn', g[o, o, o, o], t2)
# 1.0000 P(m,n)*P(e,f)<i,e||a,n>*t2(a,f,m,i)
contracted_intermediate = 1.000000000000000 * einsum('iean,afmi->efmn', g[o, v, v, o], t2)
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# 0.5000 <e,f||a,b>*t2(a,b,m,n)
doubles_res += 0.500000000000000 * einsum('efab,abmn->efmn', g[v, v, v, v], t2)
# 1.0000 P(m,n)<j,i||a,n>*t1(a,i)*t2(e,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jian,ai,efmj->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 0.5000 P(m,n)<j,i||a,n>*t1(a,m)*t2(e,f,j,i)
contracted_intermediate = 0.500000000000000 * einsum('jian,am,efji->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -1.0000 P(m,n)*P(e,f)<j,i||a,n>*t1(e,i)*t2(a,f,m,j)
contracted_intermediate = -1.000000000000000 * einsum('jian,ei,afmj->efmn', g[o, o, v, o], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# 1.0000 P(e,f)<i,e||a,b>*t1(a,i)*t2(b,f,m,n)
contracted_intermediate = 1.000000000000000 * einsum('ieab,ai,bfmn->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -1.0000 P(m,n)*P(e,f)<i,e||a,b>*t1(a,n)*t2(b,f,m,i)
contracted_intermediate = -1.000000000000000 * einsum('ieab,an,bfmi->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# 0.5000 P(e,f)<i,e||a,b>*t1(f,i)*t2(a,b,m,n)
contracted_intermediate = 0.500000000000000 * einsum('ieab,fi,abmn->efmn', g[o, v, v, v], t1, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -1.0000 <j,i||m,n>*t1(e,i)*t1(f,j)
doubles_res += -1.000000000000000 * einsum('jimn,ei,fj->efmn', g[o, o, o, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(m,n)*P(e,f)<i,e||a,n>*t1(a,m)*t1(f,i)
contracted_intermediate = 1.000000000000000 * einsum('iean,am,fi->efmn', g[o, v, v, o], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# -1.0000 <e,f||a,b>*t1(a,n)*t1(b,m)
doubles_res += -1.000000000000000 * einsum('efab,an,bm->efmn', g[v, v, v, v], t1, t1, optimize=['einsum_path', (0, 1), (0, 1)])
# -0.5000 P(m,n)<j,i||a,b>*t2(a,b,n,i)*t2(e,f,m,j)
contracted_intermediate = -0.500000000000000 * einsum('jiab,abni,efmj->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 0.2500 <j,i||a,b>*t2(a,b,m,n)*t2(e,f,j,i)
doubles_res += 0.250000000000000 * einsum('jiab,abmn,efji->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# -0.5000 <j,i||a,b>*t2(a,e,j,i)*t2(b,f,m,n)
doubles_res += -0.500000000000000 * einsum('jiab,aeji,bfmn->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
# 1.0000 P(m,n)<j,i||a,b>*t2(a,e,n,i)*t2(b,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jiab,aeni,bfmj->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 1), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -0.5000 <j,i||a,b>*t2(a,e,m,n)*t2(b,f,j,i)
doubles_res += -0.500000000000000 * einsum('jiab,aemn,bfji->efmn', g[o, o, v, v], t2, t2, optimize=['einsum_path', (0, 2), (0, 1)])
# 1.0000 P(m,n)<j,i||a,b>*t1(a,i)*t1(b,n)*t2(e,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jiab,ai,bn,efmj->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# 1.0000 P(e,f)<j,i||a,b>*t1(a,i)*t1(e,j)*t2(b,f,m,n)
contracted_intermediate = 1.000000000000000 * einsum('jiab,ai,ej,bfmn->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# -0.5000 <j,i||a,b>*t1(a,n)*t1(b,m)*t2(e,f,j,i)
doubles_res += -0.500000000000000 * einsum('jiab,an,bm,efji->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
# 1.0000 P(m,n)*P(e,f)<j,i||a,b>*t1(a,n)*t1(e,i)*t2(b,f,m,j)
contracted_intermediate = 1.000000000000000 * einsum('jiab,an,ei,bfmj->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate) + -1.00000 * einsum('efmn->femn', contracted_intermediate) + 1.00000 * einsum('efmn->fenm', contracted_intermediate)
# -0.5000 <j,i||a,b>*t1(e,i)*t1(f,j)*t2(a,b,m,n)
doubles_res += -0.500000000000000 * einsum('jiab,ei,fj,abmn->efmn', g[o, o, v, v], t1, t1, t2, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
# -1.0000 P(m,n)<j,i||a,n>*t1(a,m)*t1(e,i)*t1(f,j)
contracted_intermediate = -1.000000000000000 * einsum('jian,am,ei,fj->efmn', g[o, o, v, o], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->efnm', contracted_intermediate)
# -1.0000 P(e,f)<i,e||a,b>*t1(a,n)*t1(b,m)*t1(f,i)
contracted_intermediate = -1.000000000000000 * einsum('ieab,an,bm,fi->efmn', g[o, v, v, v], t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 2), (0, 1)])
doubles_res += 1.00000 * contracted_intermediate + -1.00000 * einsum('efmn->femn', contracted_intermediate)
# 1.0000 <j,i||a,b>*t1(a,n)*t1(b,m)*t1(e,i)*t1(f,j)
doubles_res += 1.000000000000000 * einsum('jiab,an,bm,ei,fj->efmn', g[o, o, v, v], t1, t1, t1, t1, optimize=['einsum_path', (0, 1), (0, 3), (0, 2), (0, 1)])
return doubles_res
def ccsd_iterations(t1, t2, fock, g, o, v, e_ai, e_abij, hf_energy, max_iter=100,
e_convergence=1e-8,r_convergence=1e-8,diis_size=None, diis_start_cycle=4):
# initialize diis if diis_size is not None
# else normal scf iterate
if diis_size is not None:
from diis import DIIS
diis_update = DIIS(diis_size, start_iter=diis_start_cycle)
t1_dim = t1.size
old_vec = np.hstack((t1.flatten(), t2.flatten()))
fock_e_ai = np.reciprocal(e_ai)
fock_e_abij = np.reciprocal(e_abij)
old_energy = ccsd_energy(t1, t2, fock, g, o, v)
print("")
print(" ==> CCSD amplitude equations <==")
print("")
print(" Iter Energy |dE| |dT|")
for idx in range(max_iter):
residual_singles = singles_residual(t1, t2, fock, g, o, v)
residual_doubles = doubles_residual(t1, t2, fock, g, o, v)
res_norm = np.linalg.norm(residual_singles) + np.linalg.norm(residual_doubles)
singles_res = residual_singles + fock_e_ai * t1
doubles_res = residual_doubles + fock_e_abij * t2
new_singles = singles_res * e_ai
new_doubles = doubles_res * e_abij
# diis update
if diis_size is not None:
vectorized_iterate = np.hstack(
(new_singles.flatten(), new_doubles.flatten()))
error_vec = old_vec - vectorized_iterate
new_vectorized_iterate = diis_update.compute_new_vec(vectorized_iterate,
error_vec)
new_singles = new_vectorized_iterate[:t1_dim].reshape(t1.shape)
new_doubles = new_vectorized_iterate[t1_dim:].reshape(t2.shape)
old_vec = new_vectorized_iterate
current_energy = ccsd_energy(new_singles, new_doubles, fock, g, o, v)
delta_e = np.abs(old_energy - current_energy)
print(" {: 5d} {: 20.12f} {: 20.12f} {: 20.12f}".format(idx, current_energy - hf_energy, delta_e, res_norm))
if delta_e < e_convergence and res_norm < r_convergence:
# assign t1 and t2 variables for future use before breaking
t1 = new_singles
t2 = new_doubles
break
else:
# assign t1 and t2 and old_energy for next iteration
t1 = new_singles
t2 = new_doubles
old_energy = current_energy
else:
raise ValueError("CCSD iterations did not converge")
return t1, t2
| 54.441281 | 233 | 0.59413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,967 | 0.324683 |
dc353a0a9a2f3afa4d5df4e0a5dd29cb203037fa | 803 | py | Python | pageviews.py | priyankamandikal/arowf | 0b0226da6c1f0d06360c9243334977f885b70a4c | [
"Apache-2.0"
] | 7 | 2017-10-09T05:39:14.000Z | 2019-06-26T18:26:40.000Z | pageviews.py | priyankamandikal/minireview | 0b0226da6c1f0d06360c9243334977f885b70a4c | [
"Apache-2.0"
] | null | null | null | pageviews.py | priyankamandikal/minireview | 0b0226da6c1f0d06360c9243334977f885b70a4c | [
"Apache-2.0"
] | 3 | 2017-03-20T05:56:05.000Z | 2018-12-19T03:07:09.000Z | from datetime import date, datetime, timedelta
from traceback import format_exc
from requests import get
pageviews_url = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/per-article'
def format_date(d):
return datetime.strftime(d, '%Y%m%d%H')
def article_views(article, project='en.wikipedia', access='all-access', agent='all-agents', granularity='daily', start=None, end=None):
endDate = date.today()
startDate = endDate - timedelta(30)
url = '/'.join([pageviews_url, project, access, agent, article, 'daily', format_date(startDate), format_date(endDate)])
try:
result = get(url).json()
last30dayscount = 0
for item in result['items']:
last30dayscount += item['views']
return last30dayscount
except:
print 'Error while fetching page views of ' + article
print format_exc() | 34.913043 | 135 | 0.737235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.225405 |
dc362bba46a8f158a18291beafd2550a99917d83 | 1,848 | py | Python | nikola/data/themes/base/messages/messages_eo.py | vault-the/nikola | c7a556b587004df442fdc9127c07997715556265 | [
"MIT"
] | 1 | 2021-01-03T01:54:37.000Z | 2021-01-03T01:54:37.000Z | nikola/data/themes/base/messages/messages_eo.py | vault-the/nikola | c7a556b587004df442fdc9127c07997715556265 | [
"MIT"
] | 13 | 2021-01-21T04:54:51.000Z | 2022-03-21T04:15:56.000Z | nikola/data/themes/base/messages/messages_eo.py | vault-the/nikola | c7a556b587004df442fdc9127c07997715556265 | [
"MIT"
] | null | null | null | # -*- encoding:utf-8 -*-
"""Autogenerated file, do not edit. Submit translations on Transifex."""
MESSAGES = {
"%d min remaining to read": "%d minutoj por legi",
"(active)": "(aktiva)",
"Also available in:": "Ankaŭ disponebla en:",
"Archive": "Arkivo",
"Atom feed": "",
"Authors": "Aŭtoroj",
"Categories": "Kategorioj",
"Comments": "Komentoj",
"LANGUAGE": "Esperante",
"Languages:": "Lingvoj:",
"More posts about %s": "Pli da artikoloj pri %s",
"Newer posts": "Pli novaj artikoloj",
"Next post": "Venonta artikolo",
"Next": "",
"No posts found.": "Neniu artikoloj trovitaj.",
"Nothing found.": "Nenio trovita.",
"Older posts": "Pli malnovaj artikoloj",
"Original site": "Originala retejo",
"Posted:": "Skribita:",
"Posts about %s": "Artikoloj pri %s",
"Posts by %s": "Artikoloj de %s",
"Posts for year %s": "Artikoloj de la jaro %s",
"Posts for {month} {day}, {year}": "Artikoloj de la {day}a de {month} {year}",
"Posts for {month} {year}": "Artikoloj de {month} {year}",
"Previous post": "Antaŭa artikolo",
"Previous": "",
"Publication date": "Eldona dato",
"RSS feed": "RSS fluo",
"Read in English": "Legu ĝin en Esperanto",
"Read more": "Legu pli",
"Skip to main content": "Salti al ĉefenhavo",
"Source": "Fonto",
"Subcategories:": "Subkategorioj:",
"Tags and Categories": "Etikedoj kaj kategorioj",
"Tags": "Etikedoj",
"Toggle navigation": "Ŝalti menuon",
"Uncategorized": "Sen kategorioj",
"Up": "",
"Updates": "Ĝisdatigoj",
"Write your page here.": "Skribu tie vian paĝon.",
"Write your post here.": "Skribu tie vian artikolon.",
"old posts, page %d": "%da paĝo de malnovaj artikoloj",
"page %d": "paĝo %d",
"{month} {day}, {year}": "",
"{month} {year}": "",
}
| 36.235294 | 82 | 0.588203 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,480 | 0.796555 |
dc36cb825ced38ddcdae874b1bd16a286fef0bf8 | 6,880 | py | Python | certbot_dns_desec/dns_desec.py | desec-io/certbot-dns-desec | 23f657223ee4a29d6b0d2c1dfaa8a54f3696028a | [
"Apache-2.0"
] | 4 | 2021-05-13T13:27:55.000Z | 2022-03-21T07:43:33.000Z | certbot_dns_desec/dns_desec.py | desec-io/certbot-dns-desec | 23f657223ee4a29d6b0d2c1dfaa8a54f3696028a | [
"Apache-2.0"
] | 14 | 2021-06-14T11:27:18.000Z | 2022-02-12T16:00:06.000Z | certbot_dns_desec/dns_desec.py | desec-io/certbot-dns-desec | 23f657223ee4a29d6b0d2c1dfaa8a54f3696028a | [
"Apache-2.0"
] | 1 | 2022-02-23T17:58:08.000Z | 2022-02-23T17:58:08.000Z | """DNS Authenticator for deSEC."""
import json
import logging
import time
import requests
import zope.interface
from certbot import errors
from certbot import interfaces
from certbot.plugins import dns_common
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@zope.interface.implementer(interfaces.IAuthenticator) # needed for compatibility with older certbots, see #13
@zope.interface.provider(interfaces.IPluginFactory) # needed for compatibility with older certbots, see #13
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for deSEC
This Authenticator uses the deSEC REST API to fulfill a dns-01 challenge.
"""
description = "Obtain certificates using a DNS TXT record (if you are using deSEC.io for DNS)."
DEFAULT_ENDPOINT = "https://desec.io/api/v1"
def __init__(self, *args, **kwargs):
super(Authenticator, self).__init__(*args, **kwargs)
self.credentials = None
@classmethod
def add_parser_arguments(cls, add): # pylint: disable=arguments-differ
super(Authenticator, cls).add_parser_arguments(
add, default_propagation_seconds=80 # TODO decrease after deSEC fixed their NOTIFY problem
)
add("credentials", help="deSEC credentials INI file.")
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return (
"This plugin configures a DNS TXT record to respond to a dns-01 challenge using "
"the deSEC Remote REST API."
)
def _setup_credentials(self):
self.credentials = self._configure_credentials(
key="credentials",
label="deSEC credentials INI file",
required_variables={
"token": "Access token for deSEC API.",
},
)
def _desec_work(self, domain, validation_name, validation, set_operator):
client = self._get_desec_client()
zone = client.get_authoritative_zone(validation_name)
subname = validation_name.rsplit(zone['name'], 1)[0].rstrip('.')
records = client.get_txt_rrset(zone, subname)
logger.debug(f"Current TXT records: {records}")
records = set_operator(records, {f'"{validation}"'})
logger.debug(f"Setting TXT records: {records}")
client.set_txt_rrset(zone, subname, records)
def _perform(self, domain, validation_name, validation):
logger.debug(f"Authenticator._perform: {domain}, {validation_name}, {validation}")
self._desec_work(domain, validation_name, validation, set.union)
def _cleanup(self, domain, validation_name, validation):
logger.debug(f"Authenticator._cleanup: {domain}, {validation_name}, {validation}")
self._desec_work(domain, validation_name, validation, set.difference)
def _get_desec_client(self):
return _DesecConfigClient(
self.credentials.conf("endpoint") or self.DEFAULT_ENDPOINT,
self.credentials.conf("token"),
)
class _DesecConfigClient(object):
"""
Encapsulates all communication with the deSEC REST API.
"""
def __init__(self, endpoint, token):
logger.debug("creating _DesecConfigClient")
self.endpoint = endpoint.rstrip('/')
self.token = token
self.session = requests.Session()
self.session.headers["Authorization"] = f"Token {token}"
self.session.headers["Content-Type"] = "application/json"
@staticmethod
def desec_request(method, **kwargs):
for _ in range(3):
response: requests.Response = method(**kwargs)
if response.status_code == 429 and 'Retry-After' in response.headers:
try:
cooldown = int(response.headers['Retry-After'])
except ValueError:
return response
logger.debug(f"deSEC API limit reached. Retrying request after {cooldown}s.")
time.sleep(cooldown)
else:
return response
return response
def desec_get(self, **kwargs):
return self.desec_request(self.session.get, **kwargs)
def desec_put(self, **kwargs):
return self.desec_request(self.session.put, **kwargs)
def get_authoritative_zone(self, qname):
response = self.desec_get(url=f"{self.endpoint}/domains/?owns_qname={qname}")
self._check_response_status(response)
data = self._response_json(response)
try:
return data[0]
except IndexError:
raise errors.PluginError(f"Could not find suitable domain in your account (did you create it?): {qname}")
def get_txt_rrset(self, zone, subname):
domain = zone['name']
response = self.desec_get(
url=f"{self.endpoint}/domains/{domain}/rrsets/{subname}/TXT/",
)
if response.status_code == 404:
return set()
self._check_response_status(response, domain=domain)
return set(self._response_json(response).get('records', set()))
def set_txt_rrset(self, zone, subname, records: set):
domain = zone['name']
response = self.desec_put(
url=f"{self.endpoint}/domains/{domain}/rrsets/",
data=json.dumps([
{"subname": subname, "type": "TXT", "ttl": zone['minimum_ttl'], "records": list(records)},
]),
)
return self._check_response_status(response, domain=domain)
def _check_response_status(self, response, **kwargs):
if 200 <= response.status_code <= 299:
return
elif response.status_code in [401, 403]:
raise errors.PluginError(f"Could not authenticate against deSEC API: {response.content}")
elif response.status_code == 404:
raise errors.PluginError(f"Not found ({kwargs}): {response.content}")
elif response.status_code == 429:
raise errors.PluginError(f"deSEC throttled your request even after we waited the prescribed cool-down "
f"time. Did you use the API in parallel? {response.content}")
elif response.status_code >= 500:
raise errors.PluginError(f"deSEC API server error (status {response.status_code}): {response.content}")
else:
raise errors.PluginError(f"Unknown error when talking to deSEC (status {response.status_code}: "
f"Request was on '{response.request.url}' with payload {response.request.body}. "
f"Response was '{response.content}'.")
def _response_json(self, response):
try:
return response.json()
except json.JSONDecodeError:
raise errors.PluginError(f"deSEC API sent non-JSON response (status {response.status_code}): "
f"{response.content}")
| 41.445783 | 118 | 0.641279 | 6,374 | 0.926453 | 0 | 0 | 3,300 | 0.479651 | 0 | 0 | 2,123 | 0.308576 |
dc36d68e65eb23babc3b82c602c74e2f8d0d78f4 | 21,474 | py | Python | eeg_project/plot_data.py | nickrose/eeg_ML | 275e198bf04c5738af86095b6db2da9c2427dd0e | [
"MIT"
] | null | null | null | eeg_project/plot_data.py | nickrose/eeg_ML | 275e198bf04c5738af86095b6db2da9c2427dd0e | [
"MIT"
] | null | null | null | eeg_project/plot_data.py | nickrose/eeg_ML | 275e198bf04c5738af86095b6db2da9c2427dd0e | [
"MIT"
] | null | null | null | """ some tools for plotting EEG data and doing visual comparison """
from eeg_project.read_data import (my_read_eeg_generic, SAMP_FREQ,
pass_through, accumulate_subject_file_list, files_skip_processing,
sample_file_list, match_types)
import numpy as np
import pandas as pd
import torch
from collections import defaultdict
# from six import text_type
import tqdm
from matplotlib import pyplot, cm
# from mpl_toolkits.mplot3d import Axes3D
# # import ipywidgets as widgets
from IPython.display import clear_output
# Precision/recall-family metric names; these are drawn together on one
# shared "Precision / Recall" figure by plot_train_results().
PRF_metrics = ['recall', 'precision', 'f_1_meas']
# Metric names that each get their own train-vs-test figure.
basic_metrics = ['acc', 'auc']
def plot_train_results(metrics2record, loss_metric,
        train_metrics, test_metrics):
    """ Plot learning curves for the training results.

    Args:
        metrics2record: list of recorded metric names; indexes the columns
            of ``train_metrics`` / ``test_metrics``. Names found in
            ``basic_metrics`` ('acc', 'auc') each get their own figure;
            names found in ``PRF_metrics`` ('recall', 'precision',
            'f_1_meas') share a single figure.
        loss_metric: 1-D sequence of per-epoch loss values.
        train_metrics: 2-D array (epoch x metric) of training-set metrics.
        test_metrics: 2-D array (epoch x metric) of test-set metrics.
    """
    # Loss curve on a log scale with 5 evenly spaced tick marks spanning
    # the observed loss range.
    pyplot.figure(figsize=(10, 5))
    min_, max_ = np.min(loss_metric), np.max(loss_metric)
    pyplot.plot(loss_metric)
    # Bug fix: the previous expression omitted the "/ 4", so all but the
    # first two ticks landed beyond max_.
    pyplot.yticks(min_ + np.arange(5) * (max_ - min_) / 4)
    pyplot.title('Loss')
    pyplot.xlabel('Epoch')
    pyplot.yscale('log')
    pyplot.show()

    # One figure per basic metric, comparing the train and test curves.
    for prm in basic_metrics:
        if prm in metrics2record:
            met_idx = metrics2record.index(prm)
            pyplot.figure(figsize=(10, 5))
            leg = []
            lg, = pyplot.plot(train_metrics[:, met_idx], label='train')
            leg.append(lg)
            lg, = pyplot.plot(test_metrics[:, met_idx], label='test')
            leg.append(lg)
            pyplot.legend(handles=leg)
            pyplot.title(prm)
            pyplot.xlabel('Epoch')
            pyplot.show()

    # All recorded precision/recall-family metrics share one figure,
    # train curves first, then test curves (same order as before).
    if any(prm in PRF_metrics for prm in metrics2record):
        pyplot.figure(figsize=(10, 5))
        leg = []
        for subset, metrics in (('train', train_metrics),
                                ('test', test_metrics)):
            for prm in PRF_metrics:
                if prm in metrics2record:
                    met_idx = metrics2record.index(prm)
                    lg, = pyplot.plot(metrics[:, met_idx],
                                      label=(prm + ':' + subset))
                    leg.append(lg)
        pyplot.legend(handles=leg)
        pyplot.title('Precision / Recall')
        pyplot.xlabel('Epoch')
        pyplot.show()
def highlight_correlated_feature_twoclass(
        file_samples=100,
        match_types_in=match_types, figsize=(12, 14),
        process_data=None, pca_vec_to_plot=5, debug=1):
    """Sub-sample the data set and plot correlation and dominant (PCA)
    feature-weight information, comparing alcoholic vs. non-alcoholic
    recordings per match type.

    Args:
        file_samples: number of files sampled per (match, class) cell.
        match_types_in: iterable of match types to process.
        figsize: base figure size for the heat-map plots.
        process_data: optional transform applied to each file's samples
            (e.g. an FFT); when given, correlations are computed on the
            transformed data and plots are labelled '(frqeuency)'.
        pca_vec_to_plot: either an int (plot the first N singular vectors)
            or a (start, stop) tuple selecting a slice of them.
        debug: verbosity; > 1 also shows the per-class correlation heat map.

    Returns:
        Tuple of (alcoholic, non-alcoholic) singular-vector slices of the
        averaged covariance matrices.

    NOTE(review): the return statement sits inside the ``match_types_in``
    loop, so only the first match type is fully processed — confirm intent.
    """
    corr_accum = None
    if process_data is not None:
        data_type = '(frqeuency)'
    else:
        data_type = '(time-domain)'
    for match_type in match_types_in:
        for aidx, is_alcoholic in enumerate([True, False]):
            # Fresh accumulators for each (match, class) combination.
            corr_accum = cov_accum = None
            if debug:
                print(f'getting example data for match_type[{match_type}]'
                    f' and is_alcoholic[{is_alcoholic}]')
            file_list = sample_file_list(
                limitby=dict(
                    match=match_type,
                    alcoholic=is_alcoholic),
                limit_mult_files=file_samples,
                balance_types=[('subject', 10)], df_type='wide',
                seed=42, debug=max(0, debug - 1))
            for file in tqdm.tqdm(file_list):
                df, info = my_read_eeg_generic(
                    file, orig_tt_indic=('test' in str.lower(file)))
                corr = df.corr()
                cov = df.cov().values
                if corr_accum is None:
                    # Cache sensor names/count from the first file's
                    # (pre-transform) columns; process_data below replaces
                    # the DataFrame with unlabeled columns.
                    sen_names = corr.columns.levels[
                        corr.columns.names.index('sensor')]
                    nsen = len(sen_names)
                if process_data:
                    # Recompute corr/cov on the transformed samples.
                    x, Z, xl, yl = process_data([], df.values, '',
                        '', fs=SAMP_FREQ)
                    df = pd.DataFrame(Z)
                    corr = df.corr()
                    cov = df.cov().values
                if corr_accum is None:
                    corr_accum = corr.values
                    cov_accum = cov/nsen
                else:
                    corr_accum += corr.values
                    cov_accum += cov/nsen
            # Average correlation over files.  NOTE(review): cov_accum is
            # scaled by 1/nsen per file but never divided by
            # len(file_list); SVD directions are scale-invariant, so this
            # only affects the singular-value magnitudes.
            corr_accum /= len(file_list)
            if aidx == 0:
                corr_alcoholic = corr_accum.copy()
                cov_alcoholic = cov_accum.copy()
            else:
                corr_nonalch = corr_accum.copy()
                cov_nonalch = cov_accum.copy()
            if debug > 1:
                pyplot.figure(figsize=figsize)
                pyplot.pcolor(np.flipud(corr_accum))
                pyplot.xticks(np.arange(nsen), sen_names)
                pyplot.yticks(np.arange(nsen), reversed(sen_names))
                pyplot.title(f'corr - across sensors {data_type} - '
                    f'is_alcoholic[{is_alcoholic}] - match[{match_type}]')
                pyplot.colorbar()
                pyplot.show()
        # PCA via SVD of each class's accumulated covariance matrix.
        Ua, svs_a, Va = np.linalg.svd(cov_alcoholic, full_matrices=False,
            compute_uv=True)
        Una, svs_na, Vna = np.linalg.svd(cov_nonalch, full_matrices=False,
            compute_uv=True)
        print('SVec size', Una.shape)
        pyplot.figure(figsize=(figsize[0], 6))
        leg = []
        lg, = pyplot.plot(svs_a, label='alcoholic')
        leg.append(lg)
        lg, = pyplot.plot(svs_na, label='not alcoholic')
        leg.append(lg)
        pyplot.legend(handles=leg)
        pyplot.title(f'PCA decomposition: SVs - across sensors {data_type} - '
            f'- match[{match_type}]')
        pyplot.show()
        pyplot.figure(figsize=(figsize[0]+4, 8))
        leg = []
        # Normalize pca_vec_to_plot to a (start, stop) range.
        is_slice = isinstance(pca_vec_to_plot, tuple)
        if is_slice:
            pca_vec_to_plot_count = pca_vec_to_plot[1] - pca_vec_to_plot[0]
        else:
            pca_vec_to_plot_count = pca_vec_to_plot
            pca_vec_to_plot = (0, pca_vec_to_plot)
        pca_vec_to_plot_count = min(pca_vec_to_plot_count, nsen)
        # Each singular vector is offset vertically by -i for readability.
        for i, idx in enumerate(range(*pca_vec_to_plot)):
            lg, = pyplot.plot(-i + Ua[:, idx], '-',
                linewidth=2,
                label=f'feat[{idx}]: alcoholic')
            leg.append(lg)
        for i, idx in enumerate(range(*pca_vec_to_plot)):
            lg, = pyplot.plot(-i + Una[:, idx], '--',
                linewidth=2,
                label=f'feat[{idx}]: not alcoholic')
            leg.append(lg)
        pyplot.xticks(np.arange(nsen), sen_names, fontsize=7)
        pyplot.xlim((0, nsen + 10))
        pyplot.yticks(-np.arange(pca_vec_to_plot_count), [
            f'PCA_dim[{i}]' for i in np.arange(pca_vec_to_plot_count)])
        pyplot.legend(handles=leg)
        # pyplot.xticks(np.arange(nsen), sen_names)
        # pyplot.yticks(np.arange(nsen), reversed(sen_names))
        ind_str = (f"{pca_vec_to_plot[0]} to {pca_vec_to_plot[1]-1}" if is_slice
            else f"- first {pca_vec_to_plot_count}")
        pyplot.title(f'PCA decomposition: singular vectors {ind_str} '
            f'- across sensors {data_type} - '
            f'match[{match_type}]')
        # Difference of class correlation matrices highlights sensor pairs
        # whose coupling differs between the two classes.
        pyplot.figure(figsize=figsize)
        pyplot.pcolor(np.flipud(corr_alcoholic - corr_nonalch))
        pyplot.xticks(np.arange(nsen), sen_names)
        pyplot.yticks(np.arange(nsen), reversed(sen_names))
        pyplot.title(f'corr - across sensors {data_type} - '
            f'(alcoholic-nonalcoholic) - match[{match_type}]')
        pyplot.colorbar()
        pyplot.show()
        if is_slice:
            return Ua[:, pca_vec_to_plot[0]:pca_vec_to_plot[1]], Una[:,
                pca_vec_to_plot[0]:pca_vec_to_plot[1]]
        else:
            return Ua[:, :pca_vec_to_plot_count], Una[:, :pca_vec_to_plot_count]
def plot_data_subject_dirs(data_dirs=None, file_list=None,
        labelby=None, limitby=None, plots=None, figsize=None,
        transparency=1., yscale='linear', xrange=None,
        yrange=None, force_axes_same_scale=True,
        process_data=pass_through, limit_mult_files=np.inf, debug=1):
    """Plot EEG data by searching subject directories, with some options.

    Args:
        data_dirs: directories to scan for files (mutually exclusive with
            ``file_list``); when both are None, files are sampled.
        file_list: explicit list of files to plot.
        labelby: metadata key used to label traces when overlaying files.
        limitby: metadata filters passed to the file sampler/accumulator.
        plots: dict selecting the plot style; keys 'grid', 'overlap',
            'threed', and 'all_data_traces' (truthy => overlay all files on
            shared axes; a string selects a single sensor to overlay).
        figsize: figure size; style-dependent defaults when None.
        transparency: trace alpha (defaults to 0.5 in overlay mode).
        force_axes_same_scale: in overlay mode, grow a running min/max so
            every file is drawn with common y-limits.
        process_data: transform (time, Z, xlabel, ylabel, fs) -> same tuple,
            e.g. for converting to the frequency domain.
        limit_mult_files: max files to plot, or a (limit, balance) tuple.
        debug: 0 silent, 1 progress bar, >1 per-file prints.

    Returns:
        The list of files that was plotted.
    """
    df_type = 'wide'
    if plots is None:
        plots = dict(grid=True)
    senlistorder = None
    # Overlay-all-files mode: every file's traces drawn on shared axes.
    all_data_overlaid = ('all_data_traces' in plots and
        (plots['all_data_traces'] is not None))
    printed_entry_info = False
    # Resolve the file list: sample it, accumulate from directories, or
    # use the explicit list the caller supplied.
    if ((data_dirs is None) and (file_list is None)):
        if isinstance(limit_mult_files, tuple):
            limit_mult, bal_list = limit_mult_files
        else:
            bal_list = None
            limit_mult = limit_mult_files
        if np.isinf(limit_mult):
            limit_mult = None
        file_list = sample_file_list(
            limitby=limitby,
            limit_mult_files=limit_mult,
            balance_types=bal_list, df_type=df_type,
            seed=42, debug=max(0, debug - 1))
    if file_list is None:
        file_list, unique_entries, total_files = accumulate_subject_file_list(
            data_dirs, limitby=limitby, limit_mult_files=limit_mult_files,
            df_type=df_type, debug=debug)
        if debug:
            print('unique entries in metadata from file accumulation')
            for k in unique_entries:
                print(f'   {k}: {unique_entries[k]}')
            printed_entry_info = True
    else:
        total_files = len(file_list)
    plot_sensor = None
    if all_data_overlaid:
        if transparency == 1.:
            transparency = 0.5
        # Count requested base plot styles (everything except the
        # 'all_data*' control keys); default one in if none chosen.
        plot_to_make = sum([bool(plots[k]) for k in plots if 'all_data' not in k])
        if isinstance(plots['all_data_traces'], str):
            plot_sensor = plots['all_data_traces']
        if plot_to_make == 0:
            if isinstance(plots['all_data_traces'], str):
                plots['overlap'] = True
            else:
                plots['grid'] = True
        assert sum([bool(plots[k]) for k in plots if 'all_data' not in k]) == 1, (
            "cannot display multiple plot types")
        assert isinstance(plots['all_data_traces'], str) or (
            'overlap' not in plots or not(plots['overlap'])), (
            "cannot plot single overlapping plot if sensor is not specified")
        if figsize is None and 'grid' in plots and isinstance(plots['grid'], str):
            if plots['grid'].startswith('square'):
                figsize = (16, 18)
            else:
                figsize = (15, 64 * 8)
        # One shared figure accumulates traces from every file.
        pyplot.figure(figsize=figsize)
        legd = []
        running_min_max = (np.inf, -np.inf)
        if debug == 1:
            progress_bar = tqdm.tqdm(total=total_files, miniters=1)
    else:
        legd = None
        if figsize is None:
            figsize = (12, 14)
    if isinstance(limit_mult_files, tuple):
        limit_mult_files = limit_mult_files[0]
    file_count = 0
    color_dict = dict()
    unique_entries = defaultdict(set)
    for file in file_list:
        # Files under a 'test' path come from the original test split.
        orig_data_dir = int(('test' in str.lower(file)))
        if file in files_skip_processing:
            continue
        if all_data_overlaid:
            if debug == 1:
                progress_bar.n = file_count
                progress_bar.set_description('files processed')
            if file_count >= limit_mult_files:
                break
        else:
            # Interactive one-file-at-a-time mode in a notebook.
            clear_output()
        full_file_url = file
        if debug > 1:
            print(f'read file: {full_file_url}')
        df, info = my_read_eeg_generic(full_file_url, df_type=df_type,
            orig_tt_indic=orig_data_dir)
        if all_data_overlaid:
            if labelby and labelby in info:
                id = labelby + ':' + str(info[labelby])
            else:
                id = info['subject']
        else:
            id = None
        if debug > 1:
            print(' | '.join([f'{n:>8s}:{str(v):4s}' for n, v in info.items()]))
        sen_index = df.columns.names.index('sensor')
        senlist = df.columns.levels[sen_index]
        # In overlay mode all files must present sensors in the same order,
        # otherwise per-subplot traces would mix sensors.
        if senlistorder is None:
            senlistorder = senlist
        elif all_data_overlaid:
            assert all([sl == chkl
                for sl, chkl, in zip(senlist, senlistorder)]), (
                    'different data set has list of sensors in a '
                    'different order')
        Z = df.values
        nsamp, nsen = Z.shape
        time = np.arange(nsamp) / SAMP_FREQ
        x_data, Z, xlabel, ylabel = process_data(time, Z, 'time (s)',
            'voltage (uV)', fs=SAMP_FREQ)
        if all_data_overlaid and force_axes_same_scale:
            # Grow the global min/max so every overlaid file shares limits.
            running_min_max = (min(Z.min(), running_min_max[0]),
                max(Z.max(), running_min_max[1]))
            minv, maxv = running_min_max
        else:
            minv = maxv = None
        if ('overlap' in plots and plots['overlap']):
            plot_all_overlaid(x_data, Z, xlabel, ylabel, senlist, figsize,
                id=id, yscale=yscale, yrange=yrange, xrange=xrange,
                multi_trace_plot_labels=(file_count == 0),
                color_dict=color_dict, transparency=transparency,
                plot_sensor=plot_sensor, legend=legd)
        if ('grid' in plots and plots['grid']):
            grid_square = (not(isinstance(plots['grid'], str)) or
                plots['grid'].startswith('square'))
            plot_grid(x_data, Z, xlabel, ylabel, senlist, minv, maxv,
                id=id, grid_square=grid_square, figsize=figsize,
                multi_trace_plot_labels=(file_count == 0),
                yscale=yscale, yrange=yrange, xrange=xrange,
                color_dict=color_dict, transparency=transparency,
                legend=legd)
        if ('threed' in plots and plots['threed']) and not(
                all_data_overlaid):
            # NOTE(review): `df.columns.labels` was removed in pandas 0.24+
            # (now `codes`), and `.values()` on it looks suspect — verify.
            y_data = df.columns.labels[sen_index].values()
            plot_3d(x_data, y_data, Z, df, xlabel, ylabel, figsize=figsize)
        if not(all_data_overlaid):
            input('press enter to cont...')
        file_count += 1
        for k in info:
            unique_entries[k].add(info[k])
        if file_count >= limit_mult_files:
            break
    if all_data_overlaid:
        if 'overlap' in plots and plots['overlap']:
            # Finish the shared overlay figure built up across files.
            pyplot.xlabel(xlabel, fontsize=14)
            pyplot.ylabel(ylabel, fontsize=15)
            # if minmax[1]/(minmax[0] if minmax[0] > 0 else 1.) > 1e1:
            #     pyplot.axes().set_xscale('log', basex=2)
            pyplot.title(f'Sensor: {plots["all_data_traces"]}', fontsize=15)
            pyplot.legend(handles=legd, fontsize=15)
            pyplot.show()
    if debug and not(printed_entry_info):
        print('unique entries in metadata from file accumulation')
        for k in unique_entries:
            print(f'   {k}: {unique_entries[k]}')
    return file_list
def aggregate_behavior(Z):
    """Compute per-sample summary statistics across sensors.

    Args:
        Z: array of shape (nsamp, nsen) — samples by sensors.

    Returns:
        Tuple ``(nsamp, nsen, cmpr_high_variability, median_trace, dev)``:
        ``median_trace`` (shape (nsamp,)) is the per-sample median across
        sensors; ``dev`` (shape (nsamp,)) is the per-sample standard
        deviation of the sensors about that median; and
        ``cmpr_high_variability`` is a list of ``nsen`` booleans flagging
        sensors that exceed ``median + 2*dev`` on more than half the
        samples.
    """
    nsamp, nsen = Z.shape
    median_trace = np.median(Z, axis=1)
    # Broadcast the median over sensors with a plain ndarray.  The previous
    # np.matrix-based version produced a (nsamp, 1) matrix for `dev`, which
    # silently broadcast `median_trace + 2 * dev` to an (nsamp, nsamp)
    # array in the threshold test below (np.matrix is also deprecated).
    dev = np.std(Z - median_trace[:, np.newaxis], axis=1)
    threshold = median_trace + 2 * dev
    cmpr_high_variability = [
        bool((Z[:, sen_i] > threshold).sum() / nsamp > 0.5)
        for sen_i in range(nsen)
    ]
    return nsamp, nsen, cmpr_high_variability, median_trace, dev
def plot_grid(x_data, Z, xlabel, ylabel, senlist,
        minv=None, maxv=None, id=None, grid_square=True,
        figsize=(12, 15), multi_trace_plot_labels=False,
        yscale='linear', xrange=None, yrange=None,
        color_dict={}, transparency=1., legend=None):
    """Plot a grid of per-sensor traces (one subplot per sensor).

    When both ``id`` and ``legend`` are supplied the function is being
    called repeatedly to overlay traces from many files onto an existing
    figure; otherwise it creates its own figure for a single recording
    and shows it.

    Args:
        x_data: shared x-axis values (time or frequency bins).
        Z: data array of shape (nsamp, nsen).
        xlabel, ylabel: axis labels.
        senlist: sensor names, one per column of ``Z``.
        minv, maxv: optional common y-limits for all subplots (overlay mode).
        id: label identifying this file's trace when overlaying.
        grid_square: lay subplots out in a near-square grid rather than a
            single column.
        multi_trace_plot_labels: draw per-subplot titles/axis labels
            (only wanted for the first overlaid file).
        color_dict: maps ``id`` -> line color so one file keeps the same
            color in every subplot (mutated in place; shared by design).
        legend: list of line handles appended to on the first trace per id.
    """
    nsen = len(senlist)
    all_data_overlaid = (id is not None) and (legend is not None)
    grid_base_sz = int(np.ceil(np.sqrt(nsen)))
    if grid_square:
        ncols = nrows = grid_base_sz
    else:
        ncols, nrows = 1, nsen
    coli = rowi = 0
    if all_data_overlaid:
        pyplot.subplots_adjust(wspace=.2, hspace=.35)
        for sen_i, sen in enumerate(senlist):
            pyplot.subplot(nrows, ncols, sen_i + 1)
            # Reuse a stable color per id; the first occurrence registers
            # the legend handle and records the auto-assigned color.
            if id in color_dict:
                clrdict = {'color': color_dict[id]}
            else:
                clrdict = {}
            lg, = pyplot.plot(x_data, Z[:, sen_i],
                '-', label=id, alpha=transparency, **clrdict)
            if id not in color_dict:
                legend.append(lg)
                color_dict[id] = lg.get_color()
            if minv is not None and maxv is not None:
                pyplot.ylim((minv, maxv))
            if multi_trace_plot_labels:
                pyplot.title(sen, fontdict=dict(size=10))
                # Only label the outer middle subplots to avoid clutter.
                if ncols == 1 or (coli == grid_base_sz//2 and
                        rowi == grid_base_sz):
                    pyplot.xlabel(xlabel)
                if ncols == 1 or (coli == 0 and
                        rowi == grid_base_sz//2):
                    pyplot.ylabel(ylabel)
                pyplot.grid(True)
                if yscale:
                    pyplot.yscale(yscale)
                if xrange:
                    pyplot.xlim(xrange)
                if yrange:
                    pyplot.ylim(yrange)
            coli += 1
            if coli >= ncols:
                rowi += 1
                coli = 0
    else:
        minv, maxv = Z.min(), Z.max()
        nsamp, nsen, cmpr_high_variability, median_trace, dev = \
            aggregate_behavior(Z)
        pyplot.figure(figsize=figsize)
        pyplot.subplots_adjust(wspace=.2, hspace=.35)
        for sen_i, sen in enumerate(senlist):
            pyplot.subplot(nrows, ncols, sen_i + 1)
            # Fix: index the per-sensor flag.  The previous code tested the
            # whole (non-empty, hence always truthy) list, so every trace
            # was drawn dotted regardless of its variability.
            lg, = pyplot.plot(x_data, Z[:, sen_i],
                (':' if cmpr_high_variability[sen_i] else '-'),
                label=sen)
            pyplot.ylim((minv, maxv))
            pyplot.title(sen, fontdict=dict(size=10))
            pyplot.tick_params(axis='y', which='major', labelsize=7)
            if ncols == 1 or (coli == int(grid_base_sz/2) and
                    rowi == grid_base_sz - 1):
                pyplot.xlabel(xlabel)
            if ncols == 1 or (coli == 0 and
                    rowi == int(grid_base_sz/2)):
                pyplot.ylabel(ylabel)
            pyplot.grid(True)
            if yscale:
                pyplot.yscale(yscale)
            if xrange:
                pyplot.xlim(xrange)
            if yrange:
                pyplot.ylim(yrange)
            coli += 1
            if coli >= ncols:
                rowi += 1
                coli = 0
        pyplot.show()
def plot_3d(x_data, y_data, Z, df, xlabel, ylabel, xrange=None,
        yrange=None, figsize=(12, 12)):
    """Plot a 3-D surface of the samples X sensors EEG data.

    Args:
        x_data: x-axis values; assumes a (nsamp, 1) column so np.repeat
            along axis=1 tiles it per sensor — TODO confirm shape.
        y_data: numeric positions of the sensors along the y axis.
        Z: array of shape (nsamp, nsen); only used here for its shape.
        df: wide-format DataFrame whose values form the surface heights.
        xlabel, ylabel: labels for the x axis and the z (value) axis.
        xrange, yrange: accepted but currently unused.
    """
    fig = pyplot.figure(figsize=figsize)
    ax = fig.add_subplot(111, projection='3d')
    nsamp, nsen = Z.shape
    sen_index = df.columns.names.index('sensor')
    senlist = df.columns.levels[sen_index]
    pyplot.yticks(y_data, senlist)
    # Build full (nsamp, nsen) coordinate grids for the surface.
    # NOTE(review): np.matrix is deprecated; np.asarray(y_data)[None, :]
    # would serve the same purpose.
    ax.plot_surface(
        np.repeat(x_data,
            nsen, axis=1),
        np.repeat(np.matrix(y_data), nsamp, axis=0),
        df.values,
        cmap=cm.coolwarm)
    pyplot.xlabel(xlabel)
    pyplot.ylabel('Sensor name')
    ax.set_zlabel(ylabel)
    # Fixed viewing angle chosen for readability of the sensor axis.
    ax.view_init(elev=45., azim=-130)
    ax.tick_params(axis='y', which='major', labelsize=4)
    pyplot.show()
def plot_all_overlaid(x_data, Z, xlabel, ylabel, sen_list, figsize=(12, 14),
        multi_trace_plot_labels=True,
        id=None, plot_sensor=None, yscale='linear', xrange=None,
        yrange=None, legend=None, color_dict={}, transparency=1.):
    """Plot overlapping traces: all sensors of one recording, or — when
    ``plot_sensor``/``legend``/``id`` are all given — a single sensor of
    many recordings accumulated onto a shared, caller-managed figure.

    Args:
        x_data: shared x-axis values.
        Z: data array of shape (nsamp, nsen).
        xlabel, ylabel: axis labels.
        sen_list: sensor names, one per column of ``Z``.
        multi_trace_plot_labels: also draw the median trace (single-file
            mode only).
        id: trace label in multi-file mode.
        plot_sensor: sensor name selecting the one column drawn per file.
        legend: shared list of legend handles in multi-file mode.
        color_dict: id -> color mapping kept stable across calls
            (mutated in place; shared by design).
    """
    all_data_overlaid = (plot_sensor is not None) and (legend is not None) and (
        id is not None)
    if not(all_data_overlaid):
        pyplot.figure(figsize=figsize)
        legend = []
        # The aggregates depend only on Z: compute them once up front (the
        # previous version recomputed them inside the loop for every
        # sensor).
        nsamp, nsen, cmpr_high_variability, median_trace, dev = \
            aggregate_behavior(Z)
    else:
        # Fix: in multi-file mode `median_trace` was never bound, so the
        # `median_trace is not None` test below raised NameError on the
        # first file (which is called with multi_trace_plot_labels=True).
        cmpr_high_variability = median_trace = None
    for sen_i, sen in enumerate(sen_list):
        if all_data_overlaid:
            if sen == plot_sensor:
                # Keep a stable color per file id across calls.
                if id in color_dict:
                    clrdict = {'color': color_dict[id]}
                else:
                    clrdict = {}
                lg, = pyplot.plot(x_data, Z[:, sen_i],
                    '-', label=id, alpha=transparency, **clrdict)
                if id not in color_dict:
                    legend.append(lg)
                    color_dict[id] = lg.get_color()
            else:
                continue
        else:
            # Dotted line marks sensors flagged as persistently above the
            # median + 2*std band.
            lg, = pyplot.plot(x_data, Z[:, sen_i],
                (':' if (cmpr_high_variability and
                    cmpr_high_variability[sen_i]) else '-'),
                label=sen, alpha=transparency)
            legend.append(lg)
        if multi_trace_plot_labels and median_trace is not None:
            lg, = pyplot.plot(x_data, median_trace, '--',
                label='median', linewidth=5)
    pyplot.xlabel(xlabel, fontsize=14)
    pyplot.ylabel(ylabel, fontsize=15)
    pyplot.legend(handles=legend, fontsize=7)
    pyplot.title('Sensor traces', fontsize=15)
    if yscale:
        pyplot.yscale(yscale)
    if xrange:
        pyplot.xlim(xrange)
    if yrange:
        pyplot.ylim(yrange)
    if not(all_data_overlaid):
        pyplot.show()
| 38.831826 | 86 | 0.557698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,670 | 0.124336 |
dc36fd31dfeeb75d7050190c4caf81b68761883e | 8,634 | py | Python | megumin/modulos/admin/mute.py | davitudoplugins1234/WhiterKang | f4779d2c440849fa97e7014cd856f885b0abbc87 | [
"MIT"
] | 2 | 2022-02-01T17:55:44.000Z | 2022-03-27T17:21:55.000Z | megumin/modulos/admin/mute.py | davitudoplugins1234/WhiterKang | f4779d2c440849fa97e7014cd856f885b0abbc87 | [
"MIT"
] | null | null | null | megumin/modulos/admin/mute.py | davitudoplugins1234/WhiterKang | f4779d2c440849fa97e7014cd856f885b0abbc87 | [
"MIT"
] | null | null | null | import asyncio
from pyrogram import filters
from pyrogram.errors import PeerIdInvalid, UserIdInvalid, UsernameInvalid
from pyrogram.types import ChatPermissions, Message
from megumin import megux
from megumin.utils import (
check_bot_rights,
check_rights,
extract_time,
is_admin,
is_dev,
is_self,
sed_sticker,
get_collection,
)
@megux.on_message(filters.command("mute", prefixes=["/", "!"]))
async def _mute_user(_, message: Message):
    """Mute a user in the chat (``/mute`` as a reply, or ``/mute <id> [reason]``).

    Silently ignores the command when it has been disabled for the chat;
    refuses to mute the bot itself, its developer, or chat admins.
    """
    # Bail out silently when this command has been disabled for the chat.
    DISABLED = get_collection(f"DISABLED {message.chat.id}")
    query = "mute"
    off = await DISABLED.find_one({"_cmd": query})
    if off:
        return
    chat_id = message.chat.id
    # The issuer must themselves be allowed to restrict members.
    if not await check_rights(chat_id, message.from_user.id, "can_restrict_members"):
        await message.reply("Você não tem direitos suficientes para silenciar usuários")
        return
    # Resolve the target: from the replied-to message, or from the first
    # command argument.  `cmd > 5` means there is text after "/mute".
    cmd = len(message.text)
    replied = message.reply_to_message
    reason = ""
    if replied:
        id_ = replied.from_user.id
        if cmd > 5:
            _, reason = message.text.split(maxsplit=1)
    elif cmd > 5:
        _, args = message.text.split(maxsplit=1)
        if " " in args:
            id_, reason = args.split(" ", maxsplit=1)
        else:
            id_ = args
    else:
        await message.reply("`Nenhum User_id válido ou mensagem especificada.`")
        return
    try:
        user = await megux.get_users(id_)
        user_id = user.id
        mention = user.mention
    except (UsernameInvalid, PeerIdInvalid, UserIdInvalid):
        await message.reply(
            "`User_id ou nome de usuário inválido, tente novamente com informações válidas ⚠`"
        )
        return
    # Protected targets: the bot itself, its developer, and chat admins.
    # NOTE(review): is_dev/is_admin are called without await while is_self
    # is awaited — presumably they are synchronous helpers; confirm.
    if await is_self(user_id):
        await message.reply("Eu não vou mutar!")
        return
    if is_dev(user_id):
        await message.reply("Porque eu iria mutar meu desenvolvedor? Isso me parece uma idéia muito idiota.")
        return
    if is_admin(chat_id, user_id):
        await message.reply("Porque eu iria mutar um(a) administrador(a)? Isso me parece uma idéia bem idiota.")
        return
    # The bot also needs restrict rights to perform the mute.
    if not await check_rights(chat_id, megux.me.id, "can_restrict_members"):
        await message.reply("Não posso restringir as pessoas aqui! Certifique-se de que sou administrador e de que posso adicionar novos administradores.")
        await sed_sticker(message)
        return
    sent = await message.reply("`Mutando Usuário...`")
    try:
        # Empty ChatPermissions() revokes every permission => mute.
        await megux.restrict_chat_member(chat_id, user_id, ChatPermissions())
        await asyncio.sleep(1)
        await sent.edit(
            f"{mention} está silenciado(mutado) em <b>{message.chat.title}</b>\n"
            f"<b>Motivo:</b> `{reason or None}`"
        )
    except Exception as e_f:
        await sent.edit(f"`Algo deu errado 🤔`\n\n**ERROR**: `{e_f}`")
@megux.on_message(filters.command("tmute", prefixes=["/", "!"]))
async def _tmute_user(_, message: Message):
    """Temporarily mute a user (``/tmute <time> [reason]`` as a reply, or
    ``/tmute <id> <time> [reason]``).

    The duration (e.g. ``7d``) is parsed by ``extract_time``; the command
    refuses protected targets (the bot, its developer, chat admins).
    """
    # Bail out silently when this command has been disabled for the chat.
    DISABLED = get_collection(f"DISABLED {message.chat.id}")
    query = "tmute"
    off = await DISABLED.find_one({"_cmd": query})
    if off:
        return
    chat_id = message.chat.id
    # The issuer must themselves be allowed to restrict members.
    if not await check_rights(chat_id, message.from_user.id, "can_restrict_members"):
        await message.reply("Você não tem direitos suficientes para silenciar usuários")
        return
    # Resolve the target and the "<time> [reason]" argument string.
    # `cmd > 6` means there is text after "/tmute".
    cmd = len(message.text)
    replied = message.reply_to_message
    if replied:
        id_ = replied.from_user.id
        if cmd <= 6:
            await message.reply("__Você deve especificar um tempo após o comando. Por exemplo:__ <b>/tmute 7d.</b>")
            return
        _, args = message.text.split(maxsplit=1)
    elif cmd > 6:
        _, text = message.text.split(maxsplit=1)
        if " " in text:
            id_, args = text.split(" ", maxsplit=1)
        else:
            await message.reply("__Você deve especificar um tempo após o comando. Por exemplo:__ **/tmute 7d.**")
            # Fix: without this return, execution fell through with `args`
            # unbound and raised NameError below.
            return
    else:
        await message.reply("`Nenhum User_id válido ou mensagem especificada.`")
        return
    # Split the argument string into the duration token and the reason.
    if " " in args:
        split = args.split(None, 1)
        time_val = split[0].lower()
        reason = split[1]
    else:
        time_val = args
        reason = ""
    time_ = await extract_time(message, time_val)
    if not time_:
        # extract_time already replied with the parse error.
        return
    try:
        user = await megux.get_users(id_)
        user_id = user.id
        mention = user.mention
    except (UsernameInvalid, PeerIdInvalid, UserIdInvalid):
        await message.reply(
            "`User_id ou nome de usuário inválido, tente novamente com informações válidas ⚠`"
        )
        return
    # Protected targets: the bot itself, its developer, and chat admins.
    if await is_self(user_id):
        await message.reply("Eu não vou mutar!")
        return
    if is_dev(user_id):
        await message.reply("Porque eu iria mutar meu desenvolvedor? Isso me parece uma idéia muito idiota.")
        return
    if is_admin(chat_id, user_id):
        await message.reply("Porque eu iria mutar um(a) administrador(a)? Isso me parece uma idéia bem idiota.")
        return
    # The bot also needs restrict rights to perform the mute.
    if not await check_rights(chat_id, megux.me.id, "can_restrict_members"):
        await message.reply("Não posso restringir as pessoas aqui! Certifique-se de que sou administrador e de que posso adicionar novos administradores.")
        await sed_sticker(message)
        return
    sent = await message.reply("`Mutando usuário...`")
    try:
        # Empty ChatPermissions() revokes everything until `time_` expires.
        await megux.restrict_chat_member(chat_id, user_id, ChatPermissions(), time_)
        await asyncio.sleep(1)
        await sent.edit(
            f"{mention} está silenciado(mutado) por <b>{time_val}</b> em <b>{message.chat.title}</b>\n"
            f"<b>Motivo</b>: `{reason or None}`"
        )
    except Exception as e_f:  # pylint: disable=broad-except
        await sent.edit(f"`Algo deu errado 🤔`\n\n**ERROR**: `{e_f}`")
@megux.on_message(filters.command("unmute", prefixes=["/", "!"]))
async def _unmute_user(_, message: Message):
    """Unmute a user (``/unmute`` as a reply, or ``/unmute <id>``).

    Silently ignores the command when disabled for the chat; admins are
    never muted, so unmuting them is rejected.
    """
    # Bail out silently when this command has been disabled for the chat.
    DISABLED = get_collection(f"DISABLED {message.chat.id}")
    query = "unmute"
    off = await DISABLED.find_one({"_cmd": query})
    if off:
        return
    chat_id = message.chat.id
    # The issuer must be allowed to restrict members.
    if not await check_rights(chat_id, message.from_user.id, "can_restrict_members"):
        await message.reply("Você não tem direitos suficientes para silenciar usuários")
        return
    # Resolve the target: replied-to user, or first command argument
    # (`> 7` means there is text after "/unmute").
    replied = message.reply_to_message
    if replied:
        id_ = replied.from_user.id
    elif len(message.text) > 7:
        _, id_ = message.text.split(maxsplit=1)
    else:
        await message.reply("`Nenhum User_id válido ou mensagem especificada.`")
        return
    try:
        user = (await megux.get_users(id_))
        mention = user.mention
        user_id = user.id
    except (UsernameInvalid, PeerIdInvalid, UserIdInvalid):
        await message.reply(
            "`User_id ou nome de usuário inválido, tente novamente com informações válidas ⚠`"
        )
        return
    if await is_self(user_id):
        return
    if is_admin(chat_id, user_id):
        await message.reply("Este usuario é administrador(a), ele não precisa ser desmutado(a).")
        return
    # The bot also needs restrict rights to lift the restriction.
    if not await check_rights(chat_id, megux.me.id, "can_restrict_members"):
        await message.reply("Não posso restringir as pessoas aqui! Certifique-se de que sou administrador e de que posso adicionar novos administradores.")
        await sed_sticker(message)
        return
    sent = await message.reply("Desmutando Usuário...")
    try:
        # unban_chat_member also clears restriction-type bans (mutes).
        await megux.unban_chat_member(chat_id, user_id)
        await sent.edit(f"Ok, {mention} já pode começar a falar novamente em {message.chat.title}!")
    except Exception as e_f:
        await sent.edit(f"`Algo deu errado!` 🤔\n\n**ERROR:** `{e_f}`")
@megux.on_message(filters.command("muteme", prefixes=["/", "!"]))
async def muteme_(_, message: Message):
    """Let a non-admin user mute themselves in the current chat."""
    # Skip entirely when the command has been disabled for this chat.
    disabled = get_collection(f"DISABLED {message.chat.id}")
    if await disabled.find_one({"_cmd": "muteme"}):
        return
    chat = message.chat.id
    sender = message.from_user.id
    # Admins cannot be muted, so refuse the request up front.
    if is_admin(chat, sender):
        await message.reply("Por que eu mutaria um(a) administrador(a)? Parece uma ideia bem idiota.")
        return
    try:
        # The bot itself needs restrict rights to apply the mute.
        if not await check_rights(chat, megux.me.id, "can_restrict_members"):
            await message.reply("Não posso restringir as pessoas aqui! Certifique-se de que sou administrador e de que posso adicionar novos administradores.")
            return
        await message.reply("Sem Problemas, Mutado!")
        # An empty ChatPermissions() revokes every permission => mute.
        await megux.restrict_chat_member(chat, sender, ChatPermissions())
    except Exception as err:
        await message.reply(f"**ERRO:**\n{err}")
| 39.245455 | 163 | 0.643734 | 0 | 0 | 0 | 0 | 8,325 | 0.957007 | 8,064 | 0.927003 | 2,786 | 0.320267 |
dc37303245aa0b25a23b011f2a90851c1f3dd75f | 2,619 | py | Python | conftest.py | juju-solutions/kubeflow | b23fe95b8d239fd979f47784b51a8cb9284ccea4 | [
"Apache-2.0"
] | null | null | null | conftest.py | juju-solutions/kubeflow | b23fe95b8d239fd979f47784b51a8cb9284ccea4 | [
"Apache-2.0"
] | null | null | null | conftest.py | juju-solutions/kubeflow | b23fe95b8d239fd979f47784b51a8cb9284ccea4 | [
"Apache-2.0"
] | null | null | null | import argparse
import os
# Use a custom parser that lets us require a variable from one of CLI or environment variable,
# this way we can pass creds through CLI for local testing but via environment variables in CI
class EnvDefault(argparse.Action):
    """Argument parser action that accepts input from CLI (preferred) or an
    environment variable.

    If a value is not specified as a CLI argument, the content of the
    environment variable named `envvar` is used.  If that variable is also
    unset, the parser will fail citing a missing required argument.

    Note this Action does not accept the `required` and `default` kwargs
    and will set them itself as appropriate.

    Modified from https://stackoverflow.com/a/10551190/5394584
    """

    def __init__(self, option_strings, dest, envvar, **kwargs):
        """Derive `required`/`default` from the environment.

        When the variable exists (even if empty) its value becomes the
        default and the CLI flag is optional; otherwise the flag is
        required on the command line.
        """
        if not envvar:
            raise ValueError(f"EnvDefault requires non-null envvar, got '{envvar}'")
        # Single lookup instead of `in os.environ` followed by indexing.
        default = os.environ.get(envvar)
        required = default is None
        self.envvar = envvar
        super().__init__(option_strings, dest, default=default,
                         required=required, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Store the CLI-provided value on the namespace.
        setattr(namespace, self.dest, values)
def pytest_addoption(parser):
    """Register command-line options for the test run.

    `--username`/`--password` are required but may be supplied either on
    the command line or through the corresponding environment variable
    (useful in CI), via the :class:`EnvDefault` action.
    """
    parser.addoption("--proxy", action="store", help="Proxy to use")
    parser.addoption("--url", action="store", help="Kubeflow dashboard URL")
    parser.addoption("--headful", action="store_true", help="Juju model")

    username_envvar = "KUBEFLOW_AUTH_USERNAME"
    parser.addoption(
        "--username",
        action=EnvDefault,
        envvar=username_envvar,
        # Fix: the quote around the variable name was unterminated.
        help=f"Dex username (email address). Required, but can be passed either through CLI or "
        f"via environment variable '{username_envvar}'",
    )

    password_envvar = "KUBEFLOW_AUTH_PASSWORD"
    parser.addoption(
        "--password",
        action=EnvDefault,
        envvar=password_envvar,
        # Fix: the quote around the variable name was unterminated.
        help=f"Dex password. Required, but can be passed either through CLI or "
        f"via environment variable '{password_envvar}'",
    )
| 40.921875 | 108 | 0.670867 | 1,533 | 0.585338 | 0 | 0 | 0 | 0 | 0 | 0 | 1,464 | 0.558992 |
dc3872f0ebff03ef64b3d36e139b9d9d4c69ef39 | 5,526 | py | Python | fumi/deployer.py | rmed/fumi | f327ea0814d98790dfd021d23d609531a23d03d0 | [
"MIT"
] | 4 | 2015-03-29T16:32:30.000Z | 2021-06-17T22:40:56.000Z | fumi/deployer.py | rmed/fumi | f327ea0814d98790dfd021d23d609531a23d03d0 | [
"MIT"
] | 2 | 2021-03-25T21:27:57.000Z | 2022-03-29T21:53:35.000Z | fumi/deployer.py | rmed/fumi | f327ea0814d98790dfd021d23d609531a23d03d0 | [
"MIT"
] | 1 | 2020-02-09T17:35:14.000Z | 2020-02-09T17:35:14.000Z | # -*- coding: utf-8 -*-
#
# fumi deployment tool
# https://github.com/rmed/fumi
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Rafael Medina García <rafamedgar@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Code for the ``Deployer`` class, which acts as proxy for configurations."""
import gettext
import types
from fumi import messages as m
from fumi import deployments
from fumi.util import cprint
class Deployer(object):
    """Configuration parsed from the ``fumi.yml`` file.

    Attributes:
        source_type (str): Source type (e.g. 'local' or 'git'). Required.
        source_path (str): Path to the source files in local machine. Required.
        host (str): Host to perform the deployment in. Required.
        user (str): User to use for the deployment. Required.
        use_password (bool): Whether or not to use password. If set to ``False``
            (default), will rely on public key authentication. Otherwise, it
            will be asked for during deployment or may be configured through the
            ``password`` attribute.
        password (str): If ``use_password`` is set to True, this attribute can
            be used to specify the password used for the connection. Otherwise
            it will be asked for during deployment.
        deploy_path (str): Remote host path in which to deploy files. Required.
        predep (list[tuple]): List of (type, command) pairs to execute before
            deploying.
        postdep (list[tuple]): List of (type, command) pairs to execute after
            deploying.
        host_tmp (str): In ``local`` deployments, the remote directory to use
            for uploading the compressed files (defaults to ``'/tmp'``).
        keep_max (int): Maximum revisions to keep in the remote server.
        local_ignore (list[str]): List of files (or directories) to ignore in
            ``local`` deployments.
        buffer_size (int): Buffer size (in bytes) for file copying in ``local``
            deployments. Defaults to 1 MB.
        shared_paths (list[str]): List of file and directory paths that
            should be shared accross deployments. These are relative to the
            root of the project and are linked to the current revision.
    """

    def __init__(self, **kwargs):
        # Source information (KeyError on missing required keys is handled
        # by build_deployer).
        self.source_type = kwargs['source-type']
        self.source_path = kwargs['source-path']

        # Destination host information
        self.host = kwargs['host']
        self.user = kwargs['user']
        self.use_password = kwargs.get('use-password', False)
        self.password = kwargs.get('password')
        self.deploy_path = kwargs['deploy-path']

        # Pre-/post-deployment commands: the YAML gives a list of
        # single-key dicts; flatten each to (type, command) tuples.
        self.predep = self._flatten_commands(kwargs.get('predep', []))
        self.postdep = self._flatten_commands(kwargs.get('postdep', []))

        # Optional information
        self.host_tmp = kwargs.get('host-tmp', '/tmp')
        self.keep_max = kwargs.get('keep-max')
        self.local_ignore = kwargs.get('local-ignore')
        self.buffer_size = int(kwargs.get('buffer-size', 1024 * 1024))
        self.shared_paths = kwargs.get('shared-paths', [])

    @staticmethod
    def _flatten_commands(commands):
        """Flatten a list of single-key dicts into (key, value) tuples."""
        flat = []
        for entry in commands:
            for key, value in entry.items():
                flat.append((key, value))
        return flat
def build_deployer(config):
    """Build a Deployer object.

    Arguments:
        config (dict): Parsed section of the YAML configuration file.

    Returns:
        Boolean indicating result and ``Deployer`` instance or ``None``.
    """
    try:
        deployer = Deployer(**config)
    except KeyError as e:
        # Missing required parameter.  Fix: interpolate the key into the
        # message before appending the newline; the previous expression
        # ``m.DEP_MISSING_PARAM + '\n' % key`` applied ``%`` to ``'\n'``
        # (``%`` binds tighter than ``+``) and raised a TypeError.
        key = e.args[0]
        cprint(m.DEP_MISSING_PARAM % key + '\n', 'red')
        return False, None

    # Determine deployment function to use and bind it to the instance.
    if deployer.source_type == 'local':
        cprint(m.DEP_LOCAL)
        deployer.deploy = types.MethodType(deployments.deploy_local, deployer)

    elif deployer.source_type == 'git':
        cprint(m.DEP_GIT)
        deployer.deploy = types.MethodType(deployments.deploy_git, deployer)

    else:
        # Unknown deployment type
        cprint(m.DEP_UNKNOWN % deployer.source_type, 'red')
        return False, None

    # Additional method for preparing/testing the deployment
    deployer.prepare = types.MethodType(deployments.prepare, deployer)

    return True, deployer
| 38.915493 | 80 | 0.661238 | 3,007 | 0.544056 | 0 | 0 | 0 | 0 | 0 | 0 | 3,731 | 0.67505 |
dc38feeb235182bd05de845499f4c3e44aa113e6 | 15,396 | py | Python | artap/tests/_test_scikit.py | tamasorosz/artap | e8df160bfc9c378c3fc96b0b86e92d75d89cf26b | [
"MIT"
] | 5 | 2021-06-13T17:04:37.000Z | 2022-03-04T17:16:06.000Z | artap/tests/_test_scikit.py | tamasorosz/artap | e8df160bfc9c378c3fc96b0b86e92d75d89cf26b | [
"MIT"
] | null | null | null | artap/tests/_test_scikit.py | tamasorosz/artap | e8df160bfc9c378c3fc96b0b86e92d75d89cf26b | [
"MIT"
] | 8 | 2021-03-11T18:23:47.000Z | 2022-02-22T11:13:23.000Z | import math
import unittest
from scipy import integrate
from ..problem import Problem
from ..algorithm_genetic import NSGAII
from ..algorithm_sweep import SweepAlgorithm
from ..benchmark_functions import Booth
from ..results import Results
from ..operators import LHSGenerator
from ..surrogate_scikit import SurrogateModelScikit
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import ExtraTreesRegressor
class MyProblemCoil(Problem):
    """Base class for the coil-design benchmark: 10 coil radii as design
    variables, with semi-analytic evaluation of the magnetic flux density
    produced by a symmetric stack of ring coils.
    """

    def set(self):
        # Ten coil radii (in metres), all bounded to [5 mm, 50 mm].
        self.parameters = [{'name': 'x1', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x2', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x3', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x4', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x5', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x6', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x7', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x8', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x9', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]},
                           {'name': 'x10', 'initial_value': 0.01, 'bounds': [5e-3, 50e-3]}]

    def intl22(self, R2, R, dZ, phi):
        # Distance between a point on the coil (radius R2, angle phi) and
        # the field point at radius R, axial offset dZ.
        return math.sqrt(R2 ** 2 + R ** 2 - 2.0 * R2 * R * math.cos(phi) + dZ ** 2)

    def intg(self, R2, R, dZ):
        # div J = 0 - nonconstant current density
        # Angular integrand for the radial field component; quad with
        # loose tolerances (1e-3) keeps evaluation fast.
        f = lambda phi: math.log(R2 - R * math.cos(phi) + self.intl22(R2, R, dZ, phi)) * math.cos(phi)
        return integrate.quad(f, 0, 2.0 * math.pi, epsabs=1e-3, epsrel=1e-3)[0]

    def inth(self, R2, R, dZ):
        # div J = 0 - nonconstant current density
        # Angular integrand for the axial field component.
        f = lambda phi: - math.log(dZ + self.intl22(R2, R, dZ, phi))
        return integrate.quad(f, 0, 2.0 * math.pi, epsabs=1e-3, epsrel=1e-3)[0]

    def integral(self, rc, zc, R, Z):
        """Return [Br, Bz] at field point (R, Z) produced by the coil pair
        (upper + mirrored lower) whose cross-section starts at (rc, zc).
        """
        w = 0.001       # coil cross-section width [m]
        h = 0.0015      # coil cross-section height [m]
        R1 = rc
        R2 = rc + w
        Z1 = zc
        Z2 = zc + h

        mu0 = 4.0 * math.pi * 1e-7
        Jext = 2e6      # external current density [A/m^2]
        # div J = 0 - nonconstant current density
        C = mu0 * Jext * w * h / (4 * math.pi * (Z2 - Z1) * math.log(R2 / R1))

        # upper coil
        Bru = C * (self.intg(R2, R, Z2 - Z) - self.intg(R2, R, Z1 - Z) - self.intg(R1, R, Z2 - Z) + self.intg(R1, R, Z1 - Z))
        Bzu = C * (self.inth(R2, R, Z2 - Z) - self.inth(R2, R, Z1 - Z) - self.inth(R1, R, Z2 - Z) + self.inth(R1, R, Z1 - Z))

        # lower coil
        Brl = C * (self.intg(R2, R, -Z1 - Z) - self.intg(R2, R, -Z2 - Z) - self.intg(R1, R, -Z1 - Z) + self.intg(R1, R, -Z2 - Z))
        Bzl = C * (self.inth(R2, R, -Z1 - Z) - self.inth(R2, R, -Z2 - Z) - self.inth(R1, R, -Z1 - Z) + self.inth(R1, R, -Z2 - Z))

        return [Bru + Brl, Bzu + Bzl]

    def integral_all(self, R, Z, x):
        """Sum [Br, Bz] contributions of the coil stack at point (R, Z).

        NOTE(review): only 9 coils (k = 0..8) are summed although 10
        parameters are declared in set() — confirm whether x[9] is
        intentionally unused.
        """
        Br = 0.0
        Bz = 0.0
        for k in range(0, 9):
            rc = x[k]
            zc = k * 1.5e-3     # coils stacked every 1.5 mm axially
            B = self.integral(rc, zc, R, Z)
            Br = Br + B[0]
            Bz = Bz + B[1]
        return [Br, Bz]

    def evaluate(self, x):
        # Abstract here; subclasses implement the actual objective(s).
        pass
class MyProblemCoilOne(MyProblemCoil):
def evaluate(self, individual):
x = individual.vector
B0 = 2e-3
dxy = 0.5e-3
nx = 8
ny = 8
dx = (5e-3 - dxy) / (nx - 1)
dy = (5e-3 - dxy) / (ny - 1)
f1 = 0.0
for i in range(0, nx):
xx = dxy + i * dx
for j in range(0, ny):
yy = dxy + j * dy
[Br, Bz] = self.integral_all(xx, yy, x)
Bp1s = math.sqrt((Br - 0.0)**2 + (Bz - B0)**2)
f1 = max(f1, Bp1s)
print("value = {}, \tparams = {}".format([f1], x))
return [f1]
class MyProblemCoilMultiTwo1(MyProblemCoil):
def evaluate(self, x):
B0 = 2e-3
dxy = 0.5e-3
nx = 8
ny = 8
dx = (5e-3 - dxy) / (nx - 1)
dy = (5e-3 - dxy) / (ny - 1)
f1 = 0.0
f2 = sum(x) * 1e3
for i in range(0, nx):
xx = dxy + i * dx
for j in range(0, ny):
yy = dxy + j * dy
[Br, Bz] = self.integral_all(xx, yy, x)
Bp1s = math.sqrt((Br - 0.0)**2 + (Bz - B0)**2)
f1 = max(f1, Bp1s)
return [1e3 * f1, 1e3 * f2]
class MyProblemCoilMultiTwo2(MyProblemCoil):
def __init__(self, name):
super().__init__(name, costs=['F1', 'F2'])
def evaluate(self, individual):
x = individual.vector
B0 = 2e-3
dxy = 0.5e-3
nx = 8
ny = 8
dx = (5e-3 - dxy) / (nx - 1)
dy = (5e-3 - dxy) / (ny - 1)
f1 = 0.0
f3 = 0.0
for i in range(0, nx):
xx = dxy + i * dx
for j in range(0, ny):
yy = dxy + j * dy
[Br, Bz] = self.integral_all(xx, yy, x)
Bp1s = math.sqrt((Br - 0.0)**2 + (Bz - B0)**2)
f1 = max(f1, Bp1s)
dxsi = 0.5e-3
[Brp, Bzp] = self.integral_all(xx + dxsi, yy, x)
[Brm, Bzm] = self.integral_all(xx - dxsi, yy, x)
Bp3 = math.sqrt((Brp - Br) ** 2 + (Bzp - Bz) ** 2) + math.sqrt((Brm - Br) ** 2 + (Bzm - Bz) ** 2)
f3 = max(f3, Bp3)
return [1e3 * f1, 1e3 * f3]
class MyProblemCoilMultiThree(MyProblemCoil):
def __init__(self, name):
super().__init__(name, costs=['F1', 'F2', 'F3'])
def evaluate(self, individual):
x = individual.vector
B0 = 2e-3
dxy = 0.5e-3
nx = 8
ny = 8
dx = (5e-3 - dxy) / (nx - 1)
dy = (5e-3 - dxy) / (ny - 1)
f1 = 0.0
f2 = sum(x)*1e3
f3 = 0.0
for i in range(0, nx):
xx = dxy + i * dx
for j in range(0, ny):
yy = dxy + j * dy
[Br, Bz] = self.integral_all(xx, yy, x)
Bp1s = math.sqrt((Br - 0.0)**2 + (Bz - B0)**2)
f1 = max(f1, Bp1s)
dxsi = 0.5e-3
[Brp, Bzp] = self.integral_all(xx + dxsi, yy, x)
[Brm, Bzm] = self.integral_all(xx - dxsi, yy, x)
Bp3 = math.sqrt((Brp - Br) ** 2 + (Bzp - Bz) ** 2) + math.sqrt((Brm - Br) ** 2 + (Bzm - Bz) ** 2)
f3 = max(f3, Bp3)
return [f1, f2, f3]
class MyProblemBooth(Problem):
""" Describe simple one objective optimization problem. """
def set(self):
self.parameters = {'x_1': {'initial_value': 2.5, 'bounds': [-10, 10]},
'x_2': {'initial_value': 1.5, 'bounds': [-10, 10]}}
self.costs = [{'name': 'F'}]
def evaluate(self, x):
return [Booth.eval(x)]
class TestSimpleOptimization(unittest.TestCase):
""" Tests optimization problem."""
def xtest_local_problem_booth(self):
problem = MyProblemBooth("MyProblemBooth")
problem.surrogate = SurrogateModelScikit(problem)
#kernel = 1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1)
#problem.surrogate.regressor = GaussianProcessRegressor(kernel=kernel)
#problem.surrogate.has_epsilon = True
problem.surrogate.regressor = ExtraTreesRegressor(n_estimators=10)
# problem.surrogate.regressor = DecisionTreeRegressor()
problem.surrogate.train_step = 50
problem.surrogate.score_threshold = 0.0
# sweep analysis (for training)
gen = LHSGenerator(problem.parameters)
gen.init(problem.surrogate.train_step)
algorithm_sweep = SweepAlgorithm(problem, generator=gen)
algorithm_sweep.run()
# optimization
algorithm = NLopt(problem)
algorithm.options['algorithm'] = LN_BOBYQA
algorithm.options['n_iterations'] = 200
algorithm.run()
problem.logger.info("surrogate.predict_counter: {}".format(problem.surrogate.predict_counter))
problem.logger.info("surrogate.eval_counter: {}".format(problem.surrogate.eval_counter))
# print(problem.surrogate.x_data)
# print(problem.surrogate.y_data)
results = Results(problem)
optimum = results.find_optimum('F')
self.assertAlmostEqual(optimum, 1e-6, 3)
"""
kernels = [1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-1, 10.0)),
1.0 * RationalQuadratic(length_scale=1.0, alpha=0.1),
1.0 * ExpSineSquared(length_scale=1.0, periodicity=3.0,
length_scale_bounds=(0.1, 10.0),
periodicity_bounds=(1.0, 10.0)),
ConstantKernel(0.1, (0.01, 10.0))
* (DotProduct(sigma_0=1.0, sigma_0_bounds=(0.1, 10.0)) ** 2),
1.0 * Matern(length_scale=1.0, length_scale_bounds=(1e-5, 1e5), nu=1.5)]
for kernel in kernels:
print(kernel)
problem.surrogate = SurrogateModelScikit(problem)
# problem.surrogate.regressor = GaussianProcessRegressor(kernel=kernel)
# set threshold
problem.surrogate.sigma_threshold = 0.1
problem.surrogate.train_step = 10
algorithm = NLopt(problem)
algorithm.options['algorithm'] = LN_BOBYQA
algorithm.options['n_iterations'] = 200
algorithm.run()
problem.logger.info("surrogate.predict_counter: {}".format(problem.surrogate.predict_counter))
problem.logger.info("surrogate.eval_counter: {}".format(problem.surrogate.eval_counter))
"""
def xtest_local_problem_coil_one(self):
problem = MyProblemCoilOne("MyProblemCoilOne")
# enable surrogate
problem.surrogate = SurrogateModelScikit(problem)
problem.surrogate.regressor = DecisionTreeRegressor()
problem.surrogate.train_step = 30
problem.surrogate.score_threshold = 0.0
# sweep analysis (for training)
gen = LHSGenerator(problem.parameters)
gen.init(problem.surrogate.train_step)
algorithm_sweep = SweepAlgorithm(problem, generator=gen)
algorithm_sweep.run()
# optimization
algorithm = NLopt(problem)
algorithm.options['algorithm'] = LN_BOBYQA
algorithm.options['n_iterations'] = 50
algorithm.run()
problem.logger.info("surrogate.predict_counter: {}".format(problem.surrogate.predict_counter))
problem.logger.info("surrogate.eval_counter: {}".format(problem.surrogate.eval_counter))
results = Results(problem)
optimum = results.find_optimum('F1')
self.assertAlmostEqual(optimum, 5e-5, 4)
def xtest_local_problem_coil_one_bobyqa_optimum(self):
problem = MyProblemCoilOne("MyProblemCoilOne")
# optimization
algorithm = NLopt(problem)
algorithm.options['algorithm'] = LN_BOBYQA
algorithm.options['n_iterations'] = 500
algorithm.run()
problem.logger.info("surrogate.predict_counter: {}".format(problem.surrogate.predict_counter))
problem.logger.info("surrogate.eval_counter: {}".format(problem.surrogate.eval_counter))
results = Results(problem)
optimum = results.find_optimum('F1')
print("BOBYQA = {}".format(optimum))
# Bayes = 3.846087978861188e-05
# self.assertAlmostEqual(optimum, 1e-6, 4)
def xtest_local_problem_coil_one_bayesopt_optimum(self):
problem = MyProblemCoilOne("MyProblemCoilOne")
# optimization
algorithm = BayesOptSerial(problem)
algorithm.options['n_iterations'] = 500
algorithm.run()
problem.logger.info("surrogate.predict_counter: {}".format(problem.surrogate.predict_counter))
problem.logger.info("surrogate.eval_counter: {}".format(problem.surrogate.eval_counter))
results = Results(problem)
optimum = results.find_optimum('F1')
print("Bayes = {}".format(optimum))
# Bayes = 4.347142168223674e-05
# self.assertAlmostEqual(optimum, 1e-6, 4)
def xtest_local_problem_coil_one_nsgaii_optimum(self):
problem = MyProblemCoilOne("MyProblemCoilOne")
# optimization
algorithm = NSGAII(problem)
algorithm.options['max_population_number'] = 100
algorithm.options['max_population_size'] = 50
algorithm.run()
problem.logger.info("surrogate.predict_counter: {}".format(problem.surrogate.predict_counter))
problem.logger.info("surrogate.eval_counter: {}".format(problem.surrogate.eval_counter))
results = Results(problem)
optimum = results.find_optimum('F1')
print("NSGAII = {}".format(optimum))
# NSGAII = 8.099681801799041e-06
# self.assertAlmostEqual(optimum, 1e-6, 4)
"""
class TestSimpleOptimization(unittest.TestCase):
def test_local_problem_booth(self):
problem = MyProblemCoilOne("LocalPythonProblem")
#problem = MyProblemMultiTwo2("LocalPythonProblem")
#algorithm = BayesOptSerial(problem)
#algorithm.options['verbose_level'] = 0
#algorithm.options['n_iterations'] = 100
algorithm = NLopt(problem)
algorithm.options['algorithm'] = LN_BOBYQA
algorithm.options['n_iterations'] = 200
#algorithm = NSGA_II(problem)
#algorithm.options['max_population_number'] = 80
#algorithm.options['max_population_size'] = 20
t_s = time.time()
algorithm.run()
t = time.time() - t_s
print('Elapsed time:', t)
print("surrogate_predict_counter: ", problem.surrogate_predict_counter)
print("surrogate_eval_counter: ", problem.surrogate_eval_counter)
results = Results(problem)
optimum = results.find_minimum('F1')
print(optimum)
self.assertAlmostEqual(optimum, 1e-6, 3)
"""
"""
def figures(name):
import matplotlib
matplotlib.use('Agg')
import pylab as pl
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import rc
data_store = SqliteDataStore(database_file=name + ".sqlite")
problem = ProblemSqliteDataStore(data_store)
data_x = []
data_y = []
pareto_front_x = []
pareto_front_y = []
for population in problem.populations:
if len(population.individuals) > 1:
for individual in population.individuals:
data_x.append(individual.costs[0])
data_y.append(individual.costs[1])
results = GraphicalResults(problem)
pareto_front_x, pareto_front_y = results.find_pareto({'F1': Results.MINIMIZE, 'F2': Results.MINIMIZE})
pl.rcParams['figure.figsize'] = 10, 4
pl.rcParams['legend.fontsize'] = 17
pl.rcParams['text.usetex'] = True
pl.rcParams['font.size'] = 20
pl.rcParams['font.serif'] = "Times"
pl.figure()
pl.plot(data_x, data_y, 'o', color='#d0d0d0', markersize=3)
pl.plot(pareto_front_x, pareto_front_y, 'o', markersize=4, label="Pareto Front")
pl.xlim(1e-4, 8e-4)
pl.ylim(0, 1e-3)
pl.grid(True)
pl.tight_layout()
pl.legend(loc="upper right")
pl.xlabel("$F_1$")
pl.ylabel("$F_2$")
pl.savefig(name + ".pdf", dpi=200)
"""
if __name__ == '__main__':
unittest.main()
| 34.520179 | 129 | 0.566706 | 12,429 | 0.807288 | 0 | 0 | 0 | 0 | 0 | 0 | 5,733 | 0.372369 |
dc3a778a081bc0e908fbf22ada6b3c5f69d5f4aa | 16,560 | py | Python | sdk/python/pulumi_kong/_inputs.py | pulumi/pulumi-kong | 775c17e4eac38934252410ed3dcdc6fc3bd40c5c | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-02-23T10:05:20.000Z | 2020-05-15T14:22:10.000Z | sdk/python/pulumi_kong/_inputs.py | pulumi/pulumi-kong | 775c17e4eac38934252410ed3dcdc6fc3bd40c5c | [
"ECL-2.0",
"Apache-2.0"
] | 41 | 2020-04-21T22:04:23.000Z | 2022-03-31T15:29:53.000Z | sdk/python/pulumi_kong/_inputs.py | pulumi/pulumi-kong | 775c17e4eac38934252410ed3dcdc6fc3bd40c5c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'RouteDestinationArgs',
'RouteHeaderArgs',
'RouteSourceArgs',
'UpstreamHealthchecksArgs',
'UpstreamHealthchecksActiveArgs',
'UpstreamHealthchecksActiveHealthyArgs',
'UpstreamHealthchecksActiveUnhealthyArgs',
'UpstreamHealthchecksPassiveArgs',
'UpstreamHealthchecksPassiveHealthyArgs',
'UpstreamHealthchecksPassiveUnhealthyArgs',
]
@pulumi.input_type
class RouteDestinationArgs:
def __init__(__self__, *,
ip: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None):
if ip is not None:
pulumi.set(__self__, "ip", ip)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ip")
@ip.setter
def ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@pulumi.input_type
class RouteHeaderArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
values: pulumi.Input[Sequence[pulumi.Input[str]]]):
"""
:param pulumi.Input[str] name: The name of the route
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the route
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "values")
@values.setter
def values(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class RouteSourceArgs:
def __init__(__self__, *,
ip: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None):
if ip is not None:
pulumi.set(__self__, "ip", ip)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def ip(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "ip")
@ip.setter
def ip(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@pulumi.input_type
class UpstreamHealthchecksArgs:
def __init__(__self__, *,
active: Optional[pulumi.Input['UpstreamHealthchecksActiveArgs']] = None,
passive: Optional[pulumi.Input['UpstreamHealthchecksPassiveArgs']] = None):
if active is not None:
pulumi.set(__self__, "active", active)
if passive is not None:
pulumi.set(__self__, "passive", passive)
@property
@pulumi.getter
def active(self) -> Optional[pulumi.Input['UpstreamHealthchecksActiveArgs']]:
return pulumi.get(self, "active")
@active.setter
def active(self, value: Optional[pulumi.Input['UpstreamHealthchecksActiveArgs']]):
pulumi.set(self, "active", value)
@property
@pulumi.getter
def passive(self) -> Optional[pulumi.Input['UpstreamHealthchecksPassiveArgs']]:
return pulumi.get(self, "passive")
@passive.setter
def passive(self, value: Optional[pulumi.Input['UpstreamHealthchecksPassiveArgs']]):
pulumi.set(self, "passive", value)
@pulumi.input_type
class UpstreamHealthchecksActiveArgs:
def __init__(__self__, *,
concurrency: Optional[pulumi.Input[int]] = None,
healthy: Optional[pulumi.Input['UpstreamHealthchecksActiveHealthyArgs']] = None,
http_path: Optional[pulumi.Input[str]] = None,
https_sni: Optional[pulumi.Input[str]] = None,
https_verify_certificate: Optional[pulumi.Input[bool]] = None,
timeout: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
unhealthy: Optional[pulumi.Input['UpstreamHealthchecksActiveUnhealthyArgs']] = None):
if concurrency is not None:
pulumi.set(__self__, "concurrency", concurrency)
if healthy is not None:
pulumi.set(__self__, "healthy", healthy)
if http_path is not None:
pulumi.set(__self__, "http_path", http_path)
if https_sni is not None:
pulumi.set(__self__, "https_sni", https_sni)
if https_verify_certificate is not None:
pulumi.set(__self__, "https_verify_certificate", https_verify_certificate)
if timeout is not None:
pulumi.set(__self__, "timeout", timeout)
if type is not None:
pulumi.set(__self__, "type", type)
if unhealthy is not None:
pulumi.set(__self__, "unhealthy", unhealthy)
@property
@pulumi.getter
def concurrency(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "concurrency")
@concurrency.setter
def concurrency(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "concurrency", value)
@property
@pulumi.getter
def healthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksActiveHealthyArgs']]:
return pulumi.get(self, "healthy")
@healthy.setter
def healthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksActiveHealthyArgs']]):
pulumi.set(self, "healthy", value)
@property
@pulumi.getter(name="httpPath")
def http_path(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "http_path")
@http_path.setter
def http_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "http_path", value)
@property
@pulumi.getter(name="httpsSni")
def https_sni(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "https_sni")
@https_sni.setter
def https_sni(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "https_sni", value)
@property
@pulumi.getter(name="httpsVerifyCertificate")
def https_verify_certificate(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "https_verify_certificate")
@https_verify_certificate.setter
def https_verify_certificate(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "https_verify_certificate", value)
@property
@pulumi.getter
def timeout(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "timeout")
@timeout.setter
def timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def unhealthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksActiveUnhealthyArgs']]:
return pulumi.get(self, "unhealthy")
@unhealthy.setter
def unhealthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksActiveUnhealthyArgs']]):
pulumi.set(self, "unhealthy", value)
@pulumi.input_type
class UpstreamHealthchecksActiveHealthyArgs:
def __init__(__self__, *,
http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
interval: Optional[pulumi.Input[int]] = None,
successes: Optional[pulumi.Input[int]] = None):
if http_statuses is not None:
pulumi.set(__self__, "http_statuses", http_statuses)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if successes is not None:
pulumi.set(__self__, "successes", successes)
@property
@pulumi.getter(name="httpStatuses")
def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
return pulumi.get(self, "http_statuses")
@http_statuses.setter
def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "http_statuses", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter
def successes(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "successes")
@successes.setter
def successes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "successes", value)
@pulumi.input_type
class UpstreamHealthchecksActiveUnhealthyArgs:
def __init__(__self__, *,
http_failures: Optional[pulumi.Input[int]] = None,
http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
interval: Optional[pulumi.Input[int]] = None,
tcp_failures: Optional[pulumi.Input[int]] = None,
timeouts: Optional[pulumi.Input[int]] = None):
if http_failures is not None:
pulumi.set(__self__, "http_failures", http_failures)
if http_statuses is not None:
pulumi.set(__self__, "http_statuses", http_statuses)
if interval is not None:
pulumi.set(__self__, "interval", interval)
if tcp_failures is not None:
pulumi.set(__self__, "tcp_failures", tcp_failures)
if timeouts is not None:
pulumi.set(__self__, "timeouts", timeouts)
@property
@pulumi.getter(name="httpFailures")
def http_failures(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "http_failures")
@http_failures.setter
def http_failures(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_failures", value)
@property
@pulumi.getter(name="httpStatuses")
def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
return pulumi.get(self, "http_statuses")
@http_statuses.setter
def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "http_statuses", value)
@property
@pulumi.getter
def interval(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "interval")
@interval.setter
def interval(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "interval", value)
@property
@pulumi.getter(name="tcpFailures")
def tcp_failures(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "tcp_failures")
@tcp_failures.setter
def tcp_failures(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "tcp_failures", value)
@property
@pulumi.getter
def timeouts(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "timeouts")
@timeouts.setter
def timeouts(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeouts", value)
@pulumi.input_type
class UpstreamHealthchecksPassiveArgs:
def __init__(__self__, *,
healthy: Optional[pulumi.Input['UpstreamHealthchecksPassiveHealthyArgs']] = None,
type: Optional[pulumi.Input[str]] = None,
unhealthy: Optional[pulumi.Input['UpstreamHealthchecksPassiveUnhealthyArgs']] = None):
if healthy is not None:
pulumi.set(__self__, "healthy", healthy)
if type is not None:
pulumi.set(__self__, "type", type)
if unhealthy is not None:
pulumi.set(__self__, "unhealthy", unhealthy)
@property
@pulumi.getter
def healthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksPassiveHealthyArgs']]:
return pulumi.get(self, "healthy")
@healthy.setter
def healthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksPassiveHealthyArgs']]):
pulumi.set(self, "healthy", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter
def unhealthy(self) -> Optional[pulumi.Input['UpstreamHealthchecksPassiveUnhealthyArgs']]:
return pulumi.get(self, "unhealthy")
@unhealthy.setter
def unhealthy(self, value: Optional[pulumi.Input['UpstreamHealthchecksPassiveUnhealthyArgs']]):
pulumi.set(self, "unhealthy", value)
@pulumi.input_type
class UpstreamHealthchecksPassiveHealthyArgs:
def __init__(__self__, *,
http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
successes: Optional[pulumi.Input[int]] = None):
if http_statuses is not None:
pulumi.set(__self__, "http_statuses", http_statuses)
if successes is not None:
pulumi.set(__self__, "successes", successes)
@property
@pulumi.getter(name="httpStatuses")
def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
return pulumi.get(self, "http_statuses")
@http_statuses.setter
def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "http_statuses", value)
@property
@pulumi.getter
def successes(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "successes")
@successes.setter
def successes(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "successes", value)
@pulumi.input_type
class UpstreamHealthchecksPassiveUnhealthyArgs:
def __init__(__self__, *,
http_failures: Optional[pulumi.Input[int]] = None,
http_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,
tcp_failures: Optional[pulumi.Input[int]] = None,
timeouts: Optional[pulumi.Input[int]] = None):
if http_failures is not None:
pulumi.set(__self__, "http_failures", http_failures)
if http_statuses is not None:
pulumi.set(__self__, "http_statuses", http_statuses)
if tcp_failures is not None:
pulumi.set(__self__, "tcp_failures", tcp_failures)
if timeouts is not None:
pulumi.set(__self__, "timeouts", timeouts)
@property
@pulumi.getter(name="httpFailures")
def http_failures(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "http_failures")
@http_failures.setter
def http_failures(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "http_failures", value)
@property
@pulumi.getter(name="httpStatuses")
def http_statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]:
return pulumi.get(self, "http_statuses")
@http_statuses.setter
def http_statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]]):
pulumi.set(self, "http_statuses", value)
@property
@pulumi.getter(name="tcpFailures")
def tcp_failures(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "tcp_failures")
@tcp_failures.setter
def tcp_failures(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "tcp_failures", value)
@property
@pulumi.getter
def timeouts(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "timeouts")
@timeouts.setter
def timeouts(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeouts", value)
| 34.936709 | 103 | 0.650906 | 15,624 | 0.943478 | 0 | 0 | 15,814 | 0.954952 | 0 | 0 | 2,505 | 0.151268 |
dc3da5a24d4fd4a5555347785b65914a5905c48f | 728 | py | Python | common/__init__.py | timmartin19/pycon-ripozo-tutorial | d6f68d0b7c8c8aacb090014c5ff1f34b21ded017 | [
"MIT"
] | null | null | null | common/__init__.py | timmartin19/pycon-ripozo-tutorial | d6f68d0b7c8c8aacb090014c5ff1f34b21ded017 | [
"MIT"
] | null | null | null | common/__init__.py | timmartin19/pycon-ripozo-tutorial | d6f68d0b7c8c8aacb090014c5ff1f34b21ded017 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from logging import config
config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format': '%(asctime)s| %(name)s/%(process)d: %(message)s @%(funcName)s:%(lineno)d #%(levelname)s',
}
},
'handlers': {
'console': {
'formatter': 'standard',
'class': 'logging.StreamHandler',
}
},
'root': {
'handlers': ['console'],
'level': 'INFO',
},
'loggers': {
'ripozo': {
'level': 'INFO',
}
}
})
| 22.75 | 111 | 0.541209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.399725 |
dc3dd9ba91b522b9cfe61eccaaba1f3a5d171d62 | 612 | py | Python | 2019_618_PickMaomao/2019_618_PickMaomao.py | yanaizhen/PythonApps | 21c554980df00795e1af6a8a17224358222d28e5 | [
"MIT"
] | 1 | 2021-07-06T11:12:54.000Z | 2021-07-06T11:12:54.000Z | 2019_618_PickMaomao/2019_618_PickMaomao.py | yanaizhen/PythonApps | 21c554980df00795e1af6a8a17224358222d28e5 | [
"MIT"
] | null | null | null | 2019_618_PickMaomao/2019_618_PickMaomao.py | yanaizhen/PythonApps | 21c554980df00795e1af6a8a17224358222d28e5 | [
"MIT"
] | 2 | 2019-12-09T16:31:26.000Z | 2021-08-15T08:09:37.000Z | # @Time : 2019/06/14 7:55AM
# @Author : HGzhao
# @File : 2019_618_PickMaomao.py
import os,time
def pick_maomao():
print(f"点 合合卡 按钮")
os.system('adb shell input tap 145 1625')
time.sleep(1)
print(f"点 进店找卡 按钮")
os.system('adb shell input tap 841 1660')
time.sleep(13)
print(f"猫猫出现啦,点击得喵币")
os.system('adb shell input tap 967 1134')
time.sleep(1)
print(f"点 开心收下")
os.system('adb shell input tap 569 1380')
time.sleep(1)
print(f"利用全面屏手势退出店铺")
os.system('adb shell input swipe 0 1500 500 1500')
time.sleep(1)
for i in range(40):
pick_maomao() | 23.538462 | 54 | 0.630719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 383 | 0.553468 |
dc3e7021531cfd65338ffc0247d5d19af8ab45a7 | 676 | py | Python | examples/helix-example/helix_example/components/python.py | HELIX-Datasets/helix | 7b89b4139e580518b58e109a96ef70f2a71bb780 | [
"MIT"
] | 7 | 2021-12-15T03:22:29.000Z | 2022-03-09T16:11:08.000Z | examples/helix-example/helix_example/components/python.py | HELIX-Datasets/helix | 7b89b4139e580518b58e109a96ef70f2a71bb780 | [
"MIT"
] | 10 | 2021-09-14T16:39:31.000Z | 2021-09-14T21:41:49.000Z | examples/helix-example/helix_example/components/python.py | HELIX-Datasets/helix | 7b89b4139e580518b58e109a96ef70f2a71bb780 | [
"MIT"
] | 1 | 2022-01-31T00:01:58.000Z | 2022-01-31T00:01:58.000Z | from helix import component
class ExamplePythonComponent(component.Component):
"""An example Python component."""
name = "example-python-component"
verbose_name = "Example Python Component"
type = "example"
version = "1.0.0"
description = "An example Python component"
date = "2020-10-20 12:00:00.000000"
tags = (("group", "example"),)
blueprints = ["example-python"]
functions = [
"""def ${example}():
print("hello world")
""",
"""from datetime import datetime
def ${now}():
print(datetime.now())
""",
]
calls = {"startup": ["${example}()"], "loop": ["${now}()"]}
globals = ["example", "now"]
| 23.310345 | 63 | 0.587278 | 645 | 0.954142 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.547337 |
dc3f216b9c59801796c698b0559906cae4053734 | 2,239 | py | Python | cdap-stream-clients/python/cdap_stream_client/streamwriter.py | caskdata/cdap-ingest | 17ce95a9c38fec4db36f4f42f93b4ef19397bf7f | [
"Apache-2.0"
] | 5 | 2015-03-11T21:16:33.000Z | 2018-01-28T14:00:50.000Z | cdap-stream-clients/python/cdap_stream_client/streamwriter.py | cdapio/cdap-ingest | 17ce95a9c38fec4db36f4f42f93b4ef19397bf7f | [
"Apache-2.0"
] | 12 | 2015-01-10T02:54:13.000Z | 2017-03-22T22:31:48.000Z | cdap-stream-clients/python/cdap_stream_client/streamwriter.py | caskdata/cdap-ingest | 17ce95a9c38fec4db36f4f42f93b4ef19397bf7f | [
"Apache-2.0"
] | 8 | 2015-05-13T15:07:40.000Z | 2018-10-24T10:51:27.000Z | # -*- coding: utf-8 -*-
# Copyright © 2014 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import locale
from .serviceconnector import ServiceConnector, ConnectionErrorChecker
from .streampromise import StreamPromise
class StreamWriter(object):
def __init__(self, serviceConnector, uri):
u"""
Object constructor
Keyword arguments:
serviceConnector -- reference to connection pool to communicate
with gateway server.
uri -- REST URL part to perform request.
Example: '/v2/streams/myStream'
data -- data to proceed by worker thread. Please read
'__workerTarget' documentation.
"""
if not isinstance(serviceConnector, ServiceConnector):
raise TypeError(u'parameter should be of type ServiceConnector')
self.__serviceConnector = serviceConnector
self.__serviceUri = uri
def write(self, message, charset=None, headers=None):
u"""
Ingest a stream event with a string as body.
Keyword arguments:
message -- Data to transmit to REST server.
Could be of type None if file field is presented.
charset -- Message field content charset. Could be of type None.
Default value: 'utf-8'
headers -- Additional HTTP headers. Should be of type 'dict'.
Returns:
StreamPromise instance for further handling
"""
promiseData = {
u'message': message,
u'charset': charset,
u'headers': headers
}
return StreamPromise(self.__serviceConnector, self.__serviceUri,
promiseData)
| 34.446154 | 80 | 0.648057 | 1,496 | 0.667857 | 0 | 0 | 0 | 0 | 0 | 0 | 1,547 | 0.690625 |
dc3f2daf823ff18bc22ad5400ef8f405209e75b1 | 53,877 | py | Python | ACDCDataManipulator.py | ACDC-paper-double-review/ACDC | bf44a691d3860eb35731c0ce41f606188f233b1a | [
"MIT"
] | null | null | null | ACDCDataManipulator.py | ACDC-paper-double-review/ACDC | bf44a691d3860eb35731c0ce41f606188f233b1a | [
"MIT"
] | null | null | null | ACDCDataManipulator.py | ACDC-paper-double-review/ACDC | bf44a691d3860eb35731c0ce41f606188f233b1a | [
"MIT"
] | null | null | null | import numpy as np
import pandas
import pandas as pd
import torch
import torchvision
import ssl
import gzip
import json
from tqdm import tqdm
from torchvision.datasets.utils import download_url
from MySingletons import MyWord2Vec
from nltk.tokenize import TweetTokenizer
import os
import tarfile
from lxml import etree
class MyCustomBikeSharingDataLoader(torch.utils.data.Dataset):
    """Bike-sharing demand dataset (London or Washington flavour).

    Reads the locally stored Kaggle CSV files, harmonises the two schemas
    into the same eight feature columns, derives a binary 'demand' target
    (1 when the ride count is at or below the median, else 0) and min-max
    normalizes the features.

    Bug fix: the two Kaggle URLs were previously swapped, so the manual
    download instruction pointed users at the wrong dataset page.
    """

    path = 'data/BikeSharing/'
    df = None  # pandas DataFrame built in __init__

    @property
    def datasets(self):
        # Same accessor name as the other loaders in this file.
        return self.df

    def __init__(self, london_or_washignton: str = 'london'):
        if london_or_washignton.lower() == 'london':
            self.base_files = ['london_merged.csv']
            self.base_url = 'https://www.kaggle.com/hmavrodiev/london-bike-sharing-dataset'
        elif london_or_washignton.lower() == 'washington':
            self.base_files = ['hour.csv', 'day.csv']
            self.base_url = 'https://www.kaggle.com/marklvl/bike-sharing-dataset'
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        for file in self.base_files:
            if os.path.isfile(f'{self.path}{file}'):
                if self.df is None:
                    self.df = pandas.read_csv(f'{self.path}{file}')
                else:
                    # Washington ships two files (hourly and daily); stack them.
                    df = pandas.read_csv(f'{self.path}{file}')
                    self.df = pd.concat([self.df, df], sort=True)
            else:
                # Kaggle requires an authenticated session, so files must be
                # fetched manually.
                print(f'Please, manually download file {file} from url {self.base_url} and put it at path {self.path}')
                exit()
        if london_or_washignton.lower() == 'london':
            self.df['demand'] = (self.df['cnt'] <= self.df['cnt'].median()).astype(int)
            self.df.drop(columns=['timestamp', 'cnt'], inplace=True)
            self.df = self.df[
                ['t1', 't2', 'hum', 'wind_speed', 'weather_code', 'is_holiday', 'is_weekend', 'season', 'demand']]
        elif london_or_washignton.lower() == 'washington':
            self.df['demand'] = (self.df['cnt'] <= self.df['cnt'].median()).astype(int)
            self.df.drop(columns=['casual', 'dteday', 'holiday', 'hr', 'instant', 'mnth', 'registered', 'yr', 'cnt'],
                         inplace=True)
            # Rename to the London column vocabulary.
            self.df.rename(
                columns={'temp': 't1', 'atemp': 't2', 'windspeed': 'wind_speed', 'weathersit': 'weather_code',
                         'workingday': 'is_holiday', 'weekday': 'is_weekend'}, inplace=True)
            # weekday 0 (Sunday) or 6 (Saturday) counts as weekend.
            self.df['is_weekend'] = ((self.df['is_weekend'] == 0) | (self.df['is_weekend'] == 6)).astype(int)
            self.df['is_holiday'] = (self.df['is_holiday'] == 0).astype(int)
            # Shift seasons 1..4 to 0..3 to match the London encoding.
            self.df['season'] = self.df['season'] - 1
            self.df = self.df[
                ['t1', 't2', 'hum', 'wind_speed', 'weather_code', 'is_holiday', 'is_weekend', 'season', 'demand']]
        self.normalize()

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        """Return {'x': feature vector, 'y': binary demand}; None when out of range."""
        item = self.df.iloc[idx]
        if idx < len(self):
            try:
                return {'x': item.drop('demand').to_numpy(), 'y': item['demand']}
            except:
                # Fall back to the previous sample if this row is unusable.
                return self.__getitem__(idx - 1)
        else:
            return None

    def normalize(self, a: int = 0, b: int = 1):
        """Min-max scale every feature column (not 'demand') into [a, b]."""
        assert a < b
        for feature_name in self.df.drop('demand', axis=1).columns:
            max_value = self.df[feature_name].max()
            min_value = self.df[feature_name].min()
            self.df[feature_name] = (b - a) * (self.df[feature_name] - min_value) / (max_value - min_value) + a
class MyCustomAmazonReviewDataLoader(torch.utils.data.Dataset):
    """Amazon per-category 5-core review dataset.

    Downloads the gzipped JSON review dump, averages the Word2Vec
    embeddings of each review's tokens into one feature vector and serves
    (embedding, star rating) pairs.  The tokenized result is cached as an
    HDF5 file next to the raw download.
    """

    # UCSD mirror of the per-category 5-core review files.
    base_url = 'http://deepyeti.ucsd.edu/jianmo/amazon/categoryFilesSmall/'
    path = 'data/AmazonReview/'
    df = None  # pandas DataFrame built in __init__

    @property
    def datasets(self):
        # Same accessor name as the torchvision-based loaders in this file.
        return self.df

    def __init__(self, filename):
        """filename -- one of the '*_5.json.gz' category files on the mirror."""
        torchvision.datasets.utils.download_url(self.base_url + filename, self.path)
        self.df = self.get_df(self.path + filename)
        self.normalize()

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        """Return {'x': averaged embedding, 'y': star rating}; None when out of range."""
        item = self.df.iloc[idx]
        if idx < len(self):
            try:
                return {'x': item.drop('overall').to_numpy(), 'y': item['overall']}
            except:
                # Fall back to the previous sample if this row is unusable.
                return self.__getitem__(idx - 1)
        else:
            return None

    def normalize(self, a: int = 0, b: int = 1):
        """Min-max scale every embedding column (not the 'overall' rating) into [a, b]."""
        assert a < b
        for feature_name in self.df.drop('overall', axis=1).columns:
            max_value = self.df[feature_name].max()
            min_value = self.df[feature_name].min()
            self.df[feature_name] = (b - a) * (self.df[feature_name] - min_value) / (max_value - min_value) + a

    @staticmethod
    def parse(path):
        # Stream one JSON object per line out of the gzipped dump.
        g = gzip.open(path, 'r')
        for l in g:
            yield json.loads(l)

    def get_df(self, path, high_bound=500000):
        """Return the embedded dataset, building and caching it on first use.

        NOTE(review): the in-loop sample cap is the literal 500000, not
        `high_bound`; `high_bound` only sizes the progress bar.
        """
        try:
            print('Trying to load processed file %s.h5 from disc...' % path)
            df = pd.read_hdf(path_or_buf=os.path.join(os.path.dirname(__file__), path + '.h5'),
                             key='df')
        except:
            print('Processed file does not exists')
            print('Reading dataset into memory and applying Word2Vec...')
            print('\nWe will save a maximum of half million samples because of memory constraints')
            print('and because that is more than sufficient samples to test transfer learning models\n')
            i = 0
            df = {}
            # Hard-coded corpus sizes per category; used only for the progress bar.
            if path == 'data/AmazonReview/AMAZON_FASHION_5.json.gz':
                total = 3176
            elif path == 'data/AmazonReview/All_Beauty_5.json.gz':
                total = 5269
            elif path == 'data/AmazonReview/Appliances_5.json.gz':
                total = 2277
            elif path == 'data/AmazonReview/Arts_Crafts_and_Sewing_5.json.gz':
                total = 494485
            elif path == 'data/AmazonReview/Automotive_5.json.gz':
                total = 1711519
            elif path == 'data/AmazonReview/Books_5.json.gz':
                total = 27164983
            elif path == 'data/AmazonReview/CDs_and_Vinyl_5.json.gz':
                total = 1443755
            elif path == 'data/AmazonReview/Cell_Phones_and_Accessories_5.json.gz':
                total = 1128437
            elif path == 'data/AmazonReview/Clothing_Shoes_and_Jewelry_5.json.gz':
                total = 11285464
            elif path == 'data/AmazonReview/Digital_Music_5.json.gz':
                total = 169781
            elif path == 'data/AmazonReview/Electronics_5.json.gz':
                total = 6739590
            elif path == 'data/AmazonReview/Gift_Cards_5.json.gz':
                total = 2972
            elif path == 'data/AmazonReview/Grocery_and_Gourmet_Food_5.json.gz':
                total = 1143860
            elif path == 'data/AmazonReview/Home_and_Kitchen_5.json.gz':
                total = 6898955
            elif path == 'data/AmazonReview/Industrial_and_Scientific_5.json.gz':
                total = 77071
            elif path == 'data/AmazonReview/Kindle_Store_5.json.gz':
                total = 2222983
            elif path == 'data/AmazonReview/Luxury_Beauty_5.json.gz':
                total = 34278
            elif path == 'data/AmazonReview/Magazine_Subscriptions_5.json.gz':
                total = 2375
            elif path == 'data/AmazonReview/Movies_and_TV_5.json.gz':
                total = 3410019
            elif path == 'data/AmazonReview/Musical_Instruments_5.json.gz':
                total = 231392
            elif path == 'data/AmazonReview/Office_Products_5.json.gz':
                total = 800357
            elif path == 'data/AmazonReview/Patio_Lawn_and_Garden_5.json.gz':
                total = 798415
            elif path == 'data/AmazonReview/Pet_Supplies_5.json.gz':
                total = 2098325
            elif path == 'data/AmazonReview/Prime_Pantry_5.json.gz':
                total = 137788
            elif path == 'data/AmazonReview/Software_5.json.gz':
                total = 12805
            elif path == 'data/AmazonReview/Sports_and_Outdoors_5.json.gz':
                total = 2839940
            elif path == 'data/AmazonReview/Tools_and_Home_Improvement_5.json.gz':
                total = 2070831
            elif path == 'data/AmazonReview/Toys_and_Games_5.json.gz':
                total = 1828971
            elif path == 'data/AmazonReview/Video_Games_5.json.gz':
                total = 497577
            MyWord2Vec().get()
            pbar = tqdm(unit=' samples', total=np.min([total, high_bound]))
            tokenizer = TweetTokenizer()
            for d in self.parse(path):
                if i >= 500000:
                    break
                try:
                    reviewText = d['reviewText']
                    try:
                        word_count = 0
                        vector = np.zeros(MyWord2Vec().get().vector_size)
                        for word in tokenizer.tokenize(reviewText):
                            try:
                                # Out-of-vocabulary words are simply skipped.
                                vector += MyWord2Vec().get()[word]
                                word_count += 1
                            except:
                                pass
                        # Keep only reviews with at least two in-vocabulary words.
                        if word_count > 1:
                            try:
                                overall = d['overall']
                                df[i] = {'overall': overall, 'reviewText': vector / word_count}
                                pbar.update(1)
                                i += 1
                            except:
                                pass
                    except:
                        pass
                except:
                    pass
            pbar.close()
            print('Saving processed tokenized dataset in disc for future usage...')
            df = pd.DataFrame.from_dict(df, orient='index')
            # Explode the embedding vector into one numbered column per dimension.
            df = pd.DataFrame([{x: y for x, y in enumerate(item)}
                               for item in df['reviewText'].values.tolist()]).assign(overall=df.overall.tolist())
            df.to_hdf(path_or_buf=os.path.join(os.path.dirname(__file__), path + '.h5'),
                      key='df',
                      mode='w',
                      format='table',
                      complevel=9,
                      complib='bzip2')
            df = pd.read_hdf(path_or_buf=os.path.join(os.path.dirname(__file__), path + '.h5'),
                             key='df')
        return df
class MyCustomAmazonReviewNIPSDataLoader(torch.utils.data.Dataset):
    """Multi-domain sentiment dataset ('processed_stars', JHU mirror).

    Each line of an 'all_balanced.review' file is a bag of 'token:count'
    features terminated by a '#label#:<stars>' field.  Tokens are embedded
    with Word2Vec (repeated `count` times, underscore-joined bigrams split
    into their words) and averaged into one vector per review.
    """

    dataset_url = 'https://www.cs.jhu.edu/~mdredze/datasets/sentiment/processed_stars.tar.gz'
    path = 'data/AmazonReviewNIPS/'
    compressed_filename = path + 'processed_stars.tar.gz'
    books_file_path = path + 'processed_stars/books/all_balanced.review'
    dvd_file_path = path + 'processed_stars/dvd/all_balanced.review'
    electronics_file_path = path + 'processed_stars/electronics/all_balanced.review'
    kitchen_file_path = path + 'processed_stars/kitchen/all_balanced.review'

    @property
    def datasets(self):
        # Same accessor name as the other loaders in this file.
        return self.df

    def __init__(self, folder):
        """folder -- one of 'books', 'dvd', 'electronics', 'kitchen'."""
        torchvision.datasets.utils.download_url(self.dataset_url, self.path)
        tar = tarfile.open(self.compressed_filename)
        tar.extractall(self.path)
        tar.close()
        if folder == 'books':
            filename_path = self.books_file_path
        elif folder == 'dvd':
            filename_path = self.dvd_file_path
        elif folder == 'electronics':
            filename_path = self.electronics_file_path
        elif folder == 'kitchen':
            filename_path = self.kitchen_file_path
        self.df = self.get_df(filename_path)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        """Return {'x': averaged embedding, 'y': star label}; None when out of range."""
        item = self.df.iloc[idx]
        if idx < len(self):
            return {'x': item['x'], 'y': item['targets']}
        else:
            return None

    def get_df(self, path):
        """Return the embedded dataset, building and caching it (HDF5) on first use."""
        try:
            print('Trying to load processed file %s.h5 from disc...' % path)
            df = pd.read_hdf(path_or_buf=path + '.h5',
                             key='df')
        except:
            print('Processed file does not exists')
            print('Reading dataset into memory and applying Word2Vec...')
            # Known corpus sizes per domain; used only for the progress bar.
            if path == self.books_file_path:
                total = 5501
            elif path == self.dvd_file_path:
                total = 5518
            elif path == self.electronics_file_path:
                total = 5901
            elif path == self.kitchen_file_path:
                total = 5149
            line_count = 0
            df = {}
            MyWord2Vec().get()
            pbar = tqdm(unit=' samples', total=total)
            for line in open(path, 'rb'):
                word_count = 0
                vector = np.zeros(MyWord2Vec().get().vector_size)
                for word in line.decode('utf-8').split(' '):
                    x, y = word.split(':')
                    if x != '#label#':
                        # Each feature occurs `y` times; '_'-joined n-grams are
                        # embedded word by word.  OOV words are skipped.
                        for j in range(int(y)):
                            for xx in x.split('_'):
                                try:
                                    vector += MyWord2Vec().get()[xx]
                                    word_count += 1
                                except:
                                    pass
                    else:
                        # '#label#:<stars>' terminates the line; strip the
                        # trailing newline before converting to an int label.
                        try:
                            df[line_count] = {'x': vector / word_count, 'targets': int(float(y.replace('\n', '')))}
                        except:
                            df[line_count] = {'x': vector / word_count, 'targets': int(float(y))}
                line_count += 1
                pbar.update(1)
            pbar.close()
            print('Saving processed tokenized dataset in disc for future usage...')
            df = pd.DataFrame.from_dict(df, orient='index')
            df.to_hdf(path_or_buf=path + '.h5',
                      key='df',
                      mode='w',
                      format='table',
                      complevel=9,
                      complib='bzip2')
            df = pd.read_hdf(path_or_buf=path + '.h5',
                             key='df')
        return df
class MyCustomNewsPopularityDataLoader(torch.utils.data.Dataset):
    """UCI multi-source news popularity dataset.

    Title+headline text is embedded as an averaged Word2Vec vector; the
    target is binary popularity: 1 when the selected social feed(s) count
    more than 10 shares, else 0.  The embedded result is cached as HDF5,
    one file per (topic, feed) combination.
    """

    dataset_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/News_Final.csv'
    path = 'data/UCIMultiSourceNews/'
    filename = 'News_Final.csv'

    @property
    def datasets(self):
        # Same accessor name as the other loaders in this file.
        return self.df

    def __init__(self, topic: str, social_feed: str):
        """topic -- matched against the CSV 'Topic' column;
        social_feed -- 'Facebook', 'GooglePlus', 'LinkedIn' or 'all'
        (sums the three feeds)."""
        torchvision.datasets.utils.download_url(self.dataset_url, self.path)
        path = (self.path + topic + '_' + social_feed + '.h5').lower()
        try:
            print('Trying to load processed file %s from disc...' % path)
            self.df = pd.read_hdf(path_or_buf=path,
                                  key='df')
        except:
            print('Processed file does not exists')
            print('Reading dataset into memory and applying Word2Vec...')
            self.df = {}
            df = pd.read_csv(self.path + self.filename)
            if social_feed == 'all':
                df = df.loc[df['Topic'] == topic][['Title', 'Headline', 'Facebook', 'GooglePlus', 'LinkedIn']]
            else:
                df = df.loc[df['Topic'] == topic][['Title', 'Headline', social_feed]]
            # Binary popularity target: more than 10 total shares -> 1.
            df['targets'] = df[df.columns[2:]].sum(axis=1)
            df = df[['Title', 'Headline', 'targets']]
            df.loc[df['targets'] <= 10, 'targets'] = 0
            df.loc[df['targets'] > 10, 'targets'] = 1
            df['fullText'] = df['Title'].astype(str) + ' ' + df['Headline'].astype(str)
            tokenizer = TweetTokenizer()
            MyWord2Vec().get()
            sample_count = 0
            pbar = tqdm(unit=' samples', total=len(df))
            for _, row in df.iterrows():
                word_counter = 0
                vector = np.zeros(MyWord2Vec().get().vector_size)
                try:
                    # NOTE(review): the except wraps the whole loop, so one
                    # out-of-vocabulary word aborts the rest of this row's
                    # tokens rather than skipping just that word.
                    for word in tokenizer.tokenize(row['fullText']):
                        vector += MyWord2Vec().get()[word]
                        word_counter += 1
                except:
                    pass
                if word_counter > 0:
                    self.df[sample_count] = {'x': vector / word_counter, 'targets': int(row['targets'])}
                    sample_count += 1
                pbar.update(1)
            pbar.close()
            print('Saving processed tokenized dataset in disc for future usage...')
            self.df = pd.DataFrame.from_dict(self.df, orient='index')
            # Explode the embedding vector into one numbered column per dimension.
            self.df = pd.DataFrame([{x: y for x, y in enumerate(item)} for item in self.df['x'].values.tolist()],
                                   index=self.df.index).assign(targets=self.df['targets'].tolist())
            self.df.to_hdf(path_or_buf=path,
                           key='df',
                           mode='w',
                           format='table',
                           complevel=9,
                           complib='bzip2')
            self.df = pd.read_hdf(path_or_buf=path,
                                  key='df')

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        """Return {'x': averaged embedding, 'y': 0/1 popularity}; None when out of range."""
        item = self.df.iloc[idx]
        if idx < len(self):
            return {'x': item.drop('targets').to_numpy(), 'y': int(item['targets'])}
        else:
            return None
class MyCustomAmazonReviewACLDataLoader(torch.utils.data.Dataset):
    """Multi-domain sentiment dataset, unprocessed variant (JHU mirror).

    The raw 'all.review' files are pseudo-XML without a root element; they
    are wrapped, parsed with a recovering parser, embedded via averaged
    Word2Vec vectors and cached as HDF5.
    """

    dataset_url = 'https://www.cs.jhu.edu/~mdredze/datasets/sentiment/unprocessed.tar.gz'
    path = 'data/AmazonReviewACL/'
    compressed_filename = path + 'unprocessed.tar.gz'
    # One raw review file per product domain.
    apparel_file_path = path + 'sorted_data/apparel/all.review'
    automotive_file_path = path + 'sorted_data/automotive/all.review'
    baby_file_path = path + 'sorted_data/baby/all.review'
    beauty_file_path = path + 'sorted_data/beauty/all.review'
    books_file_path = path + 'sorted_data/books/all.review'
    camera_photo_file_path = path + 'sorted_data/camera_&_photo/all.review'
    cell_phones_service_file_path = path + 'sorted_data/cell_phones_&_service/all.review'
    computer_video_games_file_path = path + 'sorted_data/computer_&_video_games/all.review'
    dvd_file_path = path + 'sorted_data/dvd/all.review'
    electronics_file_path = path + 'sorted_data/electronics/all.review'
    gourmet_food_file_path = path + 'sorted_data/gourmet_food/all.review'
    grocery_file_path = path + 'sorted_data/grocery/all.review'
    health_personal_care_file_path = path + 'sorted_data/health_&_personal_care/all.review'
    jewelry_watches_file_path = path + 'sorted_data/jewelry_&_watches/all.review'
    kitchen_housewares_file_path = path + 'sorted_data/kitchen_&_housewares/all.review'
    magazines_file_path = path + 'sorted_data/magazines/all.review'
    music_file_path = path + 'sorted_data/music/all.review'
    musical_instruments_file_path = path + 'sorted_data/musical_instruments/all.review'
    office_products_file_path = path + 'sorted_data/office_products/all.review'
    outdoor_living_file_path = path + 'sorted_data/outdoor_living/all.review'
    software_file_path = path + 'sorted_data/software/all.review'
    sports_outdoors_file_path = path + 'sorted_data/sports_&_outdoors/all.review'
    tools_hardware_file_path = path + 'sorted_data/tools_&_hardware/all.review'
    toys_games_file_path = path + 'sorted_data/toys_&_games/all.review'
    video_file_path = path + 'sorted_data/video/all.review'

    @property
    def datasets(self):
        # Same accessor name as the other loaders in this file.
        return self.df

    def __init__(self, folder):
        """folder -- one of the domain names below (e.g. 'apparel', 'books')."""
        torchvision.datasets.utils.download_url(self.dataset_url, self.path)
        tar = tarfile.open(self.compressed_filename)
        tar.extractall(self.path)
        tar.close()
        # Map the requested domain name onto its raw review file.
        if folder == 'apparel': filename_path = self.apparel_file_path
        if folder == 'automotive': filename_path = self.automotive_file_path
        if folder == 'baby': filename_path = self.baby_file_path
        if folder == 'beauty': filename_path = self.beauty_file_path
        if folder == 'books': filename_path = self.books_file_path
        if folder == 'camera_photo': filename_path = self.camera_photo_file_path
        if folder == 'cell_phones_service': filename_path = self.cell_phones_service_file_path
        if folder == 'computer_video_games': filename_path = self.computer_video_games_file_path
        if folder == 'dvd': filename_path = self.dvd_file_path
        if folder == 'electronics': filename_path = self.electronics_file_path
        if folder == 'gourmet_food': filename_path = self.gourmet_food_file_path
        if folder == 'grocery': filename_path = self.grocery_file_path
        if folder == 'health_personal_care': filename_path = self.health_personal_care_file_path
        if folder == 'jewelry_watches': filename_path = self.jewelry_watches_file_path
        if folder == 'kitchen_housewares': filename_path = self.kitchen_housewares_file_path
        if folder == 'magazines': filename_path = self.magazines_file_path
        if folder == 'music': filename_path = self.music_file_path
        if folder == 'musical_instruments': filename_path = self.musical_instruments_file_path
        if folder == 'office_products': filename_path = self.office_products_file_path
        if folder == 'outdoor_living': filename_path = self.outdoor_living_file_path
        if folder == 'software': filename_path = self.software_file_path
        if folder == 'sports_outdoors': filename_path = self.sports_outdoors_file_path
        if folder == 'tools_hardware': filename_path = self.tools_hardware_file_path
        if folder == 'toys_games': filename_path = self.toys_games_file_path
        if folder == 'video': filename_path = self.video_file_path
        self.df = self.get_df(filename_path)

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx: int):
        """Return {'x': averaged embedding, 'y': star rating}; None when out of range."""
        item = self.df.iloc[idx]
        if idx < len(self):
            return {'x': item['x'], 'y': item['targets']}
        else:
            return None

    def get_df(self, path):
        """Return the embedded dataset, building and caching it (HDF5) on first use."""
        # Remove any stale temporary XML wrapper from a previous run.
        try:
            os.remove(path + '.xml')
        except:
            pass
        try:
            print('Trying to load processed file %s.h5 from disc...' % path)
            df = pd.read_hdf(path_or_buf=path + '.h5',
                             key='df')
        except:
            print('Processed file does not exists')
            print('Reading dataset into memory and applying Word2Vec...')
            # The raw file has no root element; wrap it so it parses as XML.
            with open(path + '.xml', 'w', encoding='utf-8-sig') as f:
                f.write('<amazonreview>')
                for line in open(path, 'rb'):
                    f.write(line.decode(encoding='utf-8-sig', errors='ignore'))
                f.write('</amazonreview>')
            # recover=True lets lxml tolerate the malformed markup.
            parser = etree.XMLParser(recover=True)
            with open(path + '.xml', 'r', encoding='utf-8-sig') as f:
                contents = f.read()
            tree = etree.fromstring(contents, parser=parser)
            df = {}
            tokenizer = TweetTokenizer()
            MyWord2Vec().get()
            line_count = 0
            pbar = tqdm(unit=' samples', total=len(tree.findall('review')) - 1)
            for review in tree.findall('review'):
                word_count = 0
                vector = np.zeros(MyWord2Vec().get().vector_size)
                try:
                    for word in tokenizer.tokenize(review.find('review_text').text):
                        try:
                            # Out-of-vocabulary words are simply skipped.
                            vector += MyWord2Vec().get()[word]
                            word_count += 1
                        except:
                            pass
                    if word_count > 0:
                        try:
                            score = int(float(review.find('rating').text.replace('\n', '')))
                            if type(score) is int:
                                df[line_count] = {'x': vector / word_count, 'targets': score}
                                line_count += 1
                                pbar.update(1)
                        except:
                            pass
                except:
                    pass
            pbar.close()
            print('Saving processed tokenized dataset in disc for future usage...')
            df = pd.DataFrame.from_dict(df, orient='index')
            df.to_hdf(path_or_buf=path + '.h5',
                      key='df',
                      mode='w',
                      format='table',
                      complevel=9,
                      complib='bzip2')
            df = pd.read_hdf(path_or_buf=path + '.h5',
                             key='df')
        # Clean up the temporary XML wrapper.
        try:
            os.remove(path + '.xml')
        except:
            pass
        return df
class MyCustomMNISTUSPSDataLoader(torch.utils.data.Dataset):
    """Concatenates several digit datasets behind a single Dataset interface.

    A global index runs through the wrapped datasets in order; each sample
    is returned as {'x': transformed image, 'y': label}.  Indices past the
    end yield None.
    """

    datasets = []
    transforms = None

    def __init__(self, datasets, transforms: "torchvision.transforms" = None):
        self.datasets = datasets
        self.transforms = transforms

    def __len__(self):
        total = 0
        for dataset in self.datasets:
            total += len(dataset)
        return total

    def __getitem__(self, idx: int):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        if idx >= len(self):
            return None
        # Shift the global index into the coordinates of the owning dataset.
        sample = None
        local_idx = idx
        for dataset in self.datasets:
            if local_idx < len(dataset):
                sample = dataset[local_idx]
                break
            local_idx -= len(dataset)
        x = sample[0]
        for transform in self.transforms:
            x = transform(x)
        return {'x': x, 'y': sample[1]}
class MyCustomCIFAR10STL10DataLoader(torch.utils.data.Dataset):
    """Serves CIFAR-10 / STL-10 images as frozen ResNet-18 feature vectors.

    To give both datasets a shared 9-class label space, the category with
    no counterpart in the other dataset is removed (label 6 in CIFAR-10,
    label 7 in STL-10) and the remaining labels are remapped to a common
    0..8 scheme in __getitem__.
    """

    datasets = []
    transforms = None
    resnet = None
    samples = None

    def __init__(self, datasets, transforms: torchvision.transforms = None):
        self.datasets = []
        # Pretrained ResNet-18 in eval mode with its classifier head replaced
        # by an identity, so forward() yields the pooled feature vector.
        self.resnet = torchvision.models.resnet18(pretrained=True)
        self.resnet.eval()
        self.resnet.fc_backup = self.resnet.fc
        self.resnet.fc = torch.nn.Sequential()
        # The isinstance checks refer to the subclasses defined further below
        # in this file; each stores labels under a different attribute name.
        if isinstance(self, CIFAR10):
            for dataset in datasets:
                # Remove label 6 so both datasets share the same 9 categories.
                idx_to_delete = np.where(np.array([dataset.targets]) == 6)[1]
                dataset.targets = list(np.delete(np.array(dataset.targets), idx_to_delete))
                dataset.data = np.delete(dataset.data, idx_to_delete, 0)
                self.datasets.append(dataset)
        elif isinstance(self, STL10):
            for dataset in datasets:
                # STL-10 keeps labels in `labels`; remove label 7 here.
                idx_to_delete = np.where(np.array([dataset.labels]) == 7)[1]
                dataset.labels = list(np.delete(np.array(dataset.labels), idx_to_delete))
                dataset.data = np.delete(dataset.data, idx_to_delete, 0)
                self.datasets.append(dataset)
        self.transforms = transforms

    def __len__(self):
        return sum(len(d) for d in self.datasets)

    def __getitem__(self, idx: int):
        """Return {'x': CPU ResNet-18 feature vector, 'y': remapped label}; None when out of range."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        offset = 0
        dataset_idx = 0
        sample = None
        # Walk the wrapped datasets to translate the global index.
        if idx < len(self):
            while sample is None:
                if idx < (offset + len(self.datasets[dataset_idx])):
                    sample = self.datasets[dataset_idx][idx - offset]
                else:
                    offset += len(self.datasets[dataset_idx])
                    dataset_idx += 1
        else:
            return None
        x = sample[0]
        for transform in self.transforms:
            x = transform(x)
        # Add the batch dimension expected by the network.
        x = x.unsqueeze(0)
        if torch.cuda.is_available():
            x = x.to('cuda')
            self.resnet.to('cuda')
        # Remap native labels onto the shared 0..8 scheme.  The removed class
        # (6 for CIFAR-10, 7 for STL-10) was filtered out in __init__, so
        # every remaining label hits one of these branches.
        if isinstance(self, CIFAR10):
            if sample[1] == 0:
                y = 0  # Airplane
            elif sample[1] == 1:
                y = 1  # Automobile
            elif sample[1] == 2:
                y = 2  # Bird
            elif sample[1] == 3:
                y = 3  # Cat
            elif sample[1] == 4:
                y = 4  # Deer
            elif sample[1] == 5:
                y = 5  # Dog
            elif sample[1] == 7:
                y = 6  # Horse
            elif sample[1] == 8:
                y = 7  # Ship
            elif sample[1] == 9:
                y = 8  # Truck
        elif isinstance(self, STL10):
            if sample[1] == 0:
                y = 0  # Airplane
            elif sample[1] == 1:
                y = 2  # Bird
            elif sample[1] == 2:
                y = 1  # Car
            elif sample[1] == 3:
                y = 3  # Cat
            elif sample[1] == 4:
                y = 4  # Deer
            elif sample[1] == 5:
                y = 5  # Dog
            elif sample[1] == 6:
                y = 6  # Horse
            elif sample[1] == 8:
                y = 7  # Ship
            elif sample[1] == 9:
                y = 8  # Truck
        # Inference only: no gradients; result always returned on CPU.
        with torch.no_grad():
            x = self.resnet(x)[0].to('cpu')
        return {'x': x, 'y': y}
class USPS(MyCustomMNISTUSPSDataLoader):
    """USPS digits, train and test splits concatenated."""

    def __init__(self, transform: "torchvision.transforms" = None):
        # The dataset mirror can fail TLS certificate validation.
        ssl._create_default_https_context = ssl._create_unverified_context
        splits = [
            torchvision.datasets.USPS(root='./data', train=True, download=True),
            torchvision.datasets.USPS(root='./data', train=False, download=True),
        ]
        super().__init__(splits, transform)
class MNIST(MyCustomMNISTUSPSDataLoader):
    """MNIST digits, train and test splits concatenated."""

    def __init__(self, transform: "torchvision.transforms" = None):
        # The dataset mirror can fail TLS certificate validation.
        ssl._create_default_https_context = ssl._create_unverified_context
        splits = [
            torchvision.datasets.MNIST(root='./data', train=True, download=True),
            torchvision.datasets.MNIST(root='./data', train=False, download=True),
        ]
        super().__init__(splits, transform)
class CIFAR10(MyCustomCIFAR10STL10DataLoader):
    """CIFAR-10, train and test splits concatenated (label 6 removed by the base class)."""

    def __init__(self, transform: "torchvision.transforms" = None):
        # The dataset mirror can fail TLS certificate validation.
        ssl._create_default_https_context = ssl._create_unverified_context
        splits = [
            torchvision.datasets.CIFAR10(root='./data', train=True, download=True),
            torchvision.datasets.CIFAR10(root='./data', train=False, download=True),
        ]
        super().__init__(splits, transform)
class STL10(MyCustomCIFAR10STL10DataLoader):
    """STL-10, train and test splits concatenated (label 7 removed by the base class)."""

    def __init__(self, transform: "torchvision.transforms" = None):
        # The dataset mirror can fail TLS certificate validation.
        ssl._create_default_https_context = ssl._create_unverified_context
        splits = [
            torchvision.datasets.STL10(root='./data', split='train', download=True),
            torchvision.datasets.STL10(root='./data', split='test', download=True),
        ]
        super().__init__(splits, transform)
class DataManipulator:
data = None
__number_samples = None
__number_features = None
__number_classes = None
__padding = 0
concept_drift_noise = None
n_concept_drifts = 1
def concept_drift(self, x, idx):
    """Apply the idx-th synthetic concept drift to feature vector x.

    idx == 0 is the original concept and returns x untouched.  Drifted
    concepts multiply x element-wise by a fixed per-drift noise vector and
    rescale the result back into x's original [min, max] range.
    """
    if idx == 0:
        return x

    def normalize(x, a: int = 0, b: int = 1):
        assert a < b
        return (b - a) * (x - np.min(x)) / (np.max(x) - np.min(x)) + a

    if self.concept_drift_noise is None:
        # Lazily build one noise vector per drift with a deterministic seed,
        # so every run (and every sample) sees the same drift pattern.
        self.concept_drift_noise = []
        for i in range(self.n_concept_drifts - 1):
            np.random.seed(seed=self.n_concept_drifts * self.n_concept_drifts + i)
            self.concept_drift_noise.append((np.random.rand(self.number_features())) + 1)  # Random on range [1, 2)
        np.random.seed(seed=None)
    return normalize(x * self.concept_drift_noise[idx - 1], np.min(x), np.max(x))
def number_classes(self, force_count: bool = False):
    """Number of target classes, computed once and cached.

    Probes the wrapped dataset's label range, trying each storage
    convention used by the loaders in this file in turn: torchvision-style
    `targets` lists, `targets` tensors, DataFrame columns `overall`
    (Amazon reviews), `demand` (bike sharing) or `targets` (NIPS/ACL/news
    loaders), and finally STL10-style `labels`.  Also caches the minimum
    label (used by get_x_y to shift labels to a 0-based index).
    """
    if self.__number_classes is None or force_count:
        try:
            self.__min_class = int(np.min([np.min(d.targets) for d in self.data.datasets]))
            self.__max_class = int(np.max([np.max(d.targets) for d in self.data.datasets]))
        except TypeError:
            # `targets` stored as torch tensors.
            self.__min_class = int(np.min([np.min(d.targets.numpy()) for d in self.data.datasets]))
            self.__max_class = int(np.max([np.max(d.targets.numpy()) for d in self.data.datasets]))
        except AttributeError:
            # `datasets` is a DataFrame rather than a list of datasets.
            try:
                self.__min_class = int(np.min(self.data.datasets.overall.values))
                self.__max_class = int(np.max(self.data.datasets.overall.values))
            except:
                try:
                    self.__min_class = int(np.min(self.data.datasets.demand.values))
                    self.__max_class = int(np.max(self.data.datasets.demand.values))
                except:
                    try:
                        self.__min_class = int(np.min(self.data.datasets.targets.values))
                        self.__max_class = int(np.max(self.data.datasets.targets.values))
                    except:
                        self.__min_class = int(np.min([np.min(d.labels) for d in self.data.datasets]))
                        self.__max_class = int(np.max([np.max(d.labels) for d in self.data.datasets]))
        self.__number_classes = len(range(self.__min_class, self.__max_class + 1))
        if isinstance(self.data, CIFAR10) or isinstance(self.data, STL10):
            # One class is filtered out of these datasets while the remaining
            # labels still span the full original range, so subtract it.
            self.__number_classes = self.__number_classes - 1
    return self.__number_classes
def number_features(self, force_count: bool = False, specific_sample: int = None):
    """Number of input features, inferred from one sample and cached.

    Passing `specific_sample` (or `force_count=True`) bypasses the cache
    and re-measures using that sample's feature vector.
    """
    must_refresh = (
        self.__number_features is None
        or force_count
        or specific_sample is not None
    )
    if must_refresh:
        probe_idx = 0 if specific_sample is None else specific_sample
        self.__number_features = int(np.prod(self.get_x(probe_idx).shape))
    return self.__number_features
def number_samples(self, force_count: bool = False):
    """Total sample count of the loaded dataset, cached after the first call."""
    if force_count or self.__number_samples is None:
        self.__number_samples = len(self.data)
    return self.__number_samples
def get_x_from_y(self, y: int, idx: int = 0, random_idx: bool = False):
    """Return the features of a sample whose one-hot label argmax equals y.

    With random_idx=True, samples indices uniformly until a match is found;
    otherwise scans forward from `idx`.  Note: loops forever if no sample
    of class y exists (same contract as the original implementation).
    """
    if random_idx:
        while True:
            candidate = np.random.randint(0, self.number_samples())
            features, label = self.get_x_y(candidate)
            if np.argmax(label) == y:
                return features
    while True:
        features, label = self.get_x_y(idx)
        if np.argmax(label) == y:
            return features
        idx += 1
def get_x_y(self, idx: int):
    """Return (flattened feature vector, one-hot label) for sample idx.

    Applies optional zero padding to image tensors and the synthetic
    concept drift that corresponds to the sample's position in the stream.
    """
    data = self.data[idx]
    if self.__padding > 0:
        # Zero-pad image tensors on all four sides before flattening.
        m = torch.nn.ConstantPad2d(self.__padding, 0)
        x = m(data['x']).flatten().numpy()
    else:
        if type(data['x']) is np.ndarray:
            x = data['x']
        else:
            # Torch tensor sample: flatten into a 1-D numpy vector.
            x = data['x'].flatten().numpy()
    y = np.zeros(self.number_classes())
    # number_classes() above also caches __min_class, used here to shift
    # labels to a 0-based one-hot index.
    y[int((data['y'] - self.__min_class))] = 1
    # The stream is split into n_concept_drifts equal segments; the segment
    # index selects which drift transformation to apply.
    x = self.concept_drift(x, int(idx / (self.number_samples() / self.n_concept_drifts)))
    return x, y
def get_x(self, idx: int):
    """Feature vector of sample idx (concept drift already applied)."""
    return self.get_x_y(idx)[0]
def get_y(self, idx: int):
    """One-hot label of sample idx."""
    return self.get_x_y(idx)[1]
def load_mnist(self, resize: int = None, n_concept_drifts: int = 1):
    """Load MNIST (train+test), optionally resizing images before ToTensor."""
    self.n_concept_drifts = n_concept_drifts
    pipeline = [torchvision.transforms.ToTensor()]
    if resize is not None:
        pipeline.insert(0, torchvision.transforms.Resize(resize))
    self.data = MNIST(pipeline)
def load_usps(self, resize: int = None, n_concept_drifts: int = 1):
    """Load USPS (train+test), optionally resizing images before ToTensor."""
    self.n_concept_drifts = n_concept_drifts
    pipeline = [torchvision.transforms.ToTensor()]
    if resize is not None:
        pipeline.insert(0, torchvision.transforms.Resize(resize))
    self.data = USPS(pipeline)
def load_cifar10(self, n_concept_drifts: int = 1):
    """Load CIFAR-10 (one class removed by the wrapper), resized to 224 px
    for the ResNet feature extractor.  ImageNet normalization intentionally
    omitted."""
    self.n_concept_drifts = n_concept_drifts
    pipeline = [
        torchvision.transforms.Resize(224),
        torchvision.transforms.ToTensor(),
    ]
    self.data = CIFAR10(pipeline)
def load_stl10(self, resize: int = None, n_concept_drifts: int = 1):
    """Load STL-10 (one class removed by the wrapper) as `self.data`.

    Bug fix: previously a non-None `resize` left `self.data` unset because
    the `if resize is None:` branch had no `else`, silently ignoring the
    argument.  `resize` now controls the image size, defaulting to the
    historical 224 px used by the ResNet feature extractor.  ImageNet
    normalization intentionally omitted, as before.
    """
    self.n_concept_drifts = n_concept_drifts
    size = 224 if resize is None else resize
    self.data = STL10([
        torchvision.transforms.Resize(size),
        torchvision.transforms.ToTensor(),
    ])
def load_london_bike_sharing(self, n_concept_drifts: int = 1):
    """Load the London bike-sharing demand dataset."""
    self.data = MyCustomBikeSharingDataLoader('london')
    self.n_concept_drifts = n_concept_drifts
def load_washington_bike_sharing(self, n_concept_drifts: int = 1):
    """Load the Washington bike-sharing demand dataset."""
    self.data = MyCustomBikeSharingDataLoader('washington')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_fashion(self, n_concept_drifts: int = 1):
    """Load the 'Amazon Fashion' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('AMAZON_FASHION_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_all_beauty(self, n_concept_drifts: int = 1):
    """Load the 'All Beauty' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('All_Beauty_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_appliances(self, n_concept_drifts: int = 1):
    """Load the 'Appliances' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Appliances_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_arts_crafts_sewing(self, n_concept_drifts: int = 1):
    """Load the 'Arts, Crafts and Sewing' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Arts_Crafts_and_Sewing_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_automotive(self, n_concept_drifts: int = 1):
    """Load the 'Automotive' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Automotive_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_books(self, n_concept_drifts: int = 1):
    """Load the 'Books' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Books_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_cds_vinyl(self, n_concept_drifts: int = 1):
    """Load the 'CDs and Vinyl' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('CDs_and_Vinyl_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_cellphones_accessories(self, n_concept_drifts: int = 1):
    """Load the 'Cell Phones and Accessories' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Cell_Phones_and_Accessories_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_clothing_shoes_jewelry(self, n_concept_drifts: int = 1):
    """Load the 'Clothing, Shoes and Jewelry' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Clothing_Shoes_and_Jewelry_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_digital_music(self, n_concept_drifts: int = 1):
    """Load the 'Digital Music' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Digital_Music_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_electronics(self, n_concept_drifts: int = 1):
    """Load the 'Electronics' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Electronics_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_gift_card(self, n_concept_drifts: int = 1):
    """Load the 'Gift Cards' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Gift_Cards_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_grocery_gourmet_food(self, n_concept_drifts: int = 1):
    """Load the 'Grocery and Gourmet Food' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Grocery_and_Gourmet_Food_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_home_kitchen(self, n_concept_drifts: int = 1):
    """Load the 'Home and Kitchen' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Home_and_Kitchen_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_industrial_scientific(self, n_concept_drifts: int = 1):
    """Load the 'Industrial and Scientific' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Industrial_and_Scientific_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_kindle_store(self, n_concept_drifts: int = 1):
    """Load the 'Kindle Store' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Kindle_Store_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_luxury_beauty(self, n_concept_drifts: int = 1):
    """Load the 'Luxury Beauty' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Luxury_Beauty_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_magazine_subscription(self, n_concept_drifts: int = 1):
    """Load the 'Magazine Subscriptions' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Magazine_Subscriptions_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_movies_tv(self, n_concept_drifts: int = 1):
    """Load the 'Movies and TV' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Movies_and_TV_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_musical_instruments(self, n_concept_drifts: int = 1):
    """Load the 'Musical Instruments' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Musical_Instruments_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_office_products(self, n_concept_drifts: int = 1):
    """Load the 'Office Products' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Office_Products_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_patio_lawn_garden(self, n_concept_drifts: int = 1):
    """Load the 'Patio, Lawn and Garden' 5-core review dataset."""
    self.data = MyCustomAmazonReviewDataLoader('Patio_Lawn_and_Garden_5.json.gz')
    self.n_concept_drifts = n_concept_drifts
def load_amazon_review_pet_supplies(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Pet_Supplies_5.json.gz')
def load_amazon_review_prime_pantry(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Prime_Pantry_5.json.gz')
def load_amazon_review_software(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Software_5.json.gz')
def load_amazon_review_sports_outdoors(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Sports_and_Outdoors_5.json.gz')
def load_amazon_review_tools_home_improvements(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Tools_and_Home_Improvement_5.json.gz')
def load_amazon_review_toys_games(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Toys_and_Games_5.json.gz')
def load_amazon_review_video_games(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewDataLoader('Video_Games_5.json.gz')
def load_amazon_review_nips_books(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewNIPSDataLoader('books')
def load_amazon_review_nips_dvd(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewNIPSDataLoader('dvd')
def load_amazon_review_nips_electronics(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewNIPSDataLoader('electronics')
def load_amazon_review_nips_kitchen(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewNIPSDataLoader('kitchen')
def load_amazon_review_acl_apparel(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('apparel')
def load_amazon_review_acl_automotive(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('automotive')
def load_amazon_review_acl_baby(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('baby')
def load_amazon_review_acl_beauty(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('beauty')
def load_amazon_review_acl_books(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('books')
def load_amazon_review_acl_camera_photo(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('camera_photo')
def load_amazon_review_acl_cell_phones_service(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('cell_phones_service')
def load_amazon_review_acl_computer_video_games(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('computer_video_games')
def load_amazon_review_acl_dvd(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('dvd')
def load_amazon_review_acl_electronics(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('electronics')
def load_amazon_review_acl_gourmet_food(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('gourmet_food')
def load_amazon_review_acl_grocery(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('grocery')
def load_amazon_review_acl_health_personal_care(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('health_personal_care')
def load_amazon_review_acl_jewelry_watches(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('jewelry_watches')
def load_amazon_review_acl_kitchen_housewares(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('kitchen_housewares')
def load_amazon_review_acl_magazines(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('magazines')
def load_amazon_review_acl_music(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('music')
def load_amazon_review_acl_musical_instruments(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('musical_instruments')
def load_amazon_review_acl_office_products(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('office_products')
def load_amazon_review_acl_outdoor_living(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('outdoor_living')
def load_amazon_review_acl_software(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('software')
def load_amazon_review_acl_sports_outdoors(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('sports_outdoors')
def load_amazon_review_acl_tools_hardware(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('tools_hardware')
def load_amazon_review_acl_toys_games(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('toys_games')
def load_amazon_review_acl_video(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomAmazonReviewACLDataLoader('video')
def load_news_popularity_obama_all(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('obama', 'all')
def load_news_popularity_economy_all(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('economy', 'all')
def load_news_popularity_microsoft_all(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('microsoft', 'all')
def load_news_popularity_palestine_all(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('palestine', 'all')
def load_news_popularity_obama_facebook(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('obama', 'Facebook')
def load_news_popularity_economy_facebook(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('economy', 'Facebook')
def load_news_popularity_microsoft_facebook(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('microsoft', 'Facebook')
def load_news_popularity_palestine_facebook(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('palestine', 'Facebook')
def load_news_popularity_obama_googleplus(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('obama', 'GooglePlus')
def load_news_popularity_economy_googleplus(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('economy', 'GooglePlus')
def load_news_popularity_microsoft_googleplus(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('microsoft', 'GooglePlus')
def load_news_popularity_palestine_googleplus(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('palestine', 'GooglePlus')
def load_news_popularity_obama_linkedin(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('obama', 'LinkedIn')
def load_news_popularity_economy_linkedin(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('economy', 'LinkedIn')
def load_news_popularity_microsoft_linkedin(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('microsoft', 'LinkedIn')
def load_news_popularity_palestine_linkedin(self, n_concept_drifts: int = 1):
self.n_concept_drifts = n_concept_drifts
self.data = MyCustomNewsPopularityDataLoader('palestine', 'LinkedIn') | 44.822795 | 120 | 0.596488 | 53,475 | 0.992539 | 104 | 0.00193 | 413 | 0.007666 | 0 | 0 | 7,934 | 0.147261 |
dc40fe59f59a392bcc67c51aae722672e0bfc90e | 8,263 | py | Python | functions/email_habit_survey.py | jamesshapiro/aws-habit-tracker | bbae9866dc4ce744832e42d02a997fc9bebda517 | [
"MIT"
] | null | null | null | functions/email_habit_survey.py | jamesshapiro/aws-habit-tracker | bbae9866dc4ce744832e42d02a997fc9bebda517 | [
"MIT"
] | null | null | null | functions/email_habit_survey.py | jamesshapiro/aws-habit-tracker | bbae9866dc4ce744832e42d02a997fc9bebda517 | [
"MIT"
] | null | null | null | import os
import json
import boto3
import datetime
import hashlib
import secrets
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
table_name = os.environ['DDB_TABLE']
ses_client = boto3.client('ses')
ddb_client = boto3.client('dynamodb')
unsubscribe_url = os.environ['UNSUBSCRIBE_URL']
config_set_name = os.environ['CONFIG_SET_NAME']
months = {
'01': 'January',
'02': 'February',
'03': 'March',
'04': 'April',
'05': 'May',
'06': 'June',
'07': 'July',
'08': 'August',
'09': 'September',
'10': 'October',
'11': 'November',
'12': 'December'
}
days = {
'01':'1st', '02':'2nd',
'03':'3rd', '04':'4th',
'05':'5th', '06':'6th',
'07':'7th', '08':'8th',
'09':'9th', '10':'10th',
'11':'11th', '12':'12th',
'13':'13th', '14':'14th',
'15':'15th', '16':'16th',
'17':'17th', '18':'18th',
'19':'19th', '20':'20th',
'21':'21st', '22':'22nd',
'23':'23rd', '24':'24th',
'25':'25th', '26':'26th',
'27':'27th', '28':'28th',
'29':'29th', '30':'30th',
'31':'31st'
}
paginator = ddb_client.get_paginator('query')
sender = os.environ['SENDER']
def get_subscribers(event):
    """Return the list of subscriber addresses to mail.

    If the triggering *event* names a single user, only that user is returned;
    otherwise every item under the SUBSCRIBED partition key is paged in from
    DynamoDB and its SK1 (the e-mail address) collected.
    """
    if 'user' in event:
        return [event['user']]
    pages = paginator.paginate(
        TableName=table_name,
        KeyConditionExpression='#pk1=:pk1',
        ExpressionAttributeNames={'#pk1': 'PK1'},
        ExpressionAttributeValues={':pk1': {'S': 'SUBSCRIBED'}},
    )
    # Flatten all result pages into one list of addresses.
    return [item['SK1']['S'] for page in pages for item in page['Items']]
def get_token():
    """Return an unguessable 64-character hex token.

    SHA-256 of 4096 cryptographically random bytes (``secrets``), hex-encoded.
    """
    return hashlib.sha256(secrets.token_bytes(4096)).hexdigest()
def _build_survey_email_html(survey_link, unsubscribe_link, year):
    """Render the HTML body of the daily survey e-mail."""
    return f"""
    <!DOCTYPE html>
    <html lang="en" xmlns="http://www.w3.org/1999/xhtml" xmlns:o="urn:schemas-microsoft-com:office:office">
    <head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width,initial-scale=1">
    <meta name="x-apple-disable-message-reformatting">
    <title></title>
    <!--[if mso]>
    <noscript>
    <xml>
    <o:OfficeDocumentSettings>
    <o:PixelsPerInch>96</o:PixelsPerInch>
    </o:OfficeDocumentSettings>
    </xml>
    </noscript>
    <![endif]-->
    <style>
    table, td, div, h1, p {{font-family: Arial, sans-serif;}}
    </style>
    </head>
    <body style="margin:0;padding:0;">
    <table role="presentation" style="width:100%;border-collapse:collapse;border:0;border-spacing:0;background:#ffffff;">
    <tr>
    <td align="center" style="padding:0;">
    <table role="presentation" style="width:602px;border-collapse:collapse;border:1px solid #cccccc;border-spacing:0;text-align:left;">
    <tr>
    <td align="center" style="padding:40px 0 30px 0;background:#70bbd9;">
    <img src="https://cdkhabits-surveygithabitcombucket4f6ffd5a-1mwnd3a635op9.s3.amazonaws.com/cropped.png" alt="" width="300" style="height:auto;display:block;" />
    </td>
    </tr>
    <tr>
    <td style="padding:36px 30px 42px 30px;">
    <table role="presentation" style="width:100%;border-collapse:collapse;border:0;border-spacing:0;">
    <tr>
    <td style="padding:0 0 36px 0;color:#153643;">
    <h1 style="font-size:40px;margin:0 0 20px 0;font-family:Arial,sans-serif;">Today's Habit Survey!</h1>
    <p style="margin:40px 0 0 0;font-size:36px;line-height:30px;font-family:Arial,sans-serif;">Click <a href="{survey_link}" style="font-weight:bold;color:#ee4c50;text-decoration:underline;">HERE</a> to fill it out
    </td>
    </tr>
    <tr>
    <td style="padding:0 0 0 0;color:#153643;">
    <li style="margin:0 0 12px 0;font-size:30px;line-height:24px;font-family:Arial,sans-serif;">The survey expires 💣</a></li>
    <li style="margin:0 0 12px 0;font-size:30px;line-height:24px;font-family:Arial,sans-serif;">Complete it before time runs out! ⏰</a></li>
    <li style="margin:0 0 12px 0;font-size:30px;line-height:24px;font-family:Arial,sans-serif;">Depending on your timezone <span style="font-size:20px;">🌐</span>, it may take up to 24 hours for daily results to appear on the grid.</a></li>
    </td>
    </tr>
    </table>
    </td>
    </tr>
    <tr>
    <td style="padding:30px;background:#ee4c50;">
    <table role="presentation" style="width:100%;border-collapse:collapse;border:0;border-spacing:0;font-size:9px;font-family:Arial,sans-serif;">
    <tr>
    <td style="padding:0;width:50%;" align="left">
    <p style="margin:0;font-size:14px;line-height:16px;font-family:Arial,sans-serif;color:#ffffff;">
    GitHabit {year}<br/><a href="{unsubscribe_link}" style="color:#ffffff;text-decoration:underline;">Unsubscribe</a>
    </p>
    </td>
    <td style="padding:0;width:50%;" align="right">
    <table role="presentation" style="border-collapse:collapse;border:0;border-spacing:0;">
    <tr>
    <td style="padding:0 0 0 10px;width:38px;">
    <a href="https://githabit.com/" style="color:#ffffff;"><img src="https://cdkhabits-surveygithabitcombucket4f6ffd5a-1mwnd3a635op9.s3.amazonaws.com/blue-rabbit.png" alt="GitHabit" width="38" style="height:auto;display:block;border:0;" /></a>
    </td>
    </tr>
    </table>
    </td>
    </tr>
    </table>
    </td>
    </tr>
    </table>
    </td>
    </tr>
    </table>
    </body>
    </html>
    """


def _build_mime_message(subscriber, html_body, unsubscribe_link, subject):
    """Assemble the MIME message, including one-click unsubscribe headers (RFC 8058)."""
    msg = MIMEMultipart()
    msg["Subject"] = subject
    msg["From"] = f'GitHabit.com <{sender}>'
    msg["To"] = subscriber
    msg.attach(MIMEText(html_body, "html"))
    msg['Reply-To'] = 'GitHabit <yes-reply@mail.githabit.com>'
    mail_unsubscribe_link = 'mailto: unsubscribe@mail.githabit.com?subject=unsubscribe'
    # Both mailto and HTTPS unsubscribe targets, plus the one-click POST marker.
    msg.add_header('List-Unsubscribe', f'<{mail_unsubscribe_link}>, <{unsubscribe_link}>')
    msg.add_header('List-Unsubscribe-Post', 'List-Unsubscribe=One-Click')
    return msg


def lambda_handler(event, context):
    """Send today's habit survey e-mail to every subscriber.

    For each subscriber: mint a one-time survey token, persist it to DynamoDB
    (with a 3-day TTL), look up the subscriber's unsubscribe token, render the
    HTML mail and send it through SES.

    :param event: Lambda trigger payload; may contain 'user' to mail one person.
    :param context: Lambda context (unused).
    :return: a 200 response dict.
    """
    # Survey tokens stay valid for three days (epoch seconds for DynamoDB TTL).
    three_days_from_now = int(time.time()) + 259200
    # Naive EST approximation: UTC-5, ignores DST -- TODO confirm intended tz handling.
    est_time_delta = datetime.timedelta(hours=5)
    subscribers = get_subscribers(event)
    print(f'{subscribers=}')
    for subscriber in subscribers:
        print(f'{subscriber=}')
        now = datetime.datetime.now() - est_time_delta
        token = get_token()
        year = str(now.year)
        month = str(now.month).zfill(2)
        day = str(now.day).zfill(2)
        date_string = f'{year}-{month}-{day}'
        survey_link = f'https://survey.githabit.com/?token={token}&date_string={date_string}'
        # Persist the one-time token so the survey endpoint can validate it.
        ddb_client.put_item(
            TableName=table_name,
            Item={
                'PK1': {'S': 'TOKEN'},
                'SK1': {'S': f'TOKEN#{token}'},
                'USER': {'S': f'USER#{subscriber}'},
                'DATE_STRING': {'S': date_string},
                'TTL_EXPIRATION': {'N': str(three_days_from_now)},
            },
        )
        unsubscribe_token = ddb_client.get_item(
            TableName=table_name,
            Key={'PK1': {'S': 'USER#USER'}, 'SK1': {'S': f'USER#{subscriber}'}},
        )['Item']['UNSUBSCRIBE_TOKEN']['S']
        unsubscribe_link = f'{unsubscribe_url}?token={unsubscribe_token}'
        html_body = _build_survey_email_html(survey_link, unsubscribe_link, year)
        msg = _build_mime_message(
            subscriber,
            html_body,
            unsubscribe_link,
            subject=f'📆🐇 Habits Survey: {months[month]} {days[day]}, {year}',
        )
        response = ses_client.send_raw_email(
            Source=f'GitHabit.com <{sender}>',
            Destinations=[subscriber],
            RawMessage={"Data": msg.as_string()},
            ConfigurationSetName=config_set_name,
        )
        print(f'{response=}')
    return {
        'statusCode': 200,
        'body': 'shalom haverim!'
    }
| 40.11165 | 265 | 0.572673 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,561 | 0.671862 |
dc410688a5a351759d55b32e0056ca7bc9e0f5bc | 2,120 | py | Python | doc/generate_examples_rst.py | stj/asynctest | 4b1284d6bab1ae90a6e8d88b882413ebbb7e5dce | [
"Apache-2.0"
] | null | null | null | doc/generate_examples_rst.py | stj/asynctest | 4b1284d6bab1ae90a6e8d88b882413ebbb7e5dce | [
"Apache-2.0"
] | null | null | null | doc/generate_examples_rst.py | stj/asynctest | 4b1284d6bab1ae90a6e8d88b882413ebbb7e5dce | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import fnmatch
import pathlib
import os.path
import re
import logging
logging.basicConfig(level=logging.INFO)
INCLUDED_SOURCES = ("*.py", )
EXCLUDED_SOURCES = ("__*__.py", )
INCLUDED_SOURCES_REGEX = tuple(re.compile(fnmatch.translate(pattern))
for pattern in INCLUDED_SOURCES)
EXCLUDED_SOURCES_REGEX = tuple(re.compile(fnmatch.translate(pattern))
for pattern in EXCLUDED_SOURCES)
def include_file(filename):
    """True when *filename* matches an include pattern and no exclude pattern."""
    included = any(rx.match(filename) for rx in INCLUDED_SOURCES_REGEX)
    excluded = any(rx.match(filename) for rx in EXCLUDED_SOURCES_REGEX)
    return included and not excluded
def list_examples(src_dir):
    """Return sorted (directory, filename) pairs for every example under *src_dir*.

    Directories are returned as :class:`pathlib.Path`; only files accepted by
    :func:`include_file` are listed.
    """
    examples = []
    for dirname, _, filenames in os.walk(src_dir):
        for filename in filenames:
            if include_file(filename):
                examples.append((pathlib.Path(dirname), filename))
    # Fix: removed dead local `index_contents = []` that was assigned here and
    # never used (leftover from an earlier refactor).
    return sorted(examples)
def generate_examples_rst(src_dir="examples/"):
    """Write index.rst plus one .rst stub (a literalinclude) per example under *src_dir*."""
    examples = list_examples(src_dir)
    # Generate the index
    logging.info("Creating index file")
    with open(os.path.join(src_dir, "index.rst"), "w") as index:
        index.write(
            "List of code examples\n"
            "---------------------\n"
            "\n"
            ".. toctree::\n"
            "\n"
        )
        for example_dirname, example_filename in examples:
            # Path relative to src_dir; used both as the toctree entry and as
            # the name of the generated .rst stub.
            example_pathname = os.path.join(
                example_dirname.relative_to(src_dir),
                example_filename)
            rst_filename = os.path.join(src_dir, f"{example_pathname}.rst")
            index.write(f"    {example_pathname}\n")
            logging.info("generating file for %s", example_pathname)
            with open(rst_filename, "w") as example_rst:
                # Title, matching underline, then the literal source inclusion.
                example_rst.write(
                    f"``{example_pathname}``\n"
                    f"{'-' * (len(example_pathname) + 4)}\n\n"
                    f".. literalinclude:: {example_filename}\n"
                )
    logging.info("index and source file generated")
if __name__ == "__main__":
    # Script entry point: rebuild the examples index and per-example .rst stubs.
    generate_examples_rst()
| 29.859155 | 79 | 0.601415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 404 | 0.190566 |
dc4169e69d3e627ccb8330a57d4c07a8100f55e9 | 1,609 | py | Python | ml-conversational-analytic-tool/runDataExtraction.py | difince/ml-conversational-analytic-tool | 69d04d4d49ec352ab8111d5ecade8b6703bea182 | [
"Apache-2.0"
] | 1 | 2022-01-18T19:55:17.000Z | 2022-01-18T19:55:17.000Z | ml-conversational-analytic-tool/runDataExtraction.py | mkbhanda/ml-conversational-analytic-tool | f8c1c46f1bfa24de7212ecfe2c811109ad338a00 | [
"Apache-2.0"
] | 1 | 2022-01-31T05:22:58.000Z | 2022-01-31T18:39:46.000Z | ml-conversational-analytic-tool/runDataExtraction.py | mkbhanda/ml-conversational-analytic-tool | f8c1c46f1bfa24de7212ecfe2c811109ad338a00 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
import os
from githubDataExtraction import GithubDataExtractor
def getRepos(access_token, organization, reaction):
    """
    Method to extract data for all repositories in organization
    """
    client = GithubDataExtractor(access_token)  # authenticated extractor
    for repo in client.g_ses.get_organization(organization).get_repos():
        print("Starting: {}".format(repo.name))
        client.openRepo(organization, repo.name)
        client.getAllPulls("", reaction)
def getRepo(access_token, organization, reponame, reaction):
    """
    Method to extract data for an individual repository
    """
    client = GithubDataExtractor(access_token)  # authenticated extractor
    client.openRepo(organization, reponame)
    client.getAllPulls("", reaction)
if __name__ == "__main__":
    # CLI: `<org> <repo|all> [-reactions]` -- extracts pull data for one repo
    # or for every repo owned by the organization.
    parser = argparse.ArgumentParser(description='Create csv for all pulls in each repo for the organzation')
    parser.add_argument('organization', help='Organization the repo belongs to.')
    parser.add_argument('repo', help='Repo name or all if all repos in organization')
    parser.add_argument('-reactions', action='store_true', default=False, help='Flag to extract reactions')
    args = parser.parse_args()
    ACCESS_TOKEN = os.environ["GITACCESS"]  # Access Github token from environment for security purposes
    if args.repo == 'all':
        getRepos(ACCESS_TOKEN, args.organization, args.reactions)
    else:
        getRepo(ACCESS_TOKEN, args.organization, args.repo, args.reactions)
| 37.418605 | 109 | 0.729646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 555 | 0.344935 |
dc417f56585a0b2838aab9795d09b29f9bf14f6a | 2,867 | py | Python | src/features/text/build_text_features.py | ClaasM/VideoArticleRetrieval | 1948887d34b53bdb7a2ec473b31401b395c54e63 | [
"MIT"
] | null | null | null | src/features/text/build_text_features.py | ClaasM/VideoArticleRetrieval | 1948887d34b53bdb7a2ec473b31401b395c54e63 | [
"MIT"
] | null | null | null | src/features/text/build_text_features.py | ClaasM/VideoArticleRetrieval | 1948887d34b53bdb7a2ec473b31401b395c54e63 | [
"MIT"
] | null | null | null | """
Reads (article_id, [tokens]) from tokens.pickle and writes:
(article_id, w2v)
(article_id, bow)
"""
import json
import sys
import os
import pickle
import psycopg2
from multiprocessing.pool import Pool
import numpy as np
import zlib
# from gensim.models import Word2Vec
from gensim.models import Word2Vec, KeyedVectors
import src
# W2V_FILE = os.environ["MODEL_PATH"] + "/word2vec.model"
from src.visualization.console import StatusVisualization
VOCABULARY_FILE = os.environ["DATA_PATH"] + "/interim/articles/vocabulary.pickle"
W2V_FILE = os.environ["MODEL_PATH"] + "/word2vec.model"
vocabulary = pickle.load(open(VOCABULARY_FILE, "rb"))
def init_worker():
    """Pool initializer: load the Word2Vec vectors once per worker process."""
    # Stored as a process-global so the feature functions can reuse it
    # without reloading the model for every article.
    global model
    model = KeyedVectors.load(W2V_FILE)
def count_tokens(tokens):
    """Return a zlib-compressed bag-of-words vector over the global vocabulary.

    Tokens outside the vocabulary are ignored; the vector preserves the
    vocabulary's iteration order.
    """
    counter = {word: 0 for word in vocabulary}
    for token in tokens:
        if token in counter:
            counter[token] += 1
    counts = np.array([counter[word] for word in vocabulary], dtype=np.float32)
    return zlib.compress(counts, 9)
def w2v_embed(tokens):
    """Return the zlib-compressed mean word2vec embedding (2048-dim) of *tokens*.

    Tokens unknown to the model contribute nothing to the sum, but the mean is
    still taken over len(tokens) (guarded against division by zero).
    """
    total = np.zeros(2048, dtype=np.float32)
    denominator = len(tokens) or 1
    for token in tokens:
        if token in model:  # Word2Vec model filters some token
            total += model[token]
    return zlib.compress(total / denominator, 9)
MIN_TOKENS = 50
def extract_features(article):
article_id, tokens_string = article
tokens = json.loads(tokens_string)
if len(tokens) > MIN_TOKENS:
return "Success", article_id, count_tokens(tokens), w2v_embed(tokens)
else:
return "Too few tokens", article_id, None, None
def run():
    """Stream tokenized articles from Postgres, extract features in a worker
    pool, and write the compressed vectors back to the articles table."""
    conn = psycopg2.connect(database="video_article_retrieval", user="postgres")
    article_cursor = conn.cursor()
    update_cursor = conn.cursor()
    article_cursor.execute("SELECT count(1) FROM articles WHERE text_extraction_status='Success'")
    article_count, = article_cursor.fetchone()
    # avoid loading all articles into memory.
    article_cursor.execute("SELECT id, tokens FROM articles WHERE text_extraction_status='Success'")
    crawling_progress = StatusVisualization(article_count, update_every=1000)
    # Each worker loads the Word2Vec model once via init_worker.
    with Pool(8, initializer=init_worker) as pool:
        for status, article_id, compressed_bow, compressed_w2v in pool.imap_unordered(extract_features, article_cursor):
            if status == 'Success':
                update_cursor.execute(
                    "UPDATE articles SET bow_2048=%s, w2v_2048=%s, feature_extraction_status='Success' WHERE id=%s",
                    [compressed_bow, compressed_w2v, article_id])
            else:
                # Record why extraction was skipped (e.g. "Too few tokens").
                update_cursor.execute(
                    "UPDATE articles SET feature_extraction_status=%s WHERE id=%s",
                    [status, article_id])
            crawling_progress.inc()
    # Single commit at the end -- all updates land in one transaction.
    conn.commit()
if __name__ == '__main__':
    # Script entry point: run the full feature-extraction pass.
    run()
| 31.163043 | 120 | 0.690617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 731 | 0.25497 |
dc420f3bc704858dc4746abe2044b41c94d6e866 | 212 | py | Python | 2. funcoes/angulo_de_regracao.py | andrebrito16/python-academy | 544516048c0a2f8cea42ef0f252b9c40e8f5b141 | [
"MIT"
] | 1 | 2021-08-19T19:40:14.000Z | 2021-08-19T19:40:14.000Z | 2. funcoes/angulo_de_regracao.py | andrebrito16/python-academy | 544516048c0a2f8cea42ef0f252b9c40e8f5b141 | [
"MIT"
] | null | null | null | 2. funcoes/angulo_de_regracao.py | andrebrito16/python-academy | 544516048c0a2f8cea42ef0f252b9c40e8f5b141 | [
"MIT"
] | null | null | null | from math import sin, radians, asin, degrees
def snell_descartes(n1, n2, teta1):
    """Return the refraction angle (degrees) given by Snell's law.

    Snell-Descartes: n1 * sin(theta1) = n2 * sin(theta2), with *teta1* in
    degrees. Raises ValueError (from asin) under total internal reflection.
    """
    sin_teta2 = n1 * sin(radians(teta1)) / n2
    return degrees(asin(sin_teta2))
| 23.555556 | 44 | 0.636792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.212264 |
dc426cc10b9151c44106da452cb4ef6089152e5e | 3,972 | py | Python | project/GUI/SlaveGUI/PresentationLayout.py | RemuTeam/Remu | a7d100ff9002b1b1d27249f8adf510b5a89c09e3 | [
"MIT"
] | 2 | 2017-09-18T11:04:38.000Z | 2017-09-25T17:23:21.000Z | project/GUI/SlaveGUI/PresentationLayout.py | RemuTeam/Remu | a7d100ff9002b1b1d27249f8adf510b5a89c09e3 | [
"MIT"
] | 26 | 2017-09-20T09:11:10.000Z | 2017-12-11T12:21:56.000Z | project/GUI/SlaveGUI/PresentationLayout.py | RemuTeam/Remu | a7d100ff9002b1b1d27249f8adf510b5a89c09e3 | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.properties import StringProperty
from kivy.uix.screenmanager import Screen
from Constants.ContentType import ContentType
from Domain.PresentationElement import PresentationElement
class PresentationLayout(Screen):
    """
    Fullscreen layout for presenting content
    """
    # Kivy properties bound from the kv file: current picture path, text body
    # and video path of the element being shown.
    image_source = StringProperty('')
    text_element = StringProperty('')
    video_source = StringProperty('')

    def __init__(self, **kwargs):
        """
        In the constructor the class and instance are passed
        to the superclass' __init__ function
        """
        super(PresentationLayout, self).__init__(**kwargs)
        self.slave = None  # service-mode controller; assigned in on_enter
        # Black placeholder element shown before the first real content.
        self.start_screen = PresentationElement(ContentType.Image, "background/black_as_kivys_soul.png")

    def go_back(self):
        """Abort the running presentation and return to the initial state."""
        print("Ugh. Fine.")
        self.slave.presentation_ended = True
        self.reset_presentation()

    def on_pre_enter(self, *args):
        """
        Called when the transition to this screen starts
        :param args:
        :return:
        """
        self.hide_widgets()

    def on_enter(self, *args):
        """
        Called when the transition to this screen is in progress
        :param args:
        :return:
        """
        self.slave = App.get_running_app().servicemode
        self.slave.set_layout(self)
        self.set_visible_widget(self.start_screen)
        self.slave.reset_presentation()

    def init_presentation(self):
        """
        Do not delete! Called when starting presentation if the slave is already in presentation
        :return: Nothing
        """
        pass

    def set_visible_widget(self, element):
        """
        Sets the visible widget according to the PresentationElement given as parameter
        :param element: a PresentationElement object
        """
        self.hide_widgets()
        # NOTE(review): `text_field`, `picture` and `video` ids come from the
        # kv file, which is not visible here.
        if element.element_type == ContentType.Text:
            self.text_element = element.get_content()
            self.show_widget(self.ids.text_field)
        elif element.element_type == ContentType.Image:
            self.image_source = element.get_content()
            self.show_widget(self.ids.picture)
        elif element.element_type == ContentType.Video:
            self.video_source = element.get_content()
            self.show_widget(self.ids.video)
            self.ids.video.state = 'play'  # start playback immediately

    def show_widget(self, widget):
        """
        Shows a given widget. Unlike in the hiding, height doesn't need to be modified when showing widget. Otherwise
        acts as a counter-method for the hide_widget method.
        :param widget: the widget to show
        :return: Nothing
        """
        widget.opacity = 1
        widget.size_hint_y = 1

    def hide_widgets(self):
        """
        Hides all of the different type of PresentationElement widgets.
        :return: Nothing
        """
        self.hide_widget(self.ids.picture)
        self.hide_widget(self.ids.text_field)
        self.hide_widget(self.ids.video)

    def hide_widget(self, widget):
        """
        Hides a widget. Size_hint_y and height are set to zero, so that the widgets do not take up space in the screen.
        Opacity is also set to zero, in case the widget would be still visible (text-elements need this to be hidden
        properly)
        :param widget: the Widget to hide
        :return: Nothing
        """
        widget.opacity = 0
        widget.size_hint_y = 0
        widget.height = '0dp'

    def reset_presentation(self):
        """
        Resets the presentation to the starting state
        """
        self.ids.picture.source = ''
        self.get_root_window().show_cursor = True
        self.slave.reset_presentation()
        self.parent.get_screen('slave_gui_layout').set_info_text("Presentation ended\nCurrently in slave mode")
        App.get_running_app().root.change_screen_to("slave_gui_layout")

    def error(self, message, exception):
        # NOTE(review): intentionally a no-op; presumably required by the
        # slave/service callback interface -- confirm before removing.
        pass
| 33.661017 | 119 | 0.641994 | 3,754 | 0.945116 | 0 | 0 | 0 | 0 | 0 | 0 | 1,626 | 0.409366 |
dc445375bb74810932aacb859d24dca941cfcf06 | 1,451 | py | Python | src/api/serializers.py | mp5maker/djangoninja | ba87bbf0b62a6842087e6cc4de087456bde3a06b | [
"MIT"
] | null | null | null | src/api/serializers.py | mp5maker/djangoninja | ba87bbf0b62a6842087e6cc4de087456bde3a06b | [
"MIT"
] | 5 | 2020-06-05T19:24:11.000Z | 2022-03-11T23:33:29.000Z | src/api/serializers.py | mp5maker/djangoninja | ba87bbf0b62a6842087e6cc4de087456bde3a06b | [
"MIT"
] | null | null | null | from rest_framework.serializers import (
ModelSerializer,
HyperlinkedIdentityField,
SerializerMethodField,
ValidationError,
)
from django_elasticsearch_dsl_drf.serializers import DocumentSerializer
from .documents import (
ArticleDocument
)
from django.contrib.auth.models import User
from articles.models import Article
class UserSerializer(ModelSerializer):
    """Serializer exposing the standard Django auth User fields (password excluded)."""
    class Meta:
        model = User
        fields = (
            'id',
            'last_login',
            'is_superuser',
            'username',
            'first_name',
            'last_name',
            'email',
            'is_staff',
            'is_active',
            'date_joined',
        )
class ArticleSerializer(ModelSerializer):
    """Article serializer adding the author's username and a link to the author API view."""
    # Author's username, resolved by get_author below.
    author = SerializerMethodField()
    # NOTE(review): lookup_field="id" resolves against the serialized Article
    # instance's id -- confirm this matches what the user-detail route expects.
    author_url = HyperlinkedIdentityField(
        view_name="api:user-detail-view",
        lookup_field="id",
    )

    class Meta:
        model = Article
        fields = (
            'id',
            'slug',
            'title',
            'body',
            'date',
            'thumbnail',
            'author',
            'author_url',
        )

    def get_author(self, obj):
        """Return the article author's username as a plain string."""
        return str(obj.author.username)
class ArticleDocumentSerializer(DocumentSerializer):
    """Serializer for Article Elasticsearch documents (search results)."""
    class Meta:
        document = ArticleDocument
        # Fix: the attribute was misspelled `field`, which DocumentSerializer
        # ignores -- the intended field whitelist never applied.
        fields = (
            'id',
            'slug',
            'title',
            'body',
            'date'
        )
| 21.338235 | 71 | 0.53756 | 1,100 | 0.758098 | 0 | 0 | 0 | 0 | 0 | 0 | 219 | 0.15093 |
dc4480755cafccdab8d724bc7a489422401e8ed4 | 2,054 | py | Python | apps/core/services/variables.py | Praetorian-Defence/praetorian-api | 181fa22b043e58b2ac9c5f4eae4c3471a44c9bf4 | [
"MIT"
] | 2 | 2020-06-29T15:12:04.000Z | 2020-10-13T14:18:21.000Z | apps/core/services/variables.py | Praetorian-Defence/praetorian-api | 181fa22b043e58b2ac9c5f4eae4c3471a44c9bf4 | [
"MIT"
] | 10 | 2021-01-04T11:33:38.000Z | 2021-05-07T10:23:48.000Z | apps/core/services/variables.py | zurek11/praetorian-api | 181fa22b043e58b2ac9c5f4eae4c3471a44c9bf4 | [
"MIT"
] | null | null | null | from http import HTTPStatus
from django.http import HttpRequest
from django.utils.translation import gettext_lazy as _
from apps.api.errors import ApiException
from apps.core.models import ApiKey
class VariablesService(object):
    """Filters a nested variables mapping according to the request's API-key type.

    Each entry in the mapping is ``{'value': ..., 'hidden': bool}``; values may
    themselves be nested mappings of the same shape. Proxy clients see every
    value; user/debug clients see hidden entries replaced by ``None``.
    """

    def __init__(
        self,
        request: HttpRequest,
        variables: dict
    ):
        self._request = request
        self._variables = variables
        if hasattr(self._request, 'api_key'):
            self._api_key = self._request.api_key
        else:
            # An authenticated API key is mandatory for variable resolution.
            raise ApiException(
                self._request, _('Request does not have any api_key.'), status_code=HTTPStatus.NOT_FOUND
            )

    @classmethod
    def create(
        cls,
        request: HttpRequest,
        variables: dict
    ) -> 'VariablesService':
        """Factory constructor (uses cls so subclasses are honoured)."""
        return cls(request, variables)

    def get_variables(self) -> dict:
        """Return the variables visible to the current API key type.

        Unknown key types get an empty dict.
        """
        variables = {}
        if self._api_key.type == ApiKey.ApiKeyType.PROXY_CLIENT:
            variables = self._get_all_variables(self._variables)
        elif self._api_key.type in (ApiKey.ApiKeyType.USER_CLIENT, ApiKey.ApiKeyType.DEBUG):
            # Fix: was `type == USER_CLIENT or ApiKeyType.DEBUG`, which parses as
            # `(type == USER_CLIENT) or DEBUG` and is always truthy, so every
            # non-proxy key type fell into this branch.
            variables = self._get_public_variables(self._variables)
        return variables

    def _get_all_variables(self, variables: dict) -> dict:
        """Unwrap {'value': ...} entries, recursing into nested dict values."""
        dictionary = {}
        for key, meta in variables.items():
            value = meta['value']
            dictionary[key] = self._get_all_variables(value) if isinstance(value, dict) else value
        return dictionary

    def _get_public_variables(self, variables: dict) -> dict:
        """Like _get_all_variables, but hidden entries are replaced by None."""
        dictionary = {}
        for key, meta in variables.items():
            if meta['hidden']:
                dictionary[key] = None
            elif isinstance(meta['value'], dict):
                dictionary[key] = self._get_public_variables(meta['value'])
            else:
                dictionary[key] = meta['value']
        return dictionary
| 28.929577 | 104 | 0.581305 | 1,853 | 0.902142 | 0 | 0 | 176 | 0.085686 | 0 | 0 | 121 | 0.058909 |
dc451d66d8b76f9552de78d6f8265ea16b66e274 | 3,007 | py | Python | pm4pygpu/format.py | mnghiap/pm4pygpu | 3cd0dbcbf01377c3a9af5f748ef993c095de6b41 | [
"Apache-2.0"
] | null | null | null | pm4pygpu/format.py | mnghiap/pm4pygpu | 3cd0dbcbf01377c3a9af5f748ef993c095de6b41 | [
"Apache-2.0"
] | null | null | null | pm4pygpu/format.py | mnghiap/pm4pygpu | 3cd0dbcbf01377c3a9af5f748ef993c095de6b41 | [
"Apache-2.0"
] | null | null | null | from pm4pygpu.constants import Constants
from numba import cuda
import numpy as np
def post_grouping_function(custom_column_activity_code, custom_column_timestamp, custom_column_case_idx, custom_column_pre_activity_code, custom_column_pre_timestamp, custom_column_pre_case, custom_column_variant_number, custom_column_ev_in_case_idx):
for i in range(cuda.threadIdx.x, len(custom_column_activity_code), cuda.blockDim.x):
custom_column_variant_number[i] = (len(custom_column_activity_code) + i + 1) * (custom_column_activity_code[i] + 1)
custom_column_ev_in_case_idx[i] = i
if i > 0:
custom_column_pre_activity_code[i] = custom_column_activity_code[i-1]
custom_column_pre_timestamp[i] = custom_column_timestamp[i-1]
custom_column_pre_case[i] = custom_column_case_idx[i-1]
else:
custom_column_pre_case[i] = -1
def post_filtering(df):
cdf = df.groupby(Constants.TARGET_CASE_IDX)
df = cdf.apply_grouped(post_grouping_function, incols=[Constants.TARGET_ACTIVITY_CODE, Constants.TARGET_TIMESTAMP, Constants.TARGET_CASE_IDX], outcols={Constants.TARGET_PRE_ACTIVITY: np.int32, Constants.TARGET_PRE_TIMESTAMP: np.int32, Constants.TARGET_PRE_CASE: np.int32, Constants.TARGET_VARIANT_NUMBER: np.int32, Constants.TARGET_EV_IN_CASE_IDX: np.int32})
df[Constants.TIMESTAMP_DIFF] = df[Constants.TARGET_TIMESTAMP] - df[Constants.TARGET_PRE_TIMESTAMP]
return df
def prefix_columns(df):
columns = list(df.columns)
columns = [x.replace("AAA", ":") for x in columns]
df.columns = columns
return df
def apply(df, case_id="case:concept:name", activity_key="concept:name", timestamp_key="time:timestamp", resource_key="org:resource"):
df = prefix_columns(df)
df[Constants.TARGET_ACTIVITY] = df[activity_key].astype("category")
df[Constants.TARGET_ACTIVITY_CODE] = df[Constants.TARGET_ACTIVITY].cat.codes
df[Constants.TARGET_TIMESTAMP] = df[timestamp_key].astype("int") // 10**6
df[Constants.TARGET_TIMESTAMP + "_2"] = df[Constants.TARGET_TIMESTAMP]
df[Constants.TARGET_EV_IDX] = df.index.astype("int")
if resource_key is not None:
df[Constants.TARGET_RESOURCE] = df[resource_key].astype("category")
df[Constants.TARGET_RESOURCE_IDX] = df[Constants.TARGET_RESOURCE].cat.codes
df = df.sort_values([Constants.TARGET_TIMESTAMP, Constants.TARGET_EV_IDX]).reset_index()
df[Constants.TARGET_CASE_IDX] = df[case_id].astype("category").cat.codes
#df = df.sort_values([Constants.TARGET_CASE_IDX, Constants.TARGET_TIMESTAMP, Constants.TARGET_EV_IDX]).reset_index()
df[Constants.TARGET_EV_IDX] = df.index.astype("int")
df[Constants.TARGET_EV_IDX] = df[Constants.TARGET_EV_IDX] + 1
mult_fact = df[Constants.TARGET_EV_IDX].max() + 2
df[Constants.TARGET_EV_CASE_MULT_ID] = df[Constants.TARGET_CASE_IDX].astype(np.int32) + 1
df[Constants.TARGET_EV_CASE_MULT_ID] = mult_fact * df[Constants.TARGET_EV_CASE_MULT_ID]
df[Constants.TARGET_EV_CASE_MULT_ID] = df[Constants.TARGET_EV_CASE_MULT_ID] + df[Constants.TARGET_EV_IDX]
return post_filtering(df)
| 61.367347 | 360 | 0.790156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.078816 |
dc45afb348eda7c0c6cebd9eec9eaba64ea60075 | 268 | py | Python | userserver/userserver_app/main_app/urls.py | tuvrai/votechain | 413d8892d7ede1e195a026978173675031d0f924 | [
"MIT"
] | null | null | null | userserver/userserver_app/main_app/urls.py | tuvrai/votechain | 413d8892d7ede1e195a026978173675031d0f924 | [
"MIT"
] | null | null | null | userserver/userserver_app/main_app/urls.py | tuvrai/votechain | 413d8892d7ede1e195a026978173675031d0f924 | [
"MIT"
] | null | null | null | from django.urls import path, register_converter
from . import views
from django.shortcuts import redirect
app_name = 'main_app'
urlpatterns = [
path('', lambda request: redirect('voting/')),
path('voting/',
views.voting,
name='voting'),
]
| 20.615385 | 50 | 0.66791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.141791 |
dc4929512e3496ffaec651ee8fa942aa3edc37ed | 3,402 | py | Python | database.py | DevEliran/PokeAPI | 0e1b983d8db256c8a34449b49f1a7ec28cb12208 | [
"MIT"
] | null | null | null | database.py | DevEliran/PokeAPI | 0e1b983d8db256c8a34449b49f1a7ec28cb12208 | [
"MIT"
] | null | null | null | database.py | DevEliran/PokeAPI | 0e1b983d8db256c8a34449b49f1a7ec28cb12208 | [
"MIT"
] | null | null | null | from database_utils import PokeDatabase, DB_FILENAME
def get_poke_by_name(poke_name: str) -> dict:
with PokeDatabase(DB_FILENAME) as cursor:
cursor.execute('''SELECT name, type1, type2, sum_stats,
hp, attack, special_attack, defense,
special_defense FROM Pokemons WHERE name = ?''',
(poke_name,))
x = cursor.fetchone()
return x
def get_poke_by_type(type1: str, type2: str = None) -> list:
with PokeDatabase(DB_FILENAME) as cursor:
if type2:
cursor.execute('''
SELECT name, type1, type2, sum_stats,
hp, attack, special_attack, defense,
special_defense FROM Pokemons WHERE type1 = ? AND type2 = ?''',
(type1, type2))
else:
cursor.execute('''
SELECT name, type1, type2, sum_stats,
hp, attack, special_attack, defense,
special_defense FROM Pokemons WHERE type1 = ?''', (type1,))
return cursor.fetchall()
def add_poke_to_db(name: str, type1: str, type2: str, sum_stats: int, hp: int,
attack: int, special_attack: int, defense: int,
special_defense: int) -> None:
with PokeDatabase(DB_FILENAME) as cursor:
cursor.execute('''
INSERT INTO Pokemons ('name', 'type1', 'type2', 'sum_stats',
'hp', 'attack', 'special_attack', 'defense', 'special_defense')
VALUES (?,?,?,?,?,?,?,?,?)''', (name, type1, type2, sum_stats,
hp, attack, special_attack, defense,
special_defense))
def update_poke(name: str, type1: str = None, type2: str = None,
sum_stats: int = None, hp: int = None, attack: int = None,
special_attack: int = None, defense: int = None,
special_defense: int = None) -> None:
params = [type1, type2, sum_stats, hp, attack, special_attack,
defense, special_defense]
params_names = ['type1', 'type2', 'sum_stats', 'hp', 'attack',
'special_attack', 'defense', 'special_defense']
with PokeDatabase(DB_FILENAME) as cursor:
for param, param_name in zip(params, params_names):
if param:
query = '''
UPDATE Pokemons SET ''' + param_name + '''
= ? WHERE name = ?'''
cursor.execute(query, (param, name))
def delete_poke(name: str) -> None:
with PokeDatabase(DB_FILENAME) as cursor:
cursor.execute('''
DELETE FROM Pokemons WHERE name = ?''', (name,))
def get_poke_by_stats_above_or_below(hp: int, attack: int, sattack: int,
defense: int, sdefense: int, above: bool) -> list:
params = [hp, attack, sattack, defense, sdefense]
params_names = ['hp', 'attack', 'special_attack', 'defense', 'special_defense']
operator = " > " if above else " < "
query = '''SELECT name, type1, type2, sum_stats,
hp, attack, special_attack, defense,
special_defense FROM Pokemons WHERE '''
for param, param_name in zip(params, params_names):
if param:
query += param_name + operator + str(param) + " AND "
query = query[:-len(" AND ")]
with PokeDatabase(DB_FILENAME) as cursor:
cursor.execute(query)
return cursor.fetchall() | 39.55814 | 83 | 0.569371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,099 | 0.323045 |
dc49d0efef00a025032ec403a6bd5a00b82ee89b | 140 | py | Python | python/module/calc/example.py | wjiec/packages | 4ccaf8f717265a1f8a9af533f9a998b935efb32a | [
"MIT"
] | null | null | null | python/module/calc/example.py | wjiec/packages | 4ccaf8f717265a1f8a9af533f9a998b935efb32a | [
"MIT"
] | 1 | 2016-09-15T07:06:15.000Z | 2016-09-15T07:06:15.000Z | python/module/calc/example.py | wjiec/packages | 4ccaf8f717265a1f8a9af533f9a998b935efb32a | [
"MIT"
] | null | null | null | #!/usr/bin/python35
import calc
from calc import mult
print(calc.add(1, 2))
print(calc.dec(2, 3))
print(calc.div(1, 2))
print(mult(2, 3))
| 14 | 21 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.135714 |
dc4bf0a06281c947e4e62456a19c478fe9e93906 | 223 | py | Python | asva/restoring_force/Elastic.py | adc21/asva | b94db4aa41ee10ef1991ed4f042c91d9e310a1e1 | [
"MIT"
] | 4 | 2020-10-07T10:47:07.000Z | 2020-12-23T10:07:12.000Z | asva/restoring_force/Elastic.py | adc21/asva | b94db4aa41ee10ef1991ed4f042c91d9e310a1e1 | [
"MIT"
] | 2 | 2021-06-11T12:41:23.000Z | 2021-08-04T03:05:31.000Z | asva/restoring_force/Elastic.py | adc21/asva | b94db4aa41ee10ef1991ed4f042c91d9e310a1e1 | [
"MIT"
] | null | null | null | from asva.restoring_force.RestoringForce import RestoringForce
class Elastic(RestoringForce):
def step(self, dis: float) -> None:
# init
self.init_step(dis)
# end
self.end_step(self.k0)
| 24.777778 | 62 | 0.659193 | 158 | 0.70852 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.049327 |
dc4c5dd9ba707c06e5166654025500e610de5ec9 | 1,169 | py | Python | ann_three_body/physics/constants.py | mruijzendaal/python_ann_three_body | 9380b0f7e4eaf7f481d8cdf34250e8475fe32a24 | [
"MIT"
] | null | null | null | ann_three_body/physics/constants.py | mruijzendaal/python_ann_three_body | 9380b0f7e4eaf7f481d8cdf34250e8475fe32a24 | [
"MIT"
] | 4 | 2020-11-13T18:44:52.000Z | 2022-02-10T01:35:23.000Z | ann_three_body/physics/constants.py | mruijzendaal/python_ann_three_body | 9380b0f7e4eaf7f481d8cdf34250e8475fe32a24 | [
"MIT"
] | null | null | null | G = 6.67408e-11 # N-m2/kg2
#
# Normalize the constants such that m, r, t and v are of the order 10^1.
#
def get_normalization_constants_alphacentauri():
# Normalize the masses to the mass of our sun
m_nd = 1.989e+30 # kg
# Normalize distances to the distance between Alpha Centauri A and Alpha Centauri B
r_nd = 5.326e+12 # m
# Normalize velocities to the velocity of earth around the sun
v_nd = 30000 # m/s
# Normalize time to the orbital period of Alpha Centauri A and B
t_nd = 79.91 * 365 * 24 * 3600 * 0.51 # s
return m_nd, r_nd, v_nd, t_nd
def get_normalization_constants_earthsun():
# Normalize the masses to the mass of our sun
m_nd = 1.989e+30 # kg
# Normalize distances to the distance between Earth and Sun
r_nd = 149.47e9 # m
# Normalize velocities to the velocity of earth around the sun
v_nd = 29.78e3 # m/s
# Normalize time to the orbital period of Earth and Sun
t_nd = 365 * 24 * 3600 * 0.51 # s
return m_nd, r_nd, v_nd, t_nd
def normalization_constants_none():
return 1, 1, 1, 1
m_nd, r_nd, v_nd, t_nd = get_normalization_constants_alphacentauri()
| 25.413043 | 87 | 0.674936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 591 | 0.50556 |
dc4ccdcff5f2446b05828e69bf993a6f11ed8830 | 1,118 | py | Python | assigner/roster_util.py | joshessman/assigner | 1654298d200707475fc8b34813e6b5dde9a930f6 | [
"MIT"
] | 24 | 2016-07-28T23:08:20.000Z | 2021-09-03T04:07:23.000Z | assigner/roster_util.py | joshessman/assigner | 1654298d200707475fc8b34813e6b5dde9a930f6 | [
"MIT"
] | 111 | 2016-03-02T05:21:19.000Z | 2022-03-14T03:28:04.000Z | assigner/roster_util.py | joshessman/assigner | 1654298d200707475fc8b34813e6b5dde9a930f6 | [
"MIT"
] | 7 | 2016-09-03T08:41:38.000Z | 2021-10-06T03:08:48.000Z | from assigner.backends.base import RepoError
from assigner.config import DuplicateUserError
import logging
logger = logging.getLogger(__name__)
def get_filtered_roster(roster, section, target):
if target:
roster = [s for s in roster if s["username"] == target]
elif section:
roster = [s for s in roster if s["section"] == section]
if not roster:
raise ValueError("No matching students found in roster.")
return roster
def add_to_roster(
conf, backend, roster, name, username, section, force=False, canvas_id=None
):
student = {
"name": name,
"username": username,
"section": section,
}
logger.debug("%s", roster)
if not force and any(filter(lambda s: s["username"] == username, roster)):
raise DuplicateUserError("Student already exists in roster!")
try:
student["id"] = backend.repo.get_user_id(username, conf.backend)
except RepoError:
logger.warning("Student %s does not have a Gitlab account.", name)
if canvas_id:
student["canvas-id"] = canvas_id
roster.append(student)
| 26.619048 | 79 | 0.660107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.170841 |
dc4d10789c6e7024e9885910b0c59a9ecdf28e4d | 5,721 | py | Python | examples/knapsack01.py | rawg/levis | 33cd6c915f51134f79f3586dc0e4a6072247b568 | [
"MIT"
] | 42 | 2016-06-29T21:13:02.000Z | 2022-01-23T03:23:59.000Z | examples/knapsack01.py | rawg/levis | 33cd6c915f51134f79f3586dc0e4a6072247b568 | [
"MIT"
] | null | null | null | examples/knapsack01.py | rawg/levis | 33cd6c915f51134f79f3586dc0e4a6072247b568 | [
"MIT"
] | 12 | 2016-07-18T20:46:55.000Z | 2021-06-13T16:08:37.000Z | """
Genetic solution to the 0/1 Knapsack Problem.
usage: knapsack01.py [-h] [--data-file DATA_FILE]
[--population-size POPULATION_SIZE]
[--iterations MAX_ITERATIONS] [--mutation MUTATION_PROB]
[--crossover CROSSOVER_PROB] [--seed SEED]
[--stats-file STATS_FILE]
[--population-file POPULATION_FILE] [--verbose]
[--elitism ELITISM] [--uniform_cx] [--generate]
[--items NUM_ITEMS]
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
#from builtins import str
#from builtins import range
import math
import random
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
from levis import (configuration, crossover, mutation, FitnessLoggingGA,
ProportionateGA, ElitistGA)
class Knapsack01GA(ElitistGA, ProportionateGA, FitnessLoggingGA):
"""Genetic solution to the 0/1 Knapsack Problem."""
def __init__(self, config={}):
"""Initialize a 0/1 knapsack solver.
Raises:
AttributeError: If ``items`` is not in the configuration dict.
"""
super(self.__class__, self).__init__(config)
self.max_weight = self.config.setdefault("max_weight", 15)
self.items = self.config.setdefault("items", [])
self.chromosome_length = len(self.items)
self.uniform_cx = config.setdefault("uniform_cx", False)
for i, item in enumerate(self.items):
item["mask"] = 2 ** i
def assess(self, chromosome):
"""Return a 2-tuple of the total weight and value of a chromosome."""
weight = 0
value = 0
# pylint: disable=unused-variable
for locus, _ in enumerate(self.items):
if chromosome & 2 ** locus:
item = self.items[locus]
weight += item["weight"]
value += item["value"]
return (weight, value)
def score(self, chromosome):
weight, value = self.assess(chromosome)
if weight > self.max_weight:
return 0.0
return value
def create(self):
# The below generates chromosomes, but the majority are too heavy
# return random.randint(0, 2 ** self.chromosome_length - 1)
items = list(self.items)
random.shuffle(items)
weight = 0
chromosome = 0
for i in items:
if weight + i["weight"] <= self.max_weight:
weight += i["weight"]
chromosome |= i["mask"]
return chromosome
def crossover(self):
parent1 = self.select()
parent2 = self.select()
length = self.chromosome_length
if self.uniform_cx:
return crossover.uniform_bin(parent1, parent2, length)
else:
return crossover.single_point_bin(parent1, parent2, length)
def mutate(self, chromosome):
return mutation.toggle(chromosome, self.chromosome_length,
self.mutation_prob)
def chromosome_str(self, chromosome):
sack = []
for locus, _ in enumerate(self.items):
item = self.items[locus]["name"]
packed= 0
if chromosome & 2 ** locus:
packed = 1
sack.append("%s: %i" % (item, packed))
weight, value = self.assess(chromosome)
vals = (weight, value, ", ".join(sack))
return "{weight: %0.2f, value: %0.2f, contents: [%s]}" % vals
def chromosome_repr(self, chromosome):
return bin(chromosome)[2:].zfill(self.chromosome_length)
def create_data(config={}):
"""Create data and write to a JSON file."""
max_weight = config.setdefault("max_weight", 15)
items = []
if "num_items" in config:
num_items = config["num_items"]
del config["num_items"]
else:
num_items = 32
# Generate items
digits = int(math.ceil(math.log(num_items, 16)))
fmt = "%0" + str(digits) + "X"
for i in range(0, num_items):
name = fmt % (i + 1)
weight = random.triangular(1.0, max_weight // 3, max_weight)
value = random.random() * 100
items.append({"name": name, "weight": weight, "value": value})
config["items"] = items
configuration.write_file(config)
def main():
"""Main method to parse args and run."""
defaults = {
"population_size": 10,
"max_iterations": 10,
"elitism_pct": 0.01,
"population_file": "population.log",
"stats_file": "stats.csv"
}
description = "Genetic solution to the 0/1 Knapsack Problem"
parent = [Knapsack01GA.arg_parser()]
parser = configuration.get_parser(description, "knapsack01.json", parent)
parser.add_argument("--uniform_cx", action="store_true",
help="Use uniform crossover instead of single-point")
parser.add_argument("--generate", action="store_true",
help="Generate and store problem data")
group = parser.add_argument_group("data generation options")
group.add_argument("--items", type=int, dest="num_items", default=32,
help="Number of items to generate")
args = configuration.read_args(parser)
if args["generate"]:
del args["generate"]
create_data(args)
else:
config_file = configuration.read_file(args)
config = configuration.merge(defaults, config_file, args)
solver = Knapsack01GA(config)
solver.solve()
print(solver.chromosome_str(solver.best()))
if __name__ == "__main__":
main()
| 30.110526 | 81 | 0.59133 | 2,772 | 0.484531 | 0 | 0 | 0 | 0 | 0 | 0 | 1,752 | 0.30624 |
dc4f4a63f74db3ef68e4b3d83bc56a1482fafa87 | 2,456 | py | Python | Tests/Plot/LamWind/test_Slot_60_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | 2 | 2020-06-29T13:48:37.000Z | 2021-06-15T07:34:05.000Z | Tests/Plot/LamWind/test_Slot_60_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | null | null | null | Tests/Plot/LamWind/test_Slot_60_plot.py | PMSMcqut/pyleecan-of-manatee | 3efa06e8bc53c81a3e35457c108290e1d9ec1373 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@date Created on Tue Jan 12 13:54:56 2016
@copyright (C) 2015-2016 EOMYS ENGINEERING.
@author pierre_b
"""
from os.path import join
from unittest import TestCase
import matplotlib.pyplot as plt
from numpy import array, pi, zeros
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamSlotWind import LamSlotWind
from pyleecan.Classes.LamSquirrelCage import LamSquirrelCage
from pyleecan.Classes.Machine import Machine
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.VentilationCirc import VentilationCirc
from pyleecan.Classes.VentilationPolar import VentilationPolar
from pyleecan.Classes.VentilationTrap import VentilationTrap
from pyleecan.Classes.Winding import Winding
from pyleecan.Classes.WindingUD import WindingUD
from pyleecan.Classes.WindingCW2LT import WindingCW2LT
from pyleecan.Classes.WindingDW2L import WindingDW2L
from pyleecan.Classes.BHCurveMat import BHCurveMat
from pyleecan.Classes.BHCurveParam import BHCurveParam
from pyleecan.Classes.MatLamination import MatLamination
from pyleecan.Classes.SlotW60 import SlotW60
from pyleecan.Tests.Plot import save_path
from pyleecan.Tests.Plot.LamWind import wind_mat
class test_Lam_Wind_60_plot(TestCase):
"""unittest for Lamination with winding plot"""
def test_Lam_Wind_60(self):
"""Test machine plot with Slot 60
"""
print("\nTest plot Slot 60")
plt.close("all")
test_obj = Machine()
test_obj.rotor = LamSlotWind(
Rint=0, Rext=0.1325, is_internal=True, is_stator=False, L1=0.9
)
test_obj.rotor.slot = SlotW60(
Zs=12,
W1=25e-3,
W2=12.5e-3,
H1=20e-3,
H2=20e-3,
R1=0.1325,
H3=2e-3,
H4=1e-3,
W3=2e-3,
)
test_obj.rotor.winding = WindingCW2LT(qs=3, p=3, Lewout=60e-3)
plt.close("all")
test_obj.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s60_1-Rotor.png"))
# 1 for Lam, Zs*2 for wind
self.assertEqual(len(fig.axes[0].patches), 25)
test_obj.rotor.slot.W3 = 0
test_obj.rotor.slot.H3 = 0
test_obj.rotor.slot.H4 = 0
test_obj.rotor.plot()
fig = plt.gcf()
fig.savefig(join(save_path, "test_Lam_Wind_s60_2-Rotor Wind.png"))
# 1 for Lam, Zs*2 for wind
self.assertEqual(len(fig.axes[0].patches), 25)
| 32.746667 | 74 | 0.690147 | 1,265 | 0.515065 | 0 | 0 | 0 | 0 | 0 | 0 | 369 | 0.150244 |
dc4f5a6347fb5f09a9911bb06688dfc87583ba36 | 23,082 | py | Python | search/views.py | ashwin31/opensource-job-portal | 2885ea52f8660e893fe0531c986e3bee33d986a2 | [
"MIT"
] | 1 | 2021-09-27T05:01:39.000Z | 2021-09-27T05:01:39.000Z | search/views.py | kiran1415/opensource-job-portal | 2885ea52f8660e893fe0531c986e3bee33d986a2 | [
"MIT"
] | null | null | null | search/views.py | kiran1415/opensource-job-portal | 2885ea52f8660e893fe0531c986e3bee33d986a2 | [
"MIT"
] | 1 | 2022-01-05T09:02:32.000Z | 2022-01-05T09:02:32.000Z | import json
import math
import re
from django.urls import reverse
from django.http.response import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.template.defaultfilters import slugify
from django.db.models import Q, F
from haystack.query import SQ, SearchQuerySet
from django.http import QueryDict
# from haystack.views import SearchView
from mpcomp.views import (get_prev_after_pages_count, get_valid_locations_list,
get_valid_skills_list, get_meta_data, get_404_meta)
from peeldb.models import (City, FunctionalArea, Industry, JobPost,
Qualification, Skill, State)
from pjob.refine_search import refined_search
from pjob.views import get_page_number
from search.forms import job_searchForm
from dashboard.tasks import save_search_results
# class search_job(SearchView):
# template_name = 'search/search_results.html'
# queryset = SearchQuerySet()
# form_class = job_searchForm
# def get_queryset(self):
# queryset = super(search_job, self).get_queryset()
# return queryset
# def get_context_data(self):
# context = super(search_job, self).get_context_data()
# return context
# def get_results(self):
# results = self.form.search()
# return results
# def build_page(self):
# jobs_list = self.results
# no_of_jobs = len(jobs_list)
# items_per_page = 10
# no_pages = int(math.ceil(float(jobs_list.count()) / items_per_page))
# page = 1
# jobs_list = jobs_list[
# (page - 1) * items_per_page:page * items_per_page]
# prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(page, no_pages)
# return (aft_page, after_page, prev_page, previous_page, page, no_pages, no_of_jobs, jobs_list)
# def create_response(self):
# (aft_page, after_page, prev_page, previous_page,
# page, no_pages, no_of_jobs) = self.build_page()
# results = [r.object for r in self.results]
# param = ""
# param = ('&' + 'q=' + self.form.cleaned_data['q'] + '&location=' + self.form.cleaned_data['location'] +
# '&experience=' + str(self.form.cleaned_data['experience'] or "") + '&salary=' + str(self.form.cleaned_data['salary'] or "") +
# '&industry=' + str(self.form.cleaned_data['industry'] or "") + '&functional_area=' + str(self.form.cleaned_data['functional_area'] or ""))
# context = {
# 'query': self.query,
# 'query_form': self.form,
# 'page': page,
# 'results': results,
# 'suggestion': None,
# 'param': param,
# 'aft_page': aft_page,
# 'after_page': after_page,
# 'prev_page': prev_page,
# 'previous_page': previous_page,
# 'current_page': page,
# 'last_page': no_pages,
# 'no_of_jobs': no_of_jobs,
# 'skill': self.form.cleaned_data['q'],
# 'location': self.form.cleaned_data['location'],
# }
# return render_to_response(self.template, context, context_instance=self.context_class(self.request))
def custom_search(data, request):
    """Run a job search and build the template context for the results page.

    Two modes are supported:

    * ``refine_search`` POSTs are delegated to :func:`refined_search`,
      which returns the job queryset plus the searched skill/location/
      industry/education/state objects.
    * Otherwise a plain haystack search is performed via ``job_searchForm``
      and the matching ``JobPost`` rows are re-fetched from the ORM.

    The result set is restricted to live posts, optionally narrowed by
    ``job_type`` / ``walk-in``, and paginated at 20 items per page.

    Args:
        data: dict-like search parameters (``q``, ``location``, ``page``,
            ``job_type``, ``walk-in``).
        request: the current ``HttpRequest``.

    Returns:
        dict: template context with the paginated job list, pagination
        counters and searched-facet metadata; ``{'job_list': []}`` when
        the search form does not validate.
    """
    form = job_searchForm(data)
    searched_locations = searched_skills = searched_edu = searched_industry = searched_states = ''
    if request.POST.get('refine_search') == 'True':
        jobs_list, searched_skills, searched_locations, searched_industry, searched_edu, searched_states = refined_search(request.POST)
    else:
        jobs_list = form.search()
        # Haystack returns search results; re-query the ORM so we can chain
        # normal queryset filters below.
        jobs_list = JobPost.objects.filter(pk__in=[r.pk for r in jobs_list])
        searched_locations = City.objects.filter(name__in=data.get('location'))
        searched_skills = Skill.objects.filter(name__in=data.get('q'))
    jobs_list = jobs_list.filter(status="Live")
    job_type = data.get('job_type') or request.POST.get('job_type') or request.GET.get('job_type')
    if job_type:
        jobs_list = jobs_list.filter(job_type=job_type)
    if data.get('walk-in'):
        jobs_list = jobs_list.filter(job_type="walk-in")

    no_of_jobs = len(jobs_list)
    items_per_page = 20
    no_pages = int(math.ceil(float(jobs_list.count()) / items_per_page))

    # Validate the requested page number.  The page may arrive either in
    # the POST payload or in ``data``; anything that is not a positive
    # decimal integer within range falls back to page 1.
    page = request.POST.get('page') or data.get('page')
    if page and re.match(r"^\d+$", str(page)) and int(page) > 0:
        # Bug fix: previously this branch did ``int(data.get('page'))``,
        # which raised TypeError when the page came from request.POST and
        # ``data`` had no 'page' key.  Convert the value actually selected.
        page = int(page)
        if page > (no_pages + 2):
            page = 1
    else:
        page = 1

    jobs_list = jobs_list.select_related('company', 'user').prefetch_related('location', 'skills', 'industry').distinct()
    jobs_list = jobs_list[(page - 1) * items_per_page:page * items_per_page]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(page, no_pages)

    if form.is_valid():
        context = {
            'results': form.search(),
            'query': form.query(),
            'searchform': form,
            'aft_page': aft_page,
            'after_page': after_page,
            'prev_page': prev_page,
            'previous_page': previous_page,
            'current_page': page,
            'last_page': no_pages,
            'no_of_jobs': no_of_jobs,
            'job_list': jobs_list,
            'skill': form.cleaned_data['q'],
            'location': form.cleaned_data['location'],
            'searched_skills': searched_skills,
            'searched_locations': searched_locations,
            'searched_industry': searched_industry,
            'searched_edu': searched_edu,
            'searched_experience': request.POST.get('experience'),
            'searched_job_type': request.POST.get('job_type'),
            'searched_functional_area': request.POST.get('functional_area'),
        }
        return context
    return {'job_list': []}
def custome_search(request, skill_name, city_name, **kwargs):
    """Render paginated job listings for a skill+city pretty URL.

    Normalizes pagination URLs (``?page=N`` and ``/1/`` redirect to the
    canonical path), validates the skill and city slugs, runs either a
    refined search (when POSTed with ``refine_search``) or a default
    skill+location search, and renders either the job list (desktop or
    mobile template) or a 404 page when nothing matches.

    Args:
        request: the current ``HttpRequest``.
        skill_name: skill slug from the URL.
        city_name: city slug from the URL.
        **kwargs: may contain ``page_num`` from the URL pattern.

    Returns:
        HttpResponse: rendered listing page, a redirect to the canonical
        URL, or a 404-status page when no valid skills/cities or no jobs
        are found.
    """
    current_url = reverse('custome_search', kwargs={'skill_name': skill_name, 'city_name': city_name})
    # Canonicalize pagination: page 1 lives at the bare URL, and
    # ``?page=N`` is redirected to the path form ``.../N/``.
    if kwargs.get('page_num') == '1' or request.GET.get('page') == '1':
        return redirect(current_url, permanent=True)
    if 'page' in request.GET:
        url = current_url + request.GET.get('page') + '/'
        return redirect(url, permanent=True)
    final_skill = get_valid_skills_list(skill_name)
    final_location = get_valid_locations_list(city_name)
    if request.POST:
        # Record the search asynchronously for analytics (0 = result count
        # unknown at this point).
        save_search_results.delay(request.META['REMOTE_ADDR'], request.POST, 0, request.user.id)
    if not final_location or not final_skill:
        template = 'mobile/404.html' if request.is_mobile else '404.html'
        return render(request, template, {'message': 'Unfortunately, we are unable to locate the jobs you are looking for',
                                          'data_empty': True, 'job_search': True,
                                          'reason': "Only Valid Skills/Cities names are accepted in search",
                                          'searched_skills': final_skill or [skill_name],
                                          'searched_locations': final_location or [city_name]}, status=404)
    job_type = request.POST.get('job_type') or request.GET.get('job_type')
    if request.POST.get('refine_search') == 'True':
        job_list, searched_skills, searched_locations, searched_industry, searched_edu, searched_states = refined_search(request.POST)
    else:
        # Build a mutable QueryDict mimicking a refine-search POST from the
        # URL-derived skill/location slugs.
        search_dict = QueryDict('', mutable=True)
        search_dict.setlist('refine_skill', final_skill)
        search_dict.setlist('refine_location', final_location)
        if job_type:
            search_dict.update({'job_type': job_type})
        if request.POST.get('experience'):
            search_dict.update({'refine_experience_min': request.POST.get('experience')})
        job_list, searched_skills, searched_locations, searched_industry, searched_edu, searched_states = refined_search(search_dict)
    if job_list:
        no_of_jobs = job_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(page, no_pages)
        job_list = job_list[(page - 1) * items_per_page:page * items_per_page]
        meta_title, meta_description, h1_tag = get_meta_data('skill_location_jobs', {'skills': searched_skills,
                                                                                     'final_skill': final_skill,
                                                                                     'page': page,
                                                                                     'locations': searched_locations,
                                                                                     'final_location': final_location})
        data = {'job_list': job_list,
                'aft_page': aft_page,
                'after_page': after_page,
                'prev_page': prev_page,
                'previous_page': previous_page,
                'current_page': page,
                'last_page': no_pages,
                'no_of_jobs': no_of_jobs,
                "is_job_list": False,
                'current_url': current_url,
                'searched_skills': searched_skills,
                'searched_states': searched_states,
                'searched_locations': searched_locations,
                'searched_industry': searched_industry,
                'searched_edu': searched_edu,
                'searched_experience': request.POST.get('experience'),
                'searched_job_type': job_type,
                'meta_title': meta_title,
                'meta_description': meta_description,
                'h1_tag': h1_tag,
                }
        template = 'jobs/jobs_list.html'
        if request.is_mobile:
            data.update({'searched_industry': request.POST.get('industry'),
                         'searched_functional_area': request.POST.get('functional_area')})
            template = 'mobile/jobs/list.html'
        return render(request, template, data)
    else:
        template = 'mobile/404.html' if request.is_mobile else '404.html'
        meta_title, meta_description = get_404_meta(
            'skill_location_404', {'skill': final_skill, 'city': final_location})
        # Bug fix: this "no jobs found" page previously returned HTTP 200;
        # it now returns 404 like the invalid-slug branch above.
        return render(request, template, {'message': 'Unfortunately, we are unable to locate the jobs you are looking for',
                                          'reason': "Only Valid Skills/Cities names are accepted in search",
                                          'job_search': True,
                                          'searched_skills': searched_skills, 'searched_locations': searched_locations,
                                          'meta_title': meta_title, 'meta_description': meta_description},
                      status=404)
def custom_walkins(request, skill_name, city_name, **kwargs):
    """Render the walk-in job listing page for a skill/city combination.

    URL params:
        skill_name: slug-ish skill term(s) from the URL.
        city_name: slug-ish city term(s) from the URL.
        kwargs: may carry ``page_num`` from the URL pattern.

    Flow: canonicalize page-1 / ?page= URLs with permanent redirects,
    validate the skill and city, run a walk-in-filtered search, paginate
    (20 per page) and render; invalid inputs or an empty result render a
    404 template instead.
    """
    current_url = reverse('custom_walkins', kwargs={'skill_name': skill_name, 'city_name': city_name})
    # Canonical URL handling: page 1 lives at the bare URL; ?page=N is
    # permanently redirected to the /N/ path form (SEO dedup).
    if kwargs.get('page_num') == '1' or request.GET.get('page') == '1':
        return redirect(current_url, permanent=True)
    if 'page' in request.GET:
        url = current_url + request.GET.get('page') + '/'
        return redirect(url, permanent=True)
    # Resolve the raw URL terms to known skills/locations; falsy means
    # nothing in the term list was recognised.
    final_skill = get_valid_skills_list(skill_name)
    final_location = get_valid_locations_list(city_name)
    if not final_location or not final_skill:
        # Log the failed search asynchronously (0 results) before 404ing.
        if request.POST:
            save_search_results.delay(request.META['REMOTE_ADDR'], request.POST, 0, request.user.id)
        location = final_location or [city_name]
        skills = final_skill or [skill_name]
        template = 'mobile/404.html' if request.is_mobile else '404.html'
        meta_title = meta_description = ''
        return render(request, template, {'message': 'Unfortunately, we are unable to locate the jobs you are looking for',
                                          'searched_job_type': 'walk-in',
                                          'reason': "Only Valid Skills/Cities names are accepted in search",
                                          'searched_skills': skills, 'searched_locations': location,
                                          'meta_title': meta_title, 'meta_description': meta_description,
                                          'data_empty': True,
                                          'job_search': True}, status=404)
    # Either honour a user-refined search form, or build a fresh walk-in
    # query from the validated URL terms.
    if request.POST.get('refine_search') == 'True':
        job_list, searched_skills, searched_locations, searched_industry, searched_edu, searched_states = refined_search(request.POST)
    else:
        search_dict = QueryDict('', mutable=True)
        search_dict.setlist('refine_skill', final_skill)
        search_dict.setlist('refine_location', final_location)
        search_dict.update({'job_type': 'walk-in'})
        if request.POST.get('experience'):
            search_dict.update({'refine_experience_min': request.POST.get('experience')})
        job_list, searched_skills, searched_locations, searched_industry, searched_edu, searched_states = refined_search(search_dict)
    if job_list:
        # Manual pagination: 20 jobs per page; bad page numbers bounce back
        # to the listing's canonical URL.
        no_of_jobs = job_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(page, no_pages)
        job_list = job_list[(page - 1) * items_per_page:page * items_per_page]
        meta_title, meta_description, h1_tag = get_meta_data('skill_location_walkin_jobs', {'skills': searched_skills,
                                                                                           'final_skill': final_skill,
                                                                                           'page': page,
                                                                                           'locations': searched_locations,
                                                                                           'final_location': final_location})
        data = {'job_list': job_list,
                'aft_page': aft_page,
                'after_page': after_page,
                'prev_page': prev_page,
                'previous_page': previous_page,
                'current_page': page,
                'last_page': no_pages,
                'no_of_jobs': no_of_jobs,
                "is_job_list": False,
                'current_url': current_url,
                'searched_skills': searched_skills,
                'searched_states': searched_states,
                'searched_locations': searched_locations,
                'searched_industry': searched_industry,
                'searched_edu': searched_edu,
                'searched_experience': request.POST.get('experience'),
                'searched_job_type': 'walk-in',
                'meta_title': meta_title,
                'meta_description': meta_description,
                'h1_tag': h1_tag,
                'walkin': True
                }
        template = 'jobs/jobs_list.html'
        # Mobile gets a different template and extra refine fields.
        if request.is_mobile:
            data.update({'searched_industry': request.POST.get('industry'),
                         'searched_functional_area': request.POST.get('functional_area')})
            template = 'mobile/jobs/list.html'
        return render(request, template, data)
    else:
        # Valid terms but zero matching walk-ins: render the 404 template
        # (NOTE(review): without a 404 status code here — confirm intended).
        template = 'mobile/404.html' if request.is_mobile else '404.html'
        meta_title, meta_description = get_404_meta(
            'skill_location_404', {'skill': final_skill, 'city': final_location})
        return render(request, template, {'message': 'Unfortunately, we are unable to locate the jobs you are looking for',
                                          'reason': "Only Valid Skills/Cities names are accepted in search",
                                          'job_search': True,
                                          'searched_skills': searched_skills, 'searched_locations': searched_locations,
                                          'meta_title': meta_title, 'meta_description': meta_description})
def skill_auto_search(request):
    """Autocomplete endpoint for skills (optionally mixed with degrees).

    Matches the ``q`` query against skill names/slugs, excluding terms the
    user already picked (comma-separated ``text``). Unless ``search=filter``
    is passed, degree matches are appended after the (length-sorted) skills.
    Responds with at most ten suggestions as JSON.
    """
    chosen_terms = request.GET.get('text', '').split(', ')[:-1]
    query = request.GET.get('q', '')
    skill_qs = SearchQuerySet().models(Skill).filter_and(
        SQ(skill_name__contains=query) | SQ(skill_slug__contains=query))
    if chosen_terms:
        skill_qs = skill_qs.exclude(skill_name__in=chosen_terms)
    matches = [
        {'name': hit.skill_name,
         'slug': hit.skill_slug,
         'jobs_count': hit.no_of_jobposts,
         'id': hit.pk}
        for hit in skill_qs
    ]
    # Shorter names first (ascending length), as before.
    matches.sort(key=lambda item: len(item['name']))
    if request.GET.get('search') != 'filter':
        degree_qs = SearchQuerySet().models(Qualification).filter_and(
            SQ(edu_name__contains=query) | SQ(edu_slug__contains=query))
        if chosen_terms:
            degree_qs = degree_qs.exclude(edu_name__in=chosen_terms)
        matches += [
            {'name': hit.edu_name, 'id': hit.pk, 'slug': hit.edu_slug}
            for hit in degree_qs
        ]
    payload = json.dumps({
        'results': matches[:10]
    })
    return HttpResponse(payload, content_type='application/json')
def city_auto_search(request):
    """Autocomplete endpoint for cities (optionally mixed with states).

    Matches the ``location`` query against city names, excluding terms the
    user already selected (comma-separated ``text``). Unless
    ``search=filter`` is passed, matching non-duplicate states are appended
    after the (length-sorted) cities. Responds with at most ten
    suggestions as JSON.
    """
    chosen_terms = request.GET.get('text', '').split(', ')[:-1]
    query = request.GET.get('location', '')
    city_qs = SearchQuerySet().models(City).filter(city_name__contains=query)
    if chosen_terms:
        city_qs = city_qs.exclude(city_name__in=chosen_terms)
    matches = [
        {'name': hit.city_name,
         'jobs_count': hit.no_of_jobposts,
         'id': hit.pk}
        for hit in city_qs
    ]
    # Shorter names first (ascending length), as before.
    matches.sort(key=lambda item: len(item['name']))
    if request.GET.get('search') != 'filter':
        state_qs = SearchQuerySet().models(State).filter_and(
            SQ(state_name__contains=query) | SQ(state_slug__contains=query))
        state_qs = state_qs.exclude(is_duplicate__in=[True])
        if chosen_terms:
            state_qs = state_qs.exclude(state_name__in=chosen_terms)
        matches += [
            {'name': hit.state_name, 'id': hit.pk, 'slug': hit.state_slug}
            for hit in state_qs
        ]
    payload = json.dumps({
        'results': matches[:10]
    })
    return HttpResponse(payload, content_type='application/json')
def industry_auto_search(request):
    """Autocomplete endpoint for industries, returned as JSON (max ten)."""
    query = request.GET.get('industry', '')
    hits = SearchQuerySet().models(Industry).filter(industry_name__icontains=query)
    matches = []
    for hit in hits:
        # Industry names can be compound ("A / B"); show only the first part.
        matches.append({
            'name': hit.industry_name.split('/')[0],
            'jobs_count': hit.no_of_jobposts,
            'id': hit.pk,
            'slug': hit.industry_slug,
        })
    payload = json.dumps({
        'results': matches[:10]
    })
    return HttpResponse(payload, content_type='application/json')
def functional_area_auto_search(request):
    """Autocomplete endpoint for functional areas, busiest first (JSON)."""
    query = request.GET.get('functional_area', '')
    hits = SearchQuerySet().models(FunctionalArea).filter(
        functionalarea_name__contains=query)[:10]
    matches = [
        {'name': hit.functionalarea_name, 'jobs_count': hit.no_of_jobposts}
        for hit in hits
    ]
    # Areas with the most job posts first.
    matches.sort(key=lambda item: int(item['jobs_count']), reverse=True)
    payload = json.dumps({
        'results': matches
    })
    return HttpResponse(payload, content_type='application/json')
def education_auto_search(request):
    """Autocomplete endpoint for qualifications/degrees, busiest first (JSON, max ten)."""
    query = request.GET.get('education', '')
    hits = SearchQuerySet().models(Qualification).filter_and(
        SQ(edu_name__contains=query) | SQ(edu_slug__contains=query))
    matches = [
        {'name': hit.edu_name,
         'id': hit.pk,
         'slug': hit.edu_slug,
         'jobs_count': hit.no_of_jobposts or 0}
        for hit in hits
    ]
    # Degrees with the most job posts first.
    matches.sort(key=lambda item: int(item['jobs_count']), reverse=True)
    return HttpResponse(json.dumps({'results': matches[:10]}),
                        content_type='application/json')
def state_auto_search(request):
    """Autocomplete endpoint for states, busiest first (JSON, max ten).

    Terms already selected by the user (comma-separated ``text``) are
    excluded from the suggestions.
    """
    chosen_terms = request.GET.get('text', '').split(', ')[:-1]
    query = request.GET.get('state', '')
    hits = SearchQuerySet().models(State).filter_and(
        SQ(state_name__contains=query) | SQ(state_slug__contains=query))
    if chosen_terms:
        hits = hits.exclude(state_name__in=chosen_terms)
    matches = [
        {'name': hit.state_name,
         'id': hit.pk,
         'slug': hit.state_slug,
         'jobs_count': hit.no_of_jobposts or 0}
        for hit in hits
    ]
    # States with the most job posts first.
    matches.sort(key=lambda item: int(item['jobs_count']), reverse=True)
    return HttpResponse(json.dumps({'results': matches[:10]}),
                        content_type='application/json')
def search_slugs(request):
    """Resolve free-text skill and location queries to canonical slugs.

    ``q_slug`` is a comma-separated list of skill/degree terms (any
    "job"/"jobs" substrings are stripped first); ``location`` is a
    comma-separated list of city/state terms. Known terms map to their
    model slugs; unknown skill terms fall back to ``slugify``. Parts are
    joined with '-' in input order. Responds with JSON
    ``{'skill_slug': ..., 'location_slug': ...}``.
    """
    searched = request.GET.get('q_slug', '').replace('jobs', '').replace('job', '')
    skill_terms = [term.strip() for term in searched.split(',') if term.strip()]
    slug_parts = []
    for term in skill_terms:
        matched_skills = Skill.objects.filter(Q(slug__iexact=term) | Q(name__iexact=term))
        matched_degrees = Qualification.objects.filter(Q(slug__iexact=term) | Q(name__iexact=term))
        slug_parts.extend(skill.slug for skill in matched_skills)
        slug_parts.extend(degree.slug for degree in matched_degrees)
        if not matched_skills and not matched_degrees:
            slug_parts.append(slugify(term))
    slug = '-'.join(slug_parts)
    location = request.GET.get('location', '')
    location_parts = []
    if location:
        location_terms = [term.strip() for term in location.split(',') if term.strip()]
        for term in location_terms:
            # Cities whose name equals their state's name are skipped in
            # favour of the state entry.
            matched_cities = City.objects.filter(
                Q(Q(slug__iexact=term) | Q(name__iexact=term)) & ~Q(state__name=F('name')))
            matched_states = State.objects.filter(Q(slug__iexact=term) | Q(name__iexact=term))
            location_parts.extend(city.slug for city in matched_cities)
            location_parts.extend(state.slug for state in matched_states)
    location_slug = '-'.join(location_parts)
    if location and not location_slug:
        # Nothing recognised at all: slugify the raw location string.
        location_slug = slugify(location)
    the_data = json.dumps({'skill_slug': slug, 'location_slug': location_slug})
    return HttpResponse(the_data, content_type='application/json')
| 50.39738 | 157 | 0.598605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,224 | 0.269647 |
dc4fefe1808974939cccf075125250d15a87b8f6 | 871 | py | Python | IslandGIS/feedback/forms.py | eRestin/MezzGIS | 984341fa5361433cf9b6f30b113358c19d3cd05c | [
"BSD-2-Clause"
] | null | null | null | IslandGIS/feedback/forms.py | eRestin/MezzGIS | 984341fa5361433cf9b6f30b113358c19d3cd05c | [
"BSD-2-Clause"
] | null | null | null | IslandGIS/feedback/forms.py | eRestin/MezzGIS | 984341fa5361433cf9b6f30b113358c19d3cd05c | [
"BSD-2-Clause"
] | null | null | null | from flexipage.forms import FlexiModelForm
from mezzanine.core.forms import Html5Mixin
from django import forms
from models import Feedback
class FeedbackForm(FlexiModelForm, Html5Mixin):
    """Contact/feedback form backed by the ``Feedback`` model.

    All labels are suppressed (``label=''``) because each widget carries a
    ``placeholder`` instead; the ``class`` attrs hook into the site's CSS.
    """
    class Meta:
        model = Feedback

    # Field order matches the rendered layout: name, email, telephone,
    # address, then the free-text enquiry.
    name = forms.CharField(widget=forms.TextInput(attrs = {'placeholder': 'Name', 'class': 'namesclass'}), label='')
    email = forms.CharField(widget=forms.TextInput(attrs = {'placeholder': 'E-Mail', 'class': 'emailclass'}), label='')
    telephone = forms.CharField(widget=forms.TextInput(attrs = {'placeholder': 'Telephone', 'class': 'telephoneclass'}), label='')
    address = forms.CharField(widget=forms.TextInput(attrs = {'placeholder': 'Address', 'class': 'addressclass'}), label='')
    content = forms.CharField(widget=forms.Textarea(attrs = {'placeholder': 'Enquiry', 'rows': 4, 'cols': 40, 'class': 'contentsclass'}), label='')
dc50566acf856455bdeee2766f9b15e1576f4438 | 2,953 | py | Python | research_site/blog/models.py | MatthewTe/research_site | cec4b09823488effdc4b249716b52530484a49c8 | [
"MIT"
] | null | null | null | research_site/blog/models.py | MatthewTe/research_site | cec4b09823488effdc4b249716b52530484a49c8 | [
"MIT"
] | null | null | null | research_site/blog/models.py | MatthewTe/research_site | cec4b09823488effdc4b249716b52530484a49c8 | [
"MIT"
] | null | null | null | # Importing default django packages:
from django.db import models
from django.template.defaultfilters import slugify
# Importing models from the research core:
from research_core.models import Topic
# Importing 3rd party packages:
from tinymce import models as tinymce_models
class BlogPost(models.Model):
    """A blog post whose body is TinyMCE-managed HTML, linked to a research Topic.

    Fields:
        title: Display title; also the source for the auto-generated slug.
        blog_thumbnail: Header/thumbnail image. When absent, ``save()``
            falls back to the associated topic's image (if a topic is set).
        content: HTML body edited via the TinyMCE widget in the admin.
        published_at: Set once when the post is created.
        last_updated: Refreshed on every save.
        slug: Unique URL identifier; derived from ``title`` when blank.
        topic: Optional link to a ``research_core.Topic``; set to NULL if
            the topic is deleted.
    """

    # Model-specific fields:
    title = models.CharField(max_length=250, unique=True)
    blog_thumbnail = models.ImageField(upload_to="blog/thumbnails", null=True, blank=True, default=None)
    content = tinymce_models.HTMLField()
    published_at = models.DateTimeField(auto_now_add=True)
    last_updated = models.DateTimeField(auto_now=True)
    slug = models.SlugField(max_length=300, unique=True, null=True, blank=True)

    # Foreign connection fields:
    topic = models.ForeignKey(Topic, on_delete=models.SET_NULL, null=True)

    def save(self, *args, **kwargs):
        # Derive the slug from the title unless one was supplied explicitly.
        if not self.slug:
            self.slug = slugify(self.title)
        # Fall back to the topic's image when no thumbnail was uploaded.
        # Guard against a null topic: the FK is nullable (SET_NULL), so the
        # previous unconditional ``self.topic.topic_img`` access could raise
        # AttributeError for posts without a topic.
        if not self.blog_thumbnail and self.topic is not None:
            self.blog_thumbnail = self.topic.topic_img
        super().save(*args, **kwargs)

    class Meta:
        ordering = ["-published_at"]

    def __str__(self):
        return self.title
dc507a2b9a315438fd19294af7cfbee848a645ea | 1,082 | py | Python | pythonlearn/input.py | kuljotbiring/Python | 743c93b91c5e4a4bf5066cf50e72e5a51d98d1ad | [
"MIT"
] | null | null | null | pythonlearn/input.py | kuljotbiring/Python | 743c93b91c5e4a4bf5066cf50e72e5a51d98d1ad | [
"MIT"
] | null | null | null | pythonlearn/input.py | kuljotbiring/Python | 743c93b91c5e4a4bf5066cf50e72e5a51d98d1ad | [
"MIT"
] | null | null | null | # Write a program that asks the user what kind of rental car they
# would like. Print a message about that car, such as “Let me see if I can find you
# a Subaru.”
car = input("What type of rental rental car would you like? ")
print(f"Checking database to find a {car}")
# Write a program that asks the user how many people
# are in their dinner group. If the answer is more than eight, print a message saying
# they’ll have to wait for a table. Otherwise, report that their table is ready.
num_guests = input("Goodevening, how many in your dinner party group? ")
num_guests = int(num_guests)
if num_guests > 8:
print("I'm sorry, you will have to wait for a table")
else:
print("Right this way, we have an open table for you")
# Ask the user for a number, and then report whether the
# number is a multiple of 10 or not.
number = input("Please enter a number and I'll tell you if its a multiple of 10: ")
number = int(number)
if number % 10 == 0:
print(f"The number {number} is a multiple of 10")
else:
print(f"The number {number} is not a multiple of 10")
| 33.8125 | 85 | 0.712569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 860 | 0.790441 |
dc5081a7fb1b8aa3ced2ff534ca18e003c6bcf33 | 11,421 | py | Python | compression/nn.py | andiac/localbitsback | 1e5eaf49246a50c1b15a7cb724ea91560388f0ee | [
"MIT"
] | 28 | 2019-11-02T00:27:03.000Z | 2021-12-12T00:48:56.000Z | compression/nn.py | andiac/localbitsback | 1e5eaf49246a50c1b15a7cb724ea91560388f0ee | [
"MIT"
] | 2 | 2019-11-21T09:14:41.000Z | 2021-06-03T17:45:10.000Z | compression/nn.py | andiac/localbitsback | 1e5eaf49246a50c1b15a7cb724ea91560388f0ee | [
"MIT"
] | 3 | 2019-11-04T13:29:05.000Z | 2021-05-24T03:06:12.000Z | from contextlib import contextmanager
import torch
import torch.nn.functional as F
from torch.nn import Module, Parameter
from torch.nn import init
_WN_INIT_STDV = 0.05
_SMALL = 1e-10
_INIT_ENABLED = False
def is_init_enabled():
    """Return True while inside an ``init_mode()`` block (data-dependent init pass)."""
    return _INIT_ENABLED
@contextmanager
def init_mode():
    """Context manager enabling the data-dependent initialization pass.

    While active, ``DataDepInitModule.forward`` dispatches to ``_init``
    instead of ``_forward``. Not reentrant: nesting is rejected by the
    assertion.
    """
    global _INIT_ENABLED
    assert not _INIT_ENABLED
    _INIT_ENABLED = True
    try:
        yield
    finally:
        # Restore the flag even if the init pass raises, so a failed init
        # does not leave the whole process stuck in init mode.
        _INIT_ENABLED = False
class DataDepInitModule(Module):
    """Base module implementing data-dependent initialization.

    On a forward pass executed inside an ``init_mode()`` block, the module
    calibrates its parameters from the batch statistics of the input via
    ``_init`` (under ``no_grad``); every other call uses the ordinary
    ``_forward``.
    """

    def __init__(self):
        super().__init__()

    def _init(self, *args, **kwargs):
        """
        Data-dependent initialization. Will be called on the first forward()
        """
        raise NotImplementedError

    def _forward(self, *args, **kwargs):
        """
        The standard forward pass
        """
        raise NotImplementedError

    def forward(self, *args, **kwargs):
        """
        Calls _init (with no_grad) if not initialized.
        If initialized already, calls _forward.
        """
        if _INIT_ENABLED:
            with torch.no_grad():  # no gradients for the init pass
                return self._init(*args, **kwargs)
        return self._forward(*args, **kwargs)
class Dense(DataDepInitModule):
    """Fully connected layer with data-dependent output normalization.

    The init pass rescales ``w`` and shifts ``b`` so that, on the init
    batch, every output unit has zero mean and standard deviation
    ``init_scale``.
    """

    def __init__(self, in_features, out_features, init_scale=1.0):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.init_scale = init_scale
        self.w = Parameter(torch.Tensor(out_features, in_features))
        self.b = Parameter(torch.Tensor(out_features))
        init.normal_(self.w, 0, _WN_INIT_STDV)
        init.zeros_(self.b)

    def _init(self, x):
        # Statistics of the pre-normalization activations on the init batch.
        pre = self._forward(x)
        mean = pre.mean(dim=0)
        gain = self.init_scale / (pre.std(dim=0) + _SMALL)
        assert mean.shape == gain.shape == self.b.shape
        # Fold the normalization directly into the parameters.
        self.w.copy_(self.w * gain[:, None])
        self.b.copy_(-mean * gain)
        return self._forward(x)

    def _forward(self, x):
        return F.linear(x, self.w, self.b[None, :])
class WnDense(DataDepInitModule):
    """Weight-normalized fully connected layer.

    Weight rows are reparameterized as ``g * v / ||v||``; the data-dependent
    init pass sets ``g`` and ``b`` so each output unit has zero mean and
    std ``init_scale`` on the init batch.
    """

    def __init__(self, in_features, out_features, init_scale=1.0):
        super().__init__()
        self.in_features, self.out_features, self.init_scale = in_features, out_features, init_scale
        self.v = Parameter(torch.Tensor(out_features, in_features))  # direction
        self.g = Parameter(torch.Tensor(out_features))  # per-unit gain
        self.b = Parameter(torch.Tensor(out_features))  # bias
        init.normal_(self.v, 0., _WN_INIT_STDV)
        init.ones_(self.g)
        init.zeros_(self.b)

    def _init(self, x):
        # calculate unnormalized activations
        y_unnormalized = self._forward(x)
        # set g and b so that activations are normalized on the init batch
        m = y_unnormalized.mean(dim=0)
        s = self.init_scale / (y_unnormalized.std(dim=0) + _SMALL)
        assert m.shape == s.shape == self.g.shape == self.b.shape
        self.g.data.copy_(s)
        self.b.data.sub_(m * s)
        # forward pass again, now normalized
        return self._forward(x)

    def _forward(self, x):
        (bs, in_features), out_features = x.shape, self.v.shape[0]
        assert in_features == self.v.shape[1]
        vnorm = self.v.norm(p=2, dim=1)
        assert vnorm.shape == self.g.shape == self.b.shape
        y = torch.addcmul(self.b[None, :], (self.g / vnorm)[None, :], x @ self.v.t())
        # the line above is equivalent to: y = self.b[None, :] + (self.g / vnorm)[None, :] * (x @ self.v.t())
        assert y.shape == (bs, out_features)
        return y

    def extra_repr(self):
        # Fixed: previously referenced the nonexistent attribute ``in_dim``,
        # which raised AttributeError whenever the module was printed.
        return f'in_features={self.in_features}, out_features={self.out_features}, init_scale={self.init_scale}'
class _Nin(DataDepInitModule):
    """1x1 'network-in-network' layer: a dense layer applied per pixel.

    Flattens (b,c,h,w) feature maps to (b*h*w,c), applies ``Dense`` or
    ``WnDense`` over the channel axis, and restores the spatial layout.
    The spatial size is latched on the first call and asserted afterwards.
    """

    def __init__(self, in_features, out_features, wn: bool, init_scale: float):
        super().__init__()
        base_module = WnDense if wn else Dense
        self.dense = base_module(in_features=in_features, out_features=out_features, init_scale=init_scale)
        # Latched from the first input; later inputs must match.
        self.height, self.width = None, None

    def _preprocess(self, x):
        """(b,c,h,w) -> (b*h*w,c)"""
        B, C, H, W = x.shape
        if self.height is None or self.width is None:
            self.height, self.width = H, W
        else:
            assert self.height == H and self.width == W, 'nin input image shape changed!'
        assert C == self.dense.in_features
        return x.permute(0, 2, 3, 1).reshape(B * H * W, C)

    def _postprocess(self, x):
        """(b*h*w,c) -> (b,c,h,w)"""
        BHW, C = x.shape
        out = x.reshape(-1, self.height, self.width, C).permute(0, 3, 1, 2)
        assert out.shape[1:] == (self.dense.out_features, self.height, self.width)
        return out

    def _init(self, x):
        # Route the init pass through the inner dense layer's init.
        return self._postprocess(self.dense._init(self._preprocess(x)))

    def _forward(self, x):
        return self._postprocess(self.dense._forward(self._preprocess(x)))
class Nin(_Nin):
    """1x1 convolution implemented as a plain ``Dense`` over channels."""

    def __init__(self, in_features, out_features, init_scale=1.0):
        super().__init__(in_features=in_features, out_features=out_features, wn=False, init_scale=init_scale)
class WnNin(_Nin):
    """1x1 convolution implemented as a weight-normalized ``WnDense`` over channels."""

    def __init__(self, in_features, out_features, init_scale=1.0):
        super().__init__(in_features=in_features, out_features=out_features, wn=True, init_scale=init_scale)
class Conv2d(DataDepInitModule):
    """2-D convolution with data-dependent output normalization.

    The init pass rescales ``w`` and shifts ``b`` so each output channel
    has zero mean and std ``init_scale`` over the init batch and all
    spatial positions.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, init_scale=1.0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.init_scale = init_scale
        self.w = Parameter(torch.Tensor(out_channels, in_channels, self.kernel_size, self.kernel_size))
        self.b = Parameter(torch.Tensor(out_channels))
        init.normal_(self.w, 0, _WN_INIT_STDV)
        init.zeros_(self.b)

    def _init(self, x):
        # x: (batch, in_channels, h, w) -> y: (batch, out_channels, h', w')
        y = self._forward(x)
        # Per-output-channel statistics over batch and spatial positions.
        flat = y.transpose(0, 1).reshape(y.shape[1], -1)
        mean = flat.mean(dim=1)
        gain = self.init_scale / (flat.std(dim=1) + _SMALL)
        # Fold the normalization directly into the parameters.
        self.w.copy_(self.w * gain[:, None, None, None])
        self.b.copy_(-mean * gain)
        return self._forward(x)

    def _forward(self, x):
        return F.conv2d(x, self.w, self.b, self.stride, self.padding, self.dilation, 1)
class WnConv2d(DataDepInitModule):
    """Weight-normalized 2-D convolution.

    Filters are reparameterized as ``g * v / ||v||`` per output channel;
    the data-dependent init pass sets ``g`` and ``b`` so each output
    channel has zero mean and std ``init_scale`` on the init batch.
    """

    def __init__(self, in_channels, out_channels, kernel_size, padding, init_scale=1.0):
        super().__init__()
        self.in_channels, self.out_channels, self.kernel_size, self.padding = in_channels, out_channels, kernel_size, padding
        self.init_scale = init_scale
        self.v = Parameter(torch.Tensor(out_channels, in_channels, self.kernel_size, self.kernel_size))  # direction
        self.g = Parameter(torch.Tensor(out_channels))  # per-channel gain
        self.b = Parameter(torch.Tensor(out_channels))  # bias
        init.normal_(self.v, 0., _WN_INIT_STDV)
        init.ones_(self.g)
        init.zeros_(self.b)

    def _init(self, x):
        # calculate unnormalized activations
        y_bchw = self._forward(x)
        assert len(y_bchw.shape) == 4 and y_bchw.shape[:2] == (x.shape[0], self.out_channels)
        # set g and b so that activations are normalized per output channel
        y_c = y_bchw.transpose(0, 1).reshape(self.out_channels, -1)
        m = y_c.mean(dim=1)
        s = self.init_scale / (y_c.std(dim=1) + _SMALL)
        assert m.shape == s.shape == self.g.shape == self.b.shape
        self.g.data.copy_(s)
        self.b.data.sub_(m * s)
        # forward pass again, now normalized
        return self._forward(x)

    def _forward(self, x):
        vnorm = self.v.view(self.out_channels, -1).norm(p=2, dim=1)
        assert vnorm.shape == self.g.shape == self.b.shape
        w = self.v * (self.g / (vnorm + _SMALL)).view(self.out_channels, 1, 1, 1)
        return F.conv2d(x, w, self.b, padding=self.padding)

    def extra_repr(self):
        # Fixed: previously referenced the nonexistent attribute ``in_dim``,
        # which raised AttributeError whenever the module was printed.
        return f'in_channels={self.in_channels}, out_channels={self.out_channels}, kernel_size={self.kernel_size}, padding={self.padding}, init_scale={self.init_scale}'
class LearnedNorm(DataDepInitModule):
    """Elementwise affine normalizer with data-dependent initialization.

    After the init pass, ``forward(x, inverse=False)`` maps the init batch
    to zero mean and std ``init_scale`` per element; ``inverse=True``
    applies the exact inverse transform.
    """

    def __init__(self, shape, init_scale=1.0):
        super().__init__()
        self.init_scale = init_scale
        self.g = Parameter(torch.ones(*shape))
        self.b = Parameter(torch.zeros(*shape))

    def _init(self, x, *, inverse):
        assert not inverse
        assert x.shape[1:] == self.g.shape == self.b.shape
        batch_mean = x.mean(dim=0)
        batch_gain = self.init_scale / (x.std(dim=0) + _SMALL)
        self.g.copy_(batch_gain)
        self.b.copy_(-batch_mean * batch_gain)
        return self._forward(x, inverse=inverse)

    def get_gain(self):
        # Keep the gain strictly positive so the inverse never divides by zero.
        return torch.clamp(self.g, min=1e-10)

    def _forward(self, x, *, inverse):
        """Normalize when ``inverse`` is False; apply the inverse map otherwise."""
        assert x.shape[1:] == self.g.shape == self.b.shape
        assert x.dtype == self.g.dtype == self.b.dtype
        gain = self.get_gain()
        if inverse:
            return (x - self.b[None]) / gain[None]
        return x * gain[None] + self.b[None]
@torch.no_grad()
def _test_data_dep_init(m, x, init_scale, verbose=True, tol=1e-8, kwargs=None):
    """Check that module ``m`` is correctly initialized by its init pass.

    Runs ``m`` once under ``init_mode()`` and once normally, then asserts
    that (a) the two outputs match, (b) each output unit has ~zero mean,
    and (c) each output unit's std is ~``init_scale`` on the batch ``x``.
    """
    if kwargs is None:
        kwargs = {}
    with init_mode():
        y_init = m(x, **kwargs)
    y = m(x, **kwargs)
    assert (y - y_init).abs().max() < tol, 'init pass output does not match normal forward pass'
    y_outputs_flat = y.transpose(0, 1).reshape(y.shape[1], -1)  # assumes axis 1 is the output axis
    assert y_outputs_flat.mean(dim=1).abs().max() < tol, 'means wrong after normalization'
    assert (y_outputs_flat.std(dim=1) - init_scale).abs().max() < tol, 'standard deviations wrong after normalization'
    if verbose:
        print('ok')
def test_dense():
    """Both dense variants normalize their init batch and keep output shape."""
    batch, n_in, n_out = 128, 20, 29
    target_scale = 3.14159
    x = torch.randn(batch, n_in, dtype=torch.float64)
    for layer_cls in (Dense, WnDense):
        layer = layer_cls(in_features=n_in, out_features=n_out, init_scale=target_scale).double()
        _test_data_dep_init(layer, x, target_scale)
        assert layer(x).shape == (batch, n_out)
def test_conv2d():
    """Both conv variants normalize their init batch and keep output shape."""
    batch, c_in, c_out = 128, 20, 29
    height, width = 9, 11
    target_scale = 3.14159
    x = torch.randn(batch, c_in, height, width, dtype=torch.float64)
    for layer_cls in (Conv2d, WnConv2d):
        layer = layer_cls(in_channels=c_in, out_channels=c_out, kernel_size=3, padding=1,
                          init_scale=target_scale).double()
        _test_data_dep_init(layer, x, target_scale)
        assert layer(x).shape == (batch, c_out, height, width)
def test_learnednorm():
    """LearnedNorm normalizes its init batch; forward/inverse round-trips."""
    batch, n_feat = 128, 20
    target_scale = 3.14159
    x = torch.rand(batch, n_feat, dtype=torch.float64)
    norm = LearnedNorm(shape=(n_feat,), init_scale=target_scale).double()
    _test_data_dep_init(norm, x, target_scale, kwargs={'inverse': False})
    y = norm(x, inverse=False)
    assert y.shape == (batch, n_feat)
    assert torch.allclose(norm(y, inverse=True), x), 'inverse failed'
| 36.488818 | 163 | 0.631906 | 9,011 | 0.788985 | 131 | 0.01147 | 806 | 0.070572 | 0 | 0 | 1,555 | 0.136153 |
dc50edea6ac2695524daf7ee1c39c40e72cdd3da | 47,881 | py | Python | dreamerv2/common/nets.py | footoredo/dreamerv2 | 493e1c0b92cf667a4b4fdcaf8f805273beeb165f | [
"MIT"
] | null | null | null | dreamerv2/common/nets.py | footoredo/dreamerv2 | 493e1c0b92cf667a4b4fdcaf8f805273beeb165f | [
"MIT"
] | null | null | null | dreamerv2/common/nets.py | footoredo/dreamerv2 | 493e1c0b92cf667a4b4fdcaf8f805273beeb165f | [
"MIT"
] | null | null | null | import re
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers as tfkl
from tensorflow_probability import distributions as tfd
from tensorflow.keras.mixed_precision import experimental as prec
import common
class EnsembleRSSM(common.Module):
  def __init__(
      self, config, ensemble=5, stoch=30, deter=200, hidden=200, discrete=False,
      act='elu', norm='none', std_act='softplus', min_std=0.1, exclude_deter_feat=False,
      use_transformer=False, num_actions=None, transformer=None, use_forward_loss=False,
      use_transformer_reward_head=False, encoder=None, raw_transformer=None, inside_transformer=None):
    """Configure the recurrent state-space model and its optional transformers.

    NOTE: ``transformer`` and ``inside_transformer`` dicts are mutated here
    (``memory_size``/``version`` are popped before the remainder is passed
    on as keyword arguments) — callers must not reuse the same dicts.
    """
    super().__init__()
    self.config = config
    self._ensemble = ensemble
    self._stoch = stoch
    self._deter = deter
    self._hidden = hidden
    self._discrete = discrete
    self._act = get_act(act)
    self._norm = norm
    self._std_act = std_act
    self._min_std = min_std
    self._num_actions = num_actions
    self._exclude_deter_feat = exclude_deter_feat
    self.use_transformer = use_transformer
    self._use_forward_loss = use_forward_loss
    # The transformer reward head only makes sense when a transformer exists.
    self.use_transformer_reward_head = use_transformer_reward_head and use_transformer
    # rnn state: {"stoch": [batch_size, stoch, discrete], "deter": [batch_size, deter]}
    # transformer state: {"stoch": [batch_size, stoch, discrete], "t_stoch": [batch_size, seq_len - 1, stoch, discrete],
    #                     "t_action": [batch_size, seq_len - 1, n_action], "deter": [batch_size, deter]}
    self._cell = GRUCell(self._deter, norm=True)
    # Pop the sizing knobs out of the transformer config dicts; the rest is
    # forwarded verbatim to the transformer constructors below.
    self._memory_size = transformer.pop("memory_size")
    self._inside_memory_size = inside_transformer.pop("memory_size")
    self._transformer_version = transformer.pop("version")
    self._transformer_params = transformer
    # Derived feature flags; each is only honoured when its prerequisite
    # (e.g. a transformer being enabled at all) also holds.
    self._use_raw_input_in_transformer = config.use_raw_input_in_transformer and self.use_transformer
    self._use_independent_transformer = config.use_independent_transformer and self.use_transformer
    self._use_independent_state_transformer = config.use_independent_state_transformer and self.use_transformer
    self._use_inside_transformer = config.use_inside_transformer and self._use_independent_transformer
    self._inside_transformer_include_action = config.inside_transformer_include_action
    self._inside_transformer_include_importance = config.inside_transformer_include_importance
    self._myopic_prediction = config.myopic_prediction
    self.use_independent_transformer_encoder = config.use_independent_transformer_encoder
    self._include_transformer_embed = config.include_transformer_embed
    self._transformer_shift = config.transformer_shift
    if self.use_transformer:
      if self._use_independent_transformer:
        # The independent transformer consumes raw observations, so raw
        # input mode is mandatory here.
        assert self._use_raw_input_in_transformer
        self._transformer = common.TransformerNew(output_dim=hidden, **raw_transformer)
        if self._use_inside_transformer:
          self._inside_transformer = common.Transformer(output_dim=hidden, no_pe=True, **inside_transformer)
      else:
        self._transformer = common.Transformer(output_dim=hidden, **transformer)
      if self._use_independent_state_transformer:
        self._state_transformer = common.TransformerNew(output_dim=hidden, **raw_transformer)
    else:
      self._transformer = None
    self._transformer_encoder = None
    if self._use_raw_input_in_transformer:
      if (self._use_independent_transformer and self.use_independent_transformer_encoder) or (not self._use_independent_transformer):
        print("Transformer encoder:", encoder)
        # Reuse the caller-supplied encoder if given; otherwise build one
        # from the shared encoder config.
        self._transformer_encoder = encoder or common.Encoder(**config.encoder)
    # Set later via set_importance_head() so it is saved/loaded alongside us.
    self._importance_head = None
    self._cast = lambda x: tf.cast(x, prec.global_policy().compute_dtype)
def save_transformer(self, save_dir):
if self._transformer is not None:
self._transformer.save(save_dir / "transformer.pkl")
if self._transformer_encoder is not None:
self._transformer_encoder.save(save_dir / "transformer_encoder.pkl")
if self._importance_head is not None:
self._importance_head.save(save_dir / "importance_head.pkl")
def load_transformer(self, load_dir):
if self._transformer is not None:
self._transformer.load(load_dir / "transformer.pkl")
if self._transformer_encoder is not None:
self._transformer_encoder.load(load_dir / "transformer_encoder.pkl")
if self._importance_head is not None:
self._importance_head.load(load_dir / "importance_head.pkl")
  def set_importance_head(self, head):
    """Attach an externally-built importance head so it is saved/loaded with the RSSM."""
    self._importance_head = head
def transformer_encode(self, data, default=None):
if self._transformer_encoder is None:
return default
else:
return self._transformer_encoder(data)
  def initial(self, batch_size, transformer_weight=False):
    """Build the all-zeros initial RSSM state dict for ``batch_size`` rows.

    Always contains the stochastic posterior tensors (``logit``/``stoch``
    for discrete latents, ``mean``/``std``/``stoch`` otherwise) plus the
    GRU ``deter`` state; additional keys are added depending on the
    forward-loss, myopic-prediction and transformer configuration flags.
    When ``transformer_weight`` is True, per-layer attention-weight
    placeholders are included as well.
    """
    dtype = prec.global_policy().compute_dtype
    if self._discrete:
      stoch_size = [self._stoch, self._discrete]
      total_stoch = self._stoch * self._discrete
      state = dict(
          logit=tf.zeros([batch_size] + stoch_size, dtype),
          stoch=tf.zeros([batch_size] + stoch_size, dtype))
    else:
      stoch_size = [self._stoch]
      total_stoch = self._stoch
      state = dict(
          mean=tf.zeros([batch_size] + stoch_size, dtype),
          std=tf.zeros([batch_size] + stoch_size, dtype),
          stoch=tf.zeros([batch_size] + stoch_size, dtype))
    state['deter'] = self._cell.get_initial_state(None, batch_size, dtype)
    if self._use_forward_loss:
      state['forward_stoch'] = tf.zeros([batch_size] + stoch_size, dtype)
    if self._myopic_prediction:
      state['myopic_out'] = tf.zeros([batch_size, self._hidden], dtype)
    if self.use_transformer:
      if not self._use_independent_transformer:
        # Recurrent-style transformer: keep a rolling memory of hidden
        # tokens (encoded raw inputs or stoch+deter features).
        if self._use_raw_input_in_transformer:
          state['t_hidden'] = tf.zeros([batch_size, self._memory_size, self._hidden])
          state['t_counter'] = tf.zeros([batch_size, 1])
        else:
          state['t_hidden'] = tf.zeros([batch_size, self._memory_size, total_stoch + self._deter])
        state['t_transformer'] = tf.zeros([batch_size, self._deter])
        if transformer_weight:
          # One attention-weight placeholder per transformer layer.
          for i in range(self._transformer.num_layers):
            state[f't_weight_{i}'] = tf.zeros([batch_size, self._transformer.num_heads, self._memory_size])
      elif self._use_inside_transformer:
        # Inside-transformer memory token = stoch+deter, optionally
        # extended with the action and an importance scalar.
        token_size = total_stoch + self._deter
        if self._inside_transformer_include_action:
          token_size += self._num_actions
        if self._inside_transformer_include_importance:
          token_size += 1
        state['t_memory'] = tf.zeros([batch_size, self._inside_memory_size, token_size])
        state['t_importance'] = tf.zeros([batch_size, self._inside_memory_size])
    return state
# @property
# def use_rnn(self):
# return self._seq_model == 'rnn'
# @property
# def use_transformer(self):
# return self._seq_model == 'transformer'
def calc_independent_transformer_hidden(self, embed, action, is_first, training, transformer=None, transformer_shift=None, return_weight=False):
    """Run a sequence transformer over concatenated (embedding, action) tokens.

    Args:
        embed: observation embeddings, printed shape suggests [batch, length, embed].
        action: actions aligned with `embed` along the time axis.
        is_first: episode-start flags forwarded to the transformer.
        training: training-mode flag forwarded to the transformer.
        transformer: transformer module to use; defaults to `self._transformer`.
        transformer_shift: if True, lag the embeddings by one step so token t
            pairs the previous observation with the current action; defaults
            to `self._transformer_shift`.
        return_weight: if True, also return the attention weights.

    Returns:
        Transformer output, or `(output, weights)` when `return_weight`.
    """
    print("in calc_independent_transformer_hidden()")
    print("embed.shape", embed.shape, flush=True) # [batch, length, embed]
    print("action.shape", action.shape, flush=True)
    print("is_first.shape", is_first.shape, flush=True)
    # Fall back to the module's own transformer / shift setting when not given.
    transformer = self._transformer if transformer is None else transformer
    if transformer_shift is None:
        transformer_shift = self._transformer_shift
    if transformer_shift:
        # Zero-pad the first step and drop the last so embeddings lag actions.
        lagged_embed = tf.concat([tf.zeros_like(embed[:, :1, :]), embed[:, :-1, :]], 1)
        tokens = tf.concat([lagged_embed, action], -1)
    else:
        tokens = tf.concat([embed, action], -1)
    out, weight = transformer(tokens, is_first, training)
    print("out.shape", out.shape, flush=True)
    return (out, weight) if return_weight else out
# state[i - 1] + action[i] => state[i]
@tf.function
def observe(self, embed, transformer_embed, image, action, is_first, training, state=None, transformer_weight=False):
    """Filter a batch of sequences: compute posterior and prior states.

    Scans `obs_step` over the time axis, then (optionally) runs a separate
    transformer over the whole sequence at once.

    Args:
        embed: encoder embeddings for every step.
        transformer_embed: transformer-encoder embeddings for every step.
        image: raw observations; also used to build the previous-image input.
        action: actions; inputs appear to be batch-major [batch, time, ...]
            given the `[:, :1]` slicing and the batch/time swap below.
        is_first: per-step episode-start flags.
        training: forwarded to sub-modules.
        state: initial state dict; defaults to `self.initial(...)`.
        transformer_weight: if True, attention weights are kept in the state.

    Returns:
        (post, prior, state_transformer_stats); the last entry is None unless
        the independent state transformer is enabled.
    """
    # static_scan iterates over the leading axis, so swap batch and time.
    swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape))))
    if state is None:
        state = self.initial(tf.shape(action)[0], transformer_weight=transformer_weight)
    print("State:")
    for k, v in state.items():
        print(k, v.shape, flush=True)
    if self._use_independent_transformer:
        # Drop per-step transformer outputs/weights from the carried state;
        # they are recomputed below for the whole sequence in one pass.
        _state = dict()
        for k, v in state.items():
            if k != 't_transformer' and not k.startswith('t_weight_') and not k.startswith('t_state_weight_'):
                _state[k] = v
        state = _state
    # Previous image at step t (zeros for the very first step).
    prev_image = tf.concat([tf.zeros_like(image)[:, :1], image[:, :-1]], 1)
    print("prev_image:", prev_image.shape, flush=True)
    print("is_first:", is_first.shape, flush=True)
    if self._use_independent_transformer:
        # Full-sequence transformer over either the transformer encoder's
        # embedding or the regular embedding.
        if self.use_independent_transformer_encoder:
            print("transformer_embed.shape", transformer_embed.shape, flush=True)
            _embed = transformer_embed
        else:
            print("embed.shape", embed.shape, flush=True)
            _embed = embed
        out, weight = self.calc_independent_transformer_hidden(_embed, action, is_first, training, return_weight=True)
        t_states = dict()
        t_states['t_transformer'] = out
        for i in range(self._transformer.num_layers):
            t_states[f't_weight_{i}'] = weight[f'dec{i}'].transpose([0, 2, 1, 3])
            print(f't_weight_{i}.shape', t_states[f't_weight_{i}'].shape, flush=True) # [batch, length, head, length]
    # Scan one obs_step per time step; prev[0] is the previous posterior.
    post, prior = common.static_scan(
        lambda prev, inputs: self.obs_step(prev[0], *inputs, training=training, transformer_weight=transformer_weight),
        (swap(prev_image), swap(action), swap(image), swap(embed), swap(transformer_embed), swap(is_first)), (state, state))
    print("Post 1:")
    for k, v in post.items():
        print(k, v.shape, flush=True)
    print("Prior 1:")
    for k, v in prior.items():
        print(k, v.shape, flush=True)
    # Swap back to batch-major.
    post = {k: swap(v) for k, v in post.items()}
    prior = {k: swap(v) for k, v in prior.items()}
    if self._use_independent_transformer:
        post.update(t_states)
    if self._use_independent_state_transformer:
        # Predict distribution parameters from the (detached) posterior
        # stochastic states with a separate, shifted transformer.
        _embed = tf.stop_gradient(post['stoch'])
        if self._discrete:
            shape = _embed.shape[:-2] + [self._stoch * self._discrete]
            _embed = tf.reshape(_embed, shape)
        out, weight = self.calc_independent_transformer_hidden(_embed, action, is_first, training, transformer=self._state_transformer, transformer_shift=True, return_weight=True)
        x = self.get('state_transformer_out', tfkl.Dense, self._hidden)(out)
        x = self.get('state_transformer_out_norm', NormLayer, self._norm)(x)
        x = self._act(x)
        state_transformer_stats = self._suff_stats_layer('state_transformer_dist', x)
        for i in range(self._state_transformer.num_layers):
            post[f't_state_weight_{i}'] = weight[f'dec{i}'].transpose([0, 2, 1, 3])
    else:
        state_transformer_stats = None
    print("Post 2:")
    for k, v in post.items():
        print(k, v.shape, flush=True)
    print("Prior 2:")
    for k, v in prior.items():
        print(k, v.shape, flush=True)
    return post, prior, state_transformer_stats
@tf.function
def imagine(self, action, training, state=None, transformer_weight=False):
    """Roll the prior forward through a sequence of actions (no observations).

    Args:
        action: batch-major action sequence.
        training: forwarded to `img_step`.
        state: starting state dict; defaults to `self.initial(...)`.
        transformer_weight: if True, keep attention weights in the prior.

    Returns:
        Batch-major dict of prior state tensors.
    """
    def swap(x):
        # static_scan iterates the leading axis, so exchange batch and time.
        return tf.transpose(x, [1, 0] + list(range(2, len(x.shape))))
    if state is None:
        state = self.initial(tf.shape(action)[0])
    if self._use_independent_transformer:
        # Transformer outputs/attention entries are recomputed per call.
        state = {
            k: v for k, v in state.items()
            if k != 't_transformer'
            and not k.startswith('t_weight_')
            and not k.startswith('t_state_weight_')}
    assert isinstance(state, dict), state
    action = swap(action)
    step = lambda prev, inputs: self.img_step(prev, inputs, training=training, transformer_weight=transformer_weight)
    prior = common.static_scan(step, action, state)
    print('in imagine')
    print('action.shape', action.shape)
    print('state')
    for k, v in state.items():
        print(k, v.shape, flush=True)
    print('prior')
    for k, v in prior.items():
        print(k, v.shape, flush=True)
    return {k: swap(v) for k, v in prior.items()}
def get_feat(self, state):
    """Concatenate stochastic and deterministic state into one feature vector.

    When `_exclude_deter_feat` is set, the deterministic part is replaced by
    zeros of the same shape so the feature width stays constant.
    """
    stochastic = self._cast(state['stoch'])
    if self._discrete:
        # Flatten the trailing (groups, classes) one-hot dims into one axis.
        flat_shape = stochastic.shape[:-2] + [self._stoch * self._discrete]
        stochastic = tf.reshape(stochastic, flat_shape)
    if self._exclude_deter_feat:
        deterministic = tf.zeros_like(state['deter'])
    else:
        deterministic = state['deter']
    return tf.concat([stochastic, deterministic], -1)
def get_mask(self, name):
    """Return a per-feature mask for the head called `name`.

    Transformer/myopic reward heads see their full input; other heads get a
    mask over the flattened stochastic features (first `n` groups enabled)
    followed by all-ones over the deterministic features.

    Raises:
        NotImplementedError: for unrecognized head names.
    """
    def stoch_mask(enabled_groups):
        # Ones for the first `enabled_groups` stochastic groups, zeros after.
        assert 0 <= enabled_groups <= self._stoch
        width = self._discrete if self._discrete else 1
        return tf.concat(
            (tf.ones(enabled_groups * width),
             tf.zeros((self._stoch - enabled_groups) * width)), 0)
    if name == 'transformer_reward':
        return self._cast(tf.ones(self._deter))
    if name == 'myopic_reward':
        return self._cast(tf.ones(self._hidden))
    if name == 'decoder':
        # The decoder only sees the first half of the stochastic groups.
        head_mask = stoch_mask(self._stoch // 2)
    elif name in ('reward', 'discount') or name.startswith('int_reward'):
        head_mask = stoch_mask(self._stoch)
    else:
        raise NotImplementedError
    return self._cast(tf.concat((head_mask, tf.ones(self._deter)), 0))
def get_dist(self, state, ensemble=False):
    """Build the latent distribution from sufficient statistics.

    Discrete latents yield an Independent OneHot distribution over `logit`;
    continuous latents yield a diagonal Gaussian from `mean`/`std`. With
    `ensemble=True`, statistics are recomputed from `state['deter']` for
    every ensemble member first.
    """
    if ensemble:
        state = self._suff_stats_ensemble(state['deter'])
    if self._discrete:
        logits = tf.cast(state['logit'], tf.float32)
        return tfd.Independent(common.OneHotDist(logits), 1)
    mean = tf.cast(state['mean'], tf.float32)
    std = tf.cast(state['std'], tf.float32)
    return tfd.MultivariateNormalDiag(mean, std)
@tf.function
def obs_step(self, prev_state, prev_image, prev_action, cur_image, embed, t_embed, is_first, training, sample=True, transformer_weight=False):
    """One posterior (filtering) step.

    Computes the prior via `img_step`, then conditions on the current
    observation embedding to produce the posterior state.

    Args:
        prev_state: state dict from the previous step.
        prev_image: previous raw image (zeros at sequence start).
        prev_action: action taken after `prev_state`.
        cur_image: current raw image.
        embed: encoder embedding of the current observation.
        t_embed: transformer-encoder embedding of the current observation.
        is_first: per-batch episode-start flags.
        training: forwarded to sub-modules.
        sample: sample the stochastic state if True, else take the mode.
        transformer_weight: if True, attention weights are stored in `post`.

    Returns:
        (post, prior) state dicts.
    """
    print("obs_step", transformer_weight)
    print("is_first", is_first.shape)
    # Zero out carried state and action wherever a new episode starts so no
    # information leaks across episode boundaries.
    def _mask_fn(x):
        _mask = (1.0 - is_first.astype(tf.float32)).astype(x.dtype)
        # Broadcast the [batch] mask across all trailing dims of x.
        _shape = list(is_first.shape) + [1] * (len(x.shape) - 1)
        return x * _mask.reshape(_shape)
    prev_state, prev_action = tf.nest.map_structure(
        _mask_fn, (prev_state, prev_action))
    prior = self.img_step(prev_state, prev_action, training, sample=sample, transformer_weight=transformer_weight)
    # Raw-input transformer path: attends over a rolling window of encoded
    # (previous image, action) tokens kept in `t_hidden`.
    use_transformer = self.use_transformer and self._use_raw_input_in_transformer and not self._use_independent_transformer
    if use_transformer:
        print("prev_image shape:", prev_image.shape)
        print("cur_image shape:", cur_image.shape)
        # All-zero images (padding at sequence start) get a zero mask.
        mask = tf.stop_gradient(1. - self._cast(tf.math.equal(tf.reduce_sum(prev_image ** 2, [-3, -2, -1]), 0.))) # [batch]
        prev_image_embed = self._transformer_encoder({"image": prev_image})
        print("prev_image_embed shape:", prev_image_embed.shape)
        print("prev_action shape:", prev_action.shape)
        prev_out = tf.concat([prev_image_embed, prev_action], -1)
        prev_out = self.get('prev_out', tfkl.Dense, self._hidden)(prev_out)
        prev_out = self.get('prev_out_norm', NormLayer, self._norm)(prev_out)
        print("prev_out shape:", prev_out.shape)
        prev_out = prev_out * mask[:, tf.newaxis]
        print("prev_out shape:", prev_out.shape)
        # Shift the FIFO memory window left and append the newest token.
        h_hidden = tf.identity(prev_state['t_hidden'])
        h_hidden = tf.concat([h_hidden[:, 1:, :], prev_out[:, tf.newaxis, :]], 1)
        # Query token built from the current embedding and previous action.
        cur_out = tf.concat([embed, prev_action], -1)
        cur_out = self.get('cur_out', tfkl.Dense, self._hidden)(cur_out)
        cur_out = self.get('cur_out_norm', NormLayer, self._norm)(cur_out)
        # NOTE(review): this rebinds `mask`, shadowing the padding mask above.
        transformer_out, weights, weights_norm, mask = self._transformer(h_hidden, cur_out, training=training)
        print('transformer_out shape:', transformer_out.shape)
        transformer_out = tf.concat([transformer_out, cur_out], -1)
        transformer_out = self.get("transformer_out", tfkl.Dense, self._hidden)(transformer_out)
        transformer_out = self.get("transformer_out_norm", NormLayer, self._norm)(transformer_out)
        if self._include_transformer_embed:
            # Current image re-encoded through the transformer encoder,
            # detached so gradients only flow through `embed`.
            x = tf.concat([prior['deter'], embed, tf.stop_gradient(self._transformer_encoder({"image": cur_image}))], -1)
        else:
            x = tf.concat([prior['deter'], embed], -1)
        x = self.get('t_obs_out', tfkl.Dense, self._hidden)(x)
        x = self.get('t_obs_out_norm', NormLayer, self._norm)(x)
        x = self._act(x)
    else:
        if self._include_transformer_embed:
            x = tf.concat([prior['deter'], embed, tf.stop_gradient(t_embed)], -1)
        else:
            x = tf.concat([prior['deter'], embed], -1)
        x = self.get('obs_out', tfkl.Dense, self._hidden)(x)
        x = self.get('obs_out_norm', NormLayer, self._norm)(x)
        x = self._act(x)
    # Posterior distribution over the stochastic state.
    stats = self._suff_stats_layer('obs_dist', x)
    dist = self.get_dist(stats)
    stoch = dist.sample() if sample else dist.mode()
    post = {'stoch': stoch, 'deter': prior['deter'], **stats}
    if self._use_inside_transformer:
        # The memory bank is advanced inside img_step; the posterior carries it.
        post['t_memory'] = prior['t_memory']
        post['t_importance'] = prior['t_importance']
    if self._myopic_prediction:
        post['myopic_out'] = prior['myopic_out']
    if self.use_transformer and not self._use_independent_transformer:
        if self._use_raw_input_in_transformer:
            post['t_hidden'] = tf.identity(h_hidden)
            post['t_transformer'] = tf.identity(transformer_out)
            post['t_counter'] = tf.stop_gradient(prev_state['t_counter'] + 1)
        else:
            post['t_hidden'] = tf.stop_gradient(tf.identity(prior['t_hidden']))
            post['t_transformer'] = tf.identity(prior['t_transformer'])
        if transformer_weight:
            for i in range(self._transformer.num_layers):
                if self._use_raw_input_in_transformer:
                    post[f't_weight_{i}'] = tf.stop_gradient(weights[f'dec{i}'])
                    post[f't_weight_norm_{i}'] = tf.stop_gradient(weights_norm[f'dec{i}'])
                else:
                    post[f't_weight_{i}'] = tf.stop_gradient(tf.identity(prior[f't_weight_{i}']))
                    # NOTE(review): img_step never writes 't_weight_norm_{i}'
                    # into the prior, so this lookup looks like it would
                    # KeyError on this branch — confirm against img_step.
                    post[f't_weight_norm_{i}'] = tf.stop_gradient(tf.identity(prior[f't_weight_norm_{i}']))
                print(f"post['t_weight_{i}']", post[f't_weight_{i}'].shape)
    return post, prior
@tf.function
def img_step(self, prev_state, prev_action, training, sample=True, transformer_weight=False):
    """One prior (imagination) step.

    Predicts the next latent state from the previous state and action only,
    without conditioning on an observation.

    Args:
        prev_state: state dict from the previous step.
        prev_action: action taken after `prev_state`.
        training: forwarded to sub-modules.
        sample: sample the stochastic state if True, else take the mode.
        transformer_weight: if True, attention weights are stored in the prior.

    Returns:
        Prior state dict with at least 'stoch', 'deter', and the
        distribution statistics.
    """
    print("img_step", transformer_weight)
    # NOTE(review): these two locals are never used below; they look like
    # leftovers from a removed history-buffer implementation.
    _prev_stoch = None
    _prev_action = None
    prev_stoch = self._cast(prev_state['stoch'])
    prev_action = self._cast(prev_action)
    if self._discrete:
        # Flatten (groups, classes) one-hot dims into one feature axis.
        shape = prev_stoch.shape[:-2] + [self._stoch * self._discrete]
        prev_stoch = tf.reshape(prev_stoch, shape)
    x = tf.concat([prev_stoch, prev_action], -1)
    x = self.get('img_in', tfkl.Dense, self._hidden)(x)
    x = self.get('img_in_norm', NormLayer, self._norm)(x)
    x = self._act(x)
    if self._use_forward_loss:
        # Separate projection used by the forward-prediction loss head.
        forward_x = tf.concat([prev_stoch, prev_action], -1)
        forward_x = self.get('forward_img_in', tfkl.Dense, self._hidden)(forward_x)
        forward_x = self.get('forward_img_in_norm', NormLayer, self._norm)(forward_x)
        forward_x = self._act(forward_x)
    if self._myopic_prediction:
        # One-step ("myopic") prediction branch from the same inputs.
        myopic_out = tf.concat([prev_stoch, prev_action], -1)
        myopic_out = self.get('myopic_out', tfkl.Dense, self._hidden)(myopic_out)
        myopic_out = self.get('myopic_out_norm', NormLayer, self._norm)(myopic_out)
        myopic_out = self._act(myopic_out)
    use_transformer = self.use_transformer and (not self._use_raw_input_in_transformer or self._use_inside_transformer)
    print('use_transformer in img?', use_transformer, flush=True)
    if use_transformer:
        if self._use_inside_transformer:
            # Importance-based memory: append the current token, score it,
            # then keep only the top-k most important tokens.
            t_memory = self._cast(tf.identity(prev_state['t_memory']))
            t_importance = self._cast(tf.identity(prev_state['t_importance']))
            last_token = tf.concat([prev_stoch, prev_state['deter']], 1)
            last_importance = tf.stop_gradient(self._importance_head(last_token).mode())
            if self._inside_transformer_include_action:
                last_token = tf.concat([last_token, prev_action], 1)
            if self._inside_transformer_include_importance:
                last_token = tf.concat([last_token, last_importance[:, tf.newaxis]], 1)
            t_memory = tf.concat((t_memory, last_token[:, tf.newaxis, :]), 1)
            t_importance = tf.concat((t_importance, last_importance[:, tf.newaxis]), 1)
            _, indices = tf.math.top_k(t_importance, k=self._inside_memory_size)
            t_memory = tf.gather(t_memory, indices, axis=1, batch_dims=1)
            t_importance = tf.stop_gradient(tf.gather(t_importance, indices, axis=1, batch_dims=1))
            transformer_out, weights, _ = self._inside_transformer(t_memory, x, training=training)
        else:
            # FIFO memory of detached (stoch, deter) tokens.
            h_hidden = self._cast(tf.identity(prev_state['t_hidden']))
            last_token = tf.stop_gradient(tf.concat([prev_stoch, prev_state['deter']], 1))
            h_hidden = tf.stop_gradient(tf.concat([h_hidden[:, 1:, :], last_token[:, tf.newaxis, :]], 1))
            transformer_out, weights, mask = self._transformer(h_hidden, x, training=training)
        transformer_out_copy = tf.identity(transformer_out)
    # How the transformer output combines with the recurrent cell:
    # v1 adds it into the GRU input, v2 replaces the GRU entirely,
    # v3 applies residual connections before and after the GRU.
    # NOTE(review): for any other `_transformer_version`, `x`/`deter` stay
    # unassigned and the code below raises — confirm versions are {1, 2, 3}.
    if self._transformer_version == 1:
        if use_transformer:
            x = self.get('transformer_out_ln', tfkl.LayerNormalization)(x + transformer_out)
        deter = prev_state['deter']
        x, deter = self._cell(x, [deter])
        deter = deter[0]
    elif self._transformer_version == 2:
        if use_transformer:
            x = deter = self.get('transformer_out_ln', tfkl.LayerNormalization)(x + transformer_out)
        else:
            deter = prev_state['deter']
            x, deter = self._cell(x, [deter])
            deter = deter[0] # Keras wraps the state in a list.
    elif self._transformer_version == 3:
        if use_transformer:
            transformer_out = self.get('transformer_out_ln', tfkl.LayerNormalization)(x + transformer_out)
            deter = prev_state['deter']
            x, deter = self._cell(transformer_out, [deter])
            deter = deter[0]
            x = self.get('transformer_out_ln_2', tfkl.LayerNormalization)(x + transformer_out)
        else:
            deter = prev_state['deter']
            x, deter = self._cell(x, [deter])
            deter = deter[0]
    # Pick one random ensemble member's statistics for this step.
    stats = self._suff_stats_ensemble(x)
    index = tf.random.uniform((), 0, self._ensemble, tf.int32)
    stats = {k: v[index] for k, v in stats.items()}
    dist = self.get_dist(stats)
    stoch = dist.sample() if sample else dist.mode() # [batch_size, ]
    prior = {'stoch': stoch, 'deter': deter, **stats}
    if self._use_forward_loss:
        forward_stats = self._suff_stats_ensemble(forward_x)
        forward_index = tf.random.uniform((), 0, self._ensemble, tf.int32)
        # NOTE(review): `forward_index` is unused; the member chosen for the
        # main prior (`index`) is reused here — confirm that is intended.
        forward_stats = {k: v[index] for k, v in forward_stats.items()}
        forward_dist = self.get_dist(forward_stats)
        forward_stoch = forward_dist.sample() if sample else forward_dist.mode()
        prior['forward_stoch'] = forward_stoch
    if self._myopic_prediction:
        prior['myopic_out'] = myopic_out
    if use_transformer:
        if not self._use_inside_transformer:
            prior['t_hidden'] = tf.stop_gradient(h_hidden)
            prior['t_transformer'] = tf.identity(transformer_out_copy)
            print("transformer_out", transformer_out_copy.shape, transformer_out_copy)
            if transformer_weight:
                for i in range(self._transformer.num_layers):
                    prior[f't_weight_{i}'] = tf.stop_gradient(weights[f'dec{i}'])
                    print(f"prior['t_weight_{i}']", prior[f't_weight_{i}'].shape)
        else:
            prior['t_memory'] = t_memory
            prior['t_importance'] = tf.stop_gradient(t_importance)
    return prior
def _suff_stats_ensemble(self, inp):
    """Compute sufficient statistics for every ensemble member.

    Flattens all leading dims, runs each member's dense/norm/dist stack,
    then stacks results so the member axis comes first and the original
    batch dims are restored after it.
    """
    batch_dims = list(inp.shape[:-1])
    flat = inp.reshape([-1, inp.shape[-1]])
    member_stats = []
    for idx in range(self._ensemble):
        h = self.get(f'img_out_{idx}', tfkl.Dense, self._hidden)(flat)
        h = self.get(f'img_out_norm_{idx}', NormLayer, self._norm)(h)
        h = self._act(h)
        member_stats.append(self._suff_stats_layer(f'img_dist_{idx}', h))
    # Stack per-member tensors along a new leading ensemble axis.
    stacked = {
        key: tf.stack([m[key] for m in member_stats], 0)
        for key in member_stats[0]}
    # Restore the original batch dims behind the ensemble axis.
    return {
        key: val.reshape([val.shape[0]] + batch_dims + list(val.shape[2:]))
        for key, val in stacked.items()}
def _suff_stats_layer(self, name, x):
    """Project features to distribution parameters.

    Discrete latents: a logit tensor reshaped to (groups, classes).
    Continuous latents: mean and positive std (activation chosen by
    `self._std_act`, offset by `self._min_std`).

    Raises:
        KeyError: for an unrecognized `self._std_act`.
    """
    if self._discrete:
        logits = self.get(name, tfkl.Dense, self._stoch * self._discrete, None)(x)
        logits = tf.reshape(logits, logits.shape[:-1] + [self._stoch, self._discrete])
        return {'logit': logits}
    out = self.get(name, tfkl.Dense, 2 * self._stoch, None)(x)
    mean, raw_std = tf.split(out, 2, -1)
    if self._std_act == 'softplus':
        std = tf.nn.softplus(raw_std)
    elif self._std_act == 'sigmoid':
        std = tf.nn.sigmoid(raw_std)
    elif self._std_act == 'sigmoid2':
        std = 2 * tf.nn.sigmoid(raw_std / 2)
    else:
        raise KeyError(self._std_act)
    return {'mean': mean, 'std': std + self._min_std}
def kl_loss(self, post, prior, forward, balance, free, free_avg):
    """KL divergence between posterior and prior with KL balancing.

    Args:
        post, prior: state dicts holding distribution statistics.
        forward: if True, compute KL(prior || post) instead of KL(post || prior).
        balance: mixing weight between the two stop-gradient variants.
        free: free-nats threshold applied via `maximum`.
        free_avg: apply the threshold to the mean (True) or elementwise (False).

    Returns:
        (loss, value): scalar loss and the raw per-element KL value.
    """
    kld = tfd.kl_divergence
    def sg(x):
        return tf.nest.map_structure(tf.stop_gradient, x)
    if forward:
        lhs, rhs, mix = prior, post, balance
    else:
        lhs, rhs, mix = post, prior, 1 - balance
    if balance == 0.5:
        # Symmetric weighting: a single KL pass suffices.
        value = kld(self.get_dist(lhs), self.get_dist(rhs))
        return tf.maximum(value, free).mean(), value
    # Two passes, each stopping gradients through one side.
    value_lhs = kld(self.get_dist(lhs), self.get_dist(sg(rhs)))
    value_rhs = kld(self.get_dist(sg(lhs)), self.get_dist(rhs))
    if free_avg:
        loss_lhs = tf.maximum(value_lhs.mean(), free)
        loss_rhs = tf.maximum(value_rhs.mean(), free)
    else:
        loss_lhs = tf.maximum(value_lhs, free).mean()
        loss_rhs = tf.maximum(value_rhs, free).mean()
    return mix * loss_lhs + (1 - mix) * loss_rhs, value_lhs
class Encoder(common.Module):
    """Observation encoder combining a CNN branch and an MLP branch.

    Keys of the observation dict are routed to the CNN or MLP branch by the
    `cnn_keys` / `mlp_keys` regexes; both branch outputs are concatenated.
    """

    def __init__(
        self, cnn_keys=r'image', mlp_keys=r'^$', act='elu', norm='none',
        cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]):
        # `mlp_layers` has a mutable default; it is only read here, never
        # mutated, so the shared default list is safe in practice.
        self._cnn_keys = re.compile(cnn_keys)
        self._mlp_keys = re.compile(mlp_keys)
        self._act = get_act(act)
        self._norm = norm
        self._cnn_depth = cnn_depth
        self._cnn_kernels = cnn_kernels
        self._mlp_layers = mlp_layers
        # Gates the one-time "inputs" debug prints on the first call.
        self._once = True

    @tf.function
    def __call__(self, obs):
        """Encode an observation dict into a single feature tensor."""
        outs = [self._cnn(obs), self._mlp(obs)]
        outs = [out for out in outs if out is not None]
        self._once = False
        return tf.concat(outs, -1)

    def _cnn(self, obs):
        """CNN branch: stacked strided convolutions over image-like keys.

        Returns None when no observation key matches `cnn_keys`.
        """
        # Fold all leading (batch/time) dims; keep the trailing H, W, C dims.
        inputs = {
            key: tf.reshape(obs[key], (-1,) + tuple(obs[key].shape[-3:]))
            for key in obs if self._cnn_keys.match(key)}
        if not inputs:
            return None
        self._once and print('Encoder CNN inputs:', list(inputs.keys()))
        x = tf.concat(list(inputs.values()), -1)
        x = x.astype(prec.global_policy().compute_dtype)
        for i, kernel in enumerate(self._cnn_kernels):
            # Channel count doubles at each level.
            depth = 2 ** i * self._cnn_depth
            x = self.get(f'conv{i}', tfkl.Conv2D, depth, kernel, 2)(x)
            x = self.get(f'convnorm{i}', NormLayer, self._norm)(x)
            x = self._act(x)
            print(f"encoder {i}-shape:", x.shape)
        # NOTE(review): output is reshaped using obs['image']'s batch dims,
        # which assumes an 'image' key exists whenever any CNN key matches.
        return x.reshape(list(obs['image'].shape[:-3]) + [-1])

    def _mlp(self, obs):
        """MLP branch over flat observation keys matching `mlp_keys`.

        Returns None when no observation key matches.
        """
        print("in Encoder._mlp()", flush=True)
        print(obs.keys())
        valid_inputs = [key for key in obs if self._mlp_keys.match(key)]
        if not valid_inputs:
            return None
        # Batch dims are taken from the 'reward' entry, which is assumed to
        # carry only the leading (batch/time) dims.
        batch_dims = list(obs['reward'].shape)
        inputs = {
            key: tf.reshape(obs[key], [np.prod(batch_dims), -1])
            for key in obs if self._mlp_keys.match(key)}
        self._once and print('Encoder MLP inputs:', list(inputs.keys()))
        x = tf.concat(list(inputs.values()), -1)
        x = x.astype(prec.global_policy().compute_dtype)
        for i, width in enumerate(self._mlp_layers):
            x = self.get(f'dense{i}', tfkl.Dense, width)(x)
            x = self.get(f'densenorm{i}', NormLayer, self._norm)(x)
            x = self._act(x)
        return x.reshape(batch_dims + [-1])
class Decoder(common.Module):
    """Feature decoder producing per-key output distributions.

    Mirrors `Encoder`: keys in `shapes` are routed to a transposed-CNN
    branch or an MLP branch by the `cnn_keys` / `mlp_keys` regexes.
    """

    def __init__(
        self, shapes, cnn_keys=r'image', mlp_keys=r'^$', act='elu', norm='none',
        cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]):
        # `mlp_layers` has a mutable default; it is only read, never mutated.
        self._shapes = shapes
        self._cnn_keys = re.compile(cnn_keys)
        self._mlp_keys = re.compile(mlp_keys)
        self._act = get_act(act)
        self._norm = norm
        self._cnn_depth = cnn_depth
        self._cnn_kernels = cnn_kernels
        self._mlp_layers = mlp_layers
        # Gates the one-time "outputs" debug prints on the first call.
        self._once = True

    def __call__(self, features):
        """Decode features into a dict of output distributions by key."""
        features = tf.cast(features, prec.global_policy().compute_dtype)
        dists = {**self._cnn(features), **self._mlp(features)}
        self._once = False
        return dists

    def _cnn(self, features):
        """Transposed-CNN branch reconstructing image-like keys.

        Returns a dict of Independent Normal (unit std) distributions, or
        an empty dict when no key matches `cnn_keys`.
        """
        # Only the channel count of each matching shape is needed here.
        shapes = {
            key: shape[-1] for key, shape in self._shapes.items()
            if self._cnn_keys.match(key)}
        if not shapes:
            return {}
        ConvT = tfkl.Conv2DTranspose
        # Project features to a 1x1 spatial map, then upsample with ConvT.
        x = self.get('convin', tfkl.Dense, 32 * self._cnn_depth)(features)
        x = tf.reshape(x, [-1, 1, 1, 32 * self._cnn_depth])
        for i, kernel in enumerate(self._cnn_kernels):
            # Channel count halves toward the output resolution.
            depth = 2 ** (len(self._cnn_kernels) - i - 2) * self._cnn_depth
            act, norm = self._act, self._norm
            if i == len(self._cnn_kernels) - 1:
                # Final layer: linear output with the summed channel count.
                depth, act, norm = sum(shapes.values()), tf.identity, 'none'
            x = self.get(f'conv{i}', ConvT, depth, kernel, 2)(x)
            x = self.get(f'convnorm{i}', NormLayer, norm)(x)
            x = act(x)
            print(f"decoder {i}-shape:", x.shape)
        # Restore the leading (batch/time) dims from `features`.
        x = x.reshape(features.shape[:-1] + x.shape[1:])
        print("decoder shape:", shapes, x.shape)
        # Split the channel axis back into one tensor per key.
        means = tf.split(x, list(shapes.values()), -1)
        dists = {
            key: tfd.Independent(tfd.Normal(mean, 1), 3)
            for (key, shape), mean in zip(shapes.items(), means)}
        self._once and print('Decoder CNN outputs:', list(dists.keys()))
        return dists

    def _mlp(self, features):
        """MLP branch producing one DistLayer distribution per flat key.

        Returns an empty dict when no key matches `mlp_keys`.
        """
        shapes = {
            key: shape for key, shape in self._shapes.items()
            if self._mlp_keys.match(key)}
        if not shapes:
            return {}
        x = features
        for i, width in enumerate(self._mlp_layers):
            x = self.get(f'dense{i}', tfkl.Dense, width)(x)
            x = self.get(f'densenorm{i}', NormLayer, self._norm)(x)
            x = self._act(x)
        dists = {}
        for key, shape in shapes.items():
            dists[key] = self.get(f'dense_{key}', DistLayer, shape)(x)
        self._once and print('Decoder MLP outputs:', list(dists.keys()))
        return dists
class MLP(common.Module):
    """Multi-layer perceptron ending in a DistLayer output distribution."""

    def __init__(self, shape, layers, units, act='elu', norm='none', **out):
        # A bare int shape is promoted to a 1-tuple for DistLayer.
        self._shape = (shape,) if isinstance(shape, int) else shape
        self._layers = layers
        self._units = units
        self._norm = norm
        self._act = get_act(act)
        self._out = out

    def __call__(self, features):
        """Map features to a distribution over `self._shape` outputs."""
        hidden = tf.cast(features, prec.global_policy().compute_dtype)
        # Fold leading dims so dense layers see a flat batch.
        hidden = hidden.reshape([-1, hidden.shape[-1]])
        for i in range(self._layers):
            hidden = self.get(f'dense{i}', tfkl.Dense, self._units)(hidden)
            hidden = self.get(f'norm{i}', NormLayer, self._norm)(hidden)
            hidden = self._act(hidden)
        # Restore leading dims before the distribution head.
        hidden = hidden.reshape(features.shape[:-1] + [hidden.shape[-1]])
        return self.get('out', DistLayer, self._shape, **self._out)(hidden)
class GRUCell(tf.keras.layers.AbstractRNNCell):
    """GRU cell with optional layer normalization over the gate pre-activations.

    Computes reset/candidate/update gates from a single dense layer over the
    concatenated (inputs, state).
    """

    def __init__(self, size, norm=False, act='tanh', update_bias=-1, **kwargs):
        super().__init__()
        self._size = size
        self._act = get_act(act)
        # NOTE(review): `_norm` is first stored as the flag and then, when
        # truthy, overwritten with the LayerNormalization layer below — the
        # same attribute serves as both flag and layer.
        self._norm = norm
        # Bias added inside the update-gate sigmoid; negative values bias the
        # cell toward keeping its previous state.
        self._update_bias = update_bias
        # NOTE(review): `norm is not None` is True for both False and True
        # (the default is the bool False), so the dense layer always has a
        # bias — confirm whether `use_bias=not norm` was intended.
        self._layer = tfkl.Dense(3 * size, use_bias=norm is not None, **kwargs)
        if norm:
            self._norm = tfkl.LayerNormalization(dtype=tf.float32)

    @property
    def state_size(self):
        # Required by AbstractRNNCell: width of the recurrent state.
        return self._size

    @tf.function
    def call(self, inputs, state):
        """One GRU step; returns (output, [new_state])."""
        state = state[0] # Keras wraps the state in a list.
        parts = self._layer(tf.concat([inputs, state], -1))
        if self._norm:
            # Normalize in float32 for numerical stability, then cast back.
            dtype = parts.dtype
            parts = tf.cast(parts, tf.float32)
            parts = self._norm(parts)
            parts = tf.cast(parts, dtype)
        reset, cand, update = tf.split(parts, 3, -1)
        reset = tf.nn.sigmoid(reset)
        # Reset gate scales the candidate pre-activation (GRU variant).
        cand = self._act(reset * cand)
        update = tf.nn.sigmoid(update + self._update_bias)
        output = update * cand + (1 - update) * state
        return output, [output]
class DistLayer(common.Module):
    """Final projection turning features into an output distribution.

    Supported `dist` values: 'mse' (unit-std Normal), 'normal', 'binary'
    (Bernoulli), 'tanh_normal', 'trunc_normal', and 'onehot'.
    """

    def __init__(
        self, shape, dist='mse', min_std=0.1, init_std=0.0):
        self._shape = shape
        self._dist = dist
        # Lower bound / initial offset for the std of the squashed variants.
        self._min_std = min_std
        self._init_std = init_std

    def __call__(self, inputs):
        """Project `inputs` and build the configured distribution.

        Raises:
            NotImplementedError: for an unrecognized `dist` name.
        """
        out = self.get('out', tfkl.Dense, np.prod(self._shape))(inputs)
        out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
        out = tf.cast(out, tf.float32)
        if self._dist in ('normal', 'tanh_normal', 'trunc_normal'):
            # These variants need a learned std alongside the mean.
            std = self.get('std', tfkl.Dense, np.prod(self._shape))(inputs)
            std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
            std = tf.cast(std, tf.float32)
        if self._dist == 'mse':
            dist = tfd.Normal(out, 1.0)
            return tfd.Independent(dist, len(self._shape))
        if self._dist == 'normal':
            dist = tfd.Normal(out, std)
            return tfd.Independent(dist, len(self._shape))
        if self._dist == 'binary':
            dist = tfd.Bernoulli(out)
            return tfd.Independent(dist, len(self._shape))
        if self._dist == 'tanh_normal':
            # Soft-clip the mean, then squash samples through tanh.
            mean = 5 * tf.tanh(out / 5)
            std = tf.nn.softplus(std + self._init_std) + self._min_std
            dist = tfd.Normal(mean, std)
            dist = tfd.TransformedDistribution(dist, common.TanhBijector())
            dist = tfd.Independent(dist, len(self._shape))
            return common.SampleDist(dist)
        if self._dist == 'trunc_normal':
            std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std
            dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1)
            return tfd.Independent(dist, 1)
        if self._dist == 'onehot':
            return common.OneHotDist(out)
        # Bug fix: the original evaluated `NotImplementedError(self._dist)`
        # without `raise`, so unknown dist names silently returned None.
        raise NotImplementedError(self._dist)
class NormLayer(common.Module):
    """Normalization selected by name: 'none' is identity, 'layer' is LayerNorm."""

    def __init__(self, name):
        if name == 'layer':
            self._layer = tfkl.LayerNormalization()
        elif name == 'none':
            self._layer = None
        else:
            raise NotImplementedError(name)

    def __call__(self, features):
        if self._layer is None:
            return features
        return self._layer(features)
def get_act(name):
    """Look up an activation function by name.

    'none' maps to identity, 'mish' to a custom implementation; any other
    name is resolved on tf.nn first and then on tf itself.

    Raises:
        NotImplementedError: when no matching attribute exists.
    """
    if name == 'none':
        return tf.identity
    if name == 'mish':
        return lambda x: x * tf.math.tanh(tf.nn.softplus(x))
    # tf.nn takes precedence over the top-level tf namespace.
    for module in (tf.nn, tf):
        if hasattr(module, name):
            return getattr(module, name)
    raise NotImplementedError(name)
| 48.21853 | 183 | 0.590861 | 47,292 | 0.987699 | 0 | 0 | 24,584 | 0.51344 | 0 | 0 | 11,267 | 0.235313 |
dc52b025b8a7aecbe2b78cacb4452f154781595d | 88 | py | Python | catkin_ws/src/00-infrastructure/easy_logs/include/easy_logs/cli/__init__.py | yxiao1996/dev | e2181233aaa3d16c472b792b58fc4863983825bd | [
"CC-BY-2.0"
] | 2 | 2018-06-25T02:51:25.000Z | 2018-06-25T02:51:27.000Z | catkin_ws/src/00-infrastructure/easy_logs/include/easy_logs/cli/__init__.py | yxiao1996/dev | e2181233aaa3d16c472b792b58fc4863983825bd | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/00-infrastructure/easy_logs/include/easy_logs/cli/__init__.py | yxiao1996/dev | e2181233aaa3d16c472b792b58fc4863983825bd | [
"CC-BY-2.0"
] | 2 | 2018-09-04T06:44:21.000Z | 2018-10-15T02:30:50.000Z | from .easy_logs_summary_imp import *
from .dropbox_links import *
from .require import * | 29.333333 | 36 | 0.806818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
dc533054ebdcef1d26d54fd0b72f7f97c4c5fec1 | 966 | py | Python | client/verta/tests/test_versioning/test_code.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | null | null | null | client/verta/tests/test_versioning/test_code.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 3 | 2021-05-11T23:56:43.000Z | 2022-02-27T11:16:47.000Z | client/verta/tests/test_versioning/test_code.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 3 | 2020-11-18T19:36:47.000Z | 2021-01-19T19:48:54.000Z | import pytest
from google.protobuf import json_format
import verta.code
from verta._internal_utils import _git_utils
class TestGit:
    """Tests for verta.code.Git code versioning."""
    def test_no_autocapture(self):
        """With _autocapture=False no git metadata is collected."""
        code_ver = verta.code.Git(_autocapture=False)
        # protobuf message is empty
        assert not json_format.MessageToDict(
            code_ver._msg,
            including_default_value_fields=False,
        )
    def test_repr(self):
        """Tests that __repr__() executes without error"""
        try:
            _git_utils.get_git_repo_root_dir()
        except OSError:
            # autocapture needs a git repo; skip when running outside one
            pytest.skip("not in git repo")
        code_ver = verta.code.Git()
        assert code_ver.__repr__()
class TestNotebook:
    """Tests for verta.code.Notebook code versioning."""
    def test_no_autocapture(self):
        """With _autocapture=False no notebook metadata is collected."""
        code_ver = verta.code.Notebook(_autocapture=False)
        # protobuf message is empty
        assert not json_format.MessageToDict(
            code_ver._msg,
            including_default_value_fields=False,
        )
| 24.15 | 58 | 0.652174 | 841 | 0.8706 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.125259 |
dc549680150086d507b8471b28cb4344cf23da7f | 1,617 | py | Python | 2020/python/day3.py | majormunky/advent_of_code | 4cccd7f3879e28e465bbc39176659bdd52bd70d6 | [
"MIT"
] | null | null | null | 2020/python/day3.py | majormunky/advent_of_code | 4cccd7f3879e28e465bbc39176659bdd52bd70d6 | [
"MIT"
] | null | null | null | 2020/python/day3.py | majormunky/advent_of_code | 4cccd7f3879e28e465bbc39176659bdd52bd70d6 | [
"MIT"
] | 1 | 2020-12-04T06:12:01.000Z | 2020-12-04T06:12:01.000Z | import sys
import common
import math
def get_filename():
    """Return the running script's base name without extension.

    Takes sys.argv[0], keeps the part after the last '/' and before the
    first '.', so 'some/dir/day3.py' becomes 'day3'.
    """
    script_path = sys.argv[0]
    basename = script_path.split("/")[-1]
    return basename.split(".")[0]
data = common.get_file_contents("data/{}_input.txt".format(get_filename()))
def run_slop_test(right, down):
    """Count trees ('#') hit while descending the map with the given slope.

    The map (module-level ``data``) repeats horizontally, which is modelled
    by wrapping the x position back around when it passes the right edge.

    :param right: columns moved right per step
    :param down: rows moved down per step
    :return: number of tree cells encountered
    """
    # holds the map we will navigate
    field = []
    # current position on the map
    pos = [0, 0]
    # how many trees have we hit?
    tree_count = 0
    # build our field
    for row in data:
        field.append(list(row))
    while True:
        # check what the current spot is
        if field[pos[1]][pos[0]] == "#":
            # its a tree, increase our count
            tree_count += 1
        # increase our position further down the field
        pos[0] += right
        pos[1] += down
        # if we reached the right side of the field
        # then we reset our position back to the start
        if pos[0] >= len(field[0]):
            pos[0] = pos[0] - len(field[0])
        # if we've reached the bottom of the field, we are done
        if pos[1] >= len(field):
            break
    return tree_count
def part1():
    """Answer part 1: trees hit on the slope right 3, down 1."""
    return run_slop_test(3, 1)
def part2():
    """Answer part 2: product of tree counts over the five listed slopes."""
    slopes = [(1, 1), (3, 1), (5, 1), (7, 1), (1, 2)]
    return math.prod(run_slop_test(right, down) for right, down in slopes)
def main():
    """Compute and print both puzzle answers."""
    part1_answer = part1()
    part2_answer = part2()
    print(f"Part 1: {part1_answer}")
    print(f"Part 2: {part2_answer}")
if __name__ == '__main__':
    main()
| 19.962963 | 75 | 0.560297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.277675 |
dc58494bc47cffa782c19449890df1e125bbd5c6 | 174 | py | Python | app/forms.py | makkenno/django_blog | 787854ad1bb3b68b885c02b85b38ef4369e89e22 | [
"MIT"
] | null | null | null | app/forms.py | makkenno/django_blog | 787854ad1bb3b68b885c02b85b38ef4369e89e22 | [
"MIT"
] | null | null | null | app/forms.py | makkenno/django_blog | 787854ad1bb3b68b885c02b85b38ef4369e89e22 | [
"MIT"
] | null | null | null | from django import forms
class PostForm(forms.Form):
    """Form for creating a blog post: a title and a body."""
    # Post title, capped at 30 characters (label is user-facing Japanese).
    title = forms.CharField(max_length=30, label='タイトル')
    # Post body, rendered as a textarea.
    content = forms.CharField(label='内容', widget=forms.Textarea())
dc58c056699f0b7d9f257b7b486ea3049baf4bec | 1,395 | py | Python | grievance/urls.py | AdarshNandanwar/CMS | 755b5e9244807f30a0bfb4b88d6528fc49bdd070 | [
"MIT"
] | null | null | null | grievance/urls.py | AdarshNandanwar/CMS | 755b5e9244807f30a0bfb4b88d6528fc49bdd070 | [
"MIT"
] | 9 | 2020-05-10T22:04:50.000Z | 2022-02-10T10:29:14.000Z | grievance/urls.py | AdarshNandanwar/CMS | 755b5e9244807f30a0bfb4b88d6528fc49bdd070 | [
"MIT"
] | 5 | 2020-07-11T08:03:07.000Z | 2021-01-19T16:40:19.000Z | from django.urls import path
from django.conf.urls import url
from django.urls import path,include
import grievance.views as VIEWS
from django.conf.urls.static import static
from django.conf import settings
# URL namespace used when reversing routes, e.g. 'grievance:...'.
app_name = 'grievance'
# Route table: level1/level2 committee views, student views, auth,
# website-admin pages and helper endpoints.
urlpatterns =[
    # path('', VIEWS.HomeView.as_view())
    path('level1/', VIEWS.level1HomeView.as_view()),
    path('level1/type/<type>',VIEWS.level1RequestView.as_view()),
    path('level1/student/<student_id>', VIEWS.level1StudentView.as_view()),
    path('level1/psd-student-status/student/<student_id>',VIEWS.ViewOnlyPSDStudentPageView.as_view()),
    path('', include('django.contrib.auth.urls')),
    path('redirect/', VIEWS.RedirectView.as_view()),
    path('student/', VIEWS.studentHomeView.as_view()),
    path('level2/', VIEWS.level2HomeView.as_view()),
    path('level2/type/<type>', VIEWS.level2RequestView.as_view()),
    path('level2/student/<student_id>', VIEWS.level2StudentView.as_view()),
    path('<level>/student-status/student/<student_id>', VIEWS.ViewOnlyStudentPageView.as_view()),
    path('website-admin', VIEWS.websiteAdminHomePageView.as_view()),
    # path('website-admin/change-deadline', VIEWS.changeDeadlineView.as_view()),
    path('website-admin/add-user', VIEWS.addUser.as_view()),
    path('get-deadline', VIEWS.showDeadlineInHeader.as_view()),
    path('dev_page', VIEWS.devPageView.as_view()),
]
# Serve uploaded media through Django (development-style setup).
urlpatterns+= static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
dc58f7ceafa68cff55b75729d501728753773e9c | 2,545 | py | Python | fgsd_keras_sparse_implementation.py | ethaharikanaidu/FGSD | e283e6a805bc469c82e9d1dc63a1b07374701362 | [
"MIT"
] | null | null | null | fgsd_keras_sparse_implementation.py | ethaharikanaidu/FGSD | e283e6a805bc469c82e9d1dc63a1b07374701362 | [
"MIT"
] | null | null | null | fgsd_keras_sparse_implementation.py | ethaharikanaidu/FGSD | e283e6a805bc469c82e9d1dc63a1b07374701362 | [
"MIT"
] | null | null | null | '''This is the deep learning implementation in Keras for graph classification based on FGSD graph features.'''
import numpy as np
import scipy.io
import networkx as nx
from grakel import datasets
from scipy import sparse
from sklearn.utils import shuffle
from scipy import linalg
import time
import csv
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
########### LOAD DATA ###########
def return_dataset(file_name):
    """Fetch a grakel benchmark dataset and convert it to networkx graphs.

    :param file_name: grakel dataset name (e.g. "MUTAG")
    :return: (list of nx.Graph, numpy array of labels)
    """
    fetched = datasets.fetch_dataset(file_name, verbose=True)
    graph_list = []
    for sample in fetched.data:
        edges = [(edge[0], edge[1]) for edge in sample[0]]
        nodes = {edge[0] for edge in edges} | {edge[1] for edge in edges}
        graph = nx.Graph()
        graph.add_nodes_from(nodes)
        graph.add_edges_from(edges)
        graph_list.append(graph)
    return graph_list, np.array(fetched.target)
########### SET PARAMETERS ###########
def FGSD(graphs, labels, nbins=200, range_hist=(0, 20)):
    """Compute FGSD histogram features for a list of graphs.

    For each graph, the pseudoinverse of the Laplacian gives pairwise
    spectral distances S[i, j] = pinv[i, i] + pinv[j, j] - 2 * pinv[i, j];
    their histogram is the graph's feature vector. The previously
    hard-coded bin count and range are now defaulted parameters.

    :param graphs: list of networkx graphs
    :param labels: per-graph labels, aligned with ``graphs``
    :param nbins: number of histogram bins (default 200, as before)
    :param range_hist: histogram value range (default (0, 20), as before)
    :return: (shuffled feature matrix, correspondingly shuffled labels)
    """
    feature_matrix = []
    for graph in graphs:
        laplacian = nx.laplacian_matrix(graph)
        ones_vector = np.ones(laplacian.shape[0])
        # Moore-Penrose pseudoinverse of the (dense) graph Laplacian
        pinv = np.linalg.pinv(laplacian.todense())
        diag = np.diag(pinv)
        S = np.outer(diag, ones_vector) + np.outer(ones_vector, diag) - 2 * pinv
        hist, _ = np.histogram(S.flatten(), bins=nbins, range=range_hist)
        feature_matrix.append(hist)
    # Shuffle features and labels together so rows stay aligned.
    feature_matrix, shuffled_labels = shuffle(np.array(feature_matrix), labels)
    return feature_matrix, shuffled_labels
########### TRAIN AND VALIDATE MODEL ###########
data = ["MUTAG","PROTEINS_full","NCI1","NCI109","DD","COLLAB","REDDIT-BINARY","REDDIT-MULTI-5K","IMDB-BINARY","IMDB-MULTI"]
#file = open("fgsd_res.csv",'a',newline='')
#res_writer = csv.writer(file, delimiter = ',', quotechar='|', quoting= csv.QUOTE_MINIMAL)
#header = ["dataset","accuracy","time"]
#res_writer.writerow(header)
for d in data:
    # BUG FIX: was return_dataset(dataset) — `dataset` is undefined and
    # raised NameError on the first iteration; the loop variable is `d`.
    graphs, labels = return_dataset(d)
    print(" {} dataset loaded with {} number of graphs".format(d, len(graphs)))
    start = time.time()
    emb, y = FGSD(graphs, labels)
    end = time.time()
    print("total time taken: ", end-start)
    # 10-fold cross-validated random forest on the histogram features.
    model = RandomForestClassifier(n_estimators = 100)
    res = cross_val_score(model, emb, y, cv = 10, scoring='accuracy')
    print("10 fold cross validation accuracy: {}, for dataset ; {}".format(np.mean(res)*100, d))
    to_write = [d, np.mean(res)*100, end-start]
    print(to_write)
    #res_writer.writerow(to_write)
    #file.flush()
#file.close()
| 32.628205 | 124 | 0.64558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.287623 |
dc59dc5b71bc2df003fb4821b909fc61b42c94fa | 244 | py | Python | exercicios/exercicio046.py | TayAntony/python | c79d62a12f48965ed374e4c037287a162a368a40 | [
"MIT"
] | null | null | null | exercicios/exercicio046.py | TayAntony/python | c79d62a12f48965ed374e4c037287a162a368a40 | [
"MIT"
] | null | null | null | exercicios/exercicio046.py | TayAntony/python | c79d62a12f48965ed374e4c037287a162a368a40 | [
"MIT"
] | null | null | null | from time import sleep
from cores import *
print(f'{cores["azul"]}Em breve a queima de fogos irá começar...{limpar}')
for c in range(10, -1, -1):
print(c)
sleep(1)
print(f'{cores["vermelho"]}{fx["negrito"]}Feliz ano novo!{limpar} 🎆 🎆')
| 30.5 | 74 | 0.651639 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.551587 |
dc5aee0f9d7c54a22dcf5b4f46ece3b3c8bd5031 | 3,206 | py | Python | database_files/management/commands/database_files_cleanup.py | Edge-On-Demand/django-database-files-3000 | fb2f9f83c658dd2443c7bb5c3b1fad348a9a4b92 | [
"BSD-3-Clause"
] | null | null | null | database_files/management/commands/database_files_cleanup.py | Edge-On-Demand/django-database-files-3000 | fb2f9f83c658dd2443c7bb5c3b1fad348a9a4b92 | [
"BSD-3-Clause"
] | null | null | null | database_files/management/commands/database_files_cleanup.py | Edge-On-Demand/django-database-files-3000 | fb2f9f83c658dd2443c7bb5c3b1fad348a9a4b92 | [
"BSD-3-Clause"
] | null | null | null | from optparse import make_option
from django.apps import apps
from django.conf import settings
from django.core.files.storage import default_storage
from django.core.management.base import BaseCommand
from django.db.models import FileField, ImageField
from database_files.models import File
class Command(BaseCommand):
    """Management command that deletes database-stored files no longer
    referenced by any FileField/ImageField on any installed model."""
    args = ''
    help = 'Deletes all files in the database that are not referenced by ' + \
        'any model fields.'
    # NOTE(review): optparse-style option_list is the legacy (pre-Django 1.10)
    # way of declaring options; newer Django expects add_arguments().
    option_list = BaseCommand.option_list + (
        make_option(
            '--dryrun', action='store_true', dest='dryrun', default=False,
            help='If given, only displays the names of orphaned files and does not delete them.'),
        make_option(
            '--filenames', default='', help='If given, only files with these names will be checked')
        )
    def handle(self, *args, **options):
        """Collect every referenced file name, then delete (or report) the rest."""
        # DEBUG is disabled while iterating — presumably to keep Django from
        # accumulating its query log over large tables; restored in finally.
        tmp_debug = settings.DEBUG
        settings.DEBUG = False
        names = set()
        dryrun = options['dryrun']
        # Optional comma-separated whitelist of names to consider for deletion.
        filenames = set(_.strip() for _ in options['filenames'].split(',') if _.strip())
        try:
            # Phase 1: gather the names of all files still referenced by models.
            for model in apps.get_models():
                print('Checking model %s...' % (model,))
                for field in model._meta.fields:
                    if not isinstance(field, (FileField, ImageField)):
                        continue
                    # Ignore records with null or empty string values.
                    q = {'%s__isnull' % field.name: False}
                    xq = {field.name: ''}
                    subq = model.objects.filter(**q).exclude(**xq)
                    subq_total = subq.count()
                    subq_i = 0
                    for row in subq.iterator():
                        subq_i += 1
                        # Progress output on the first row and every 100th.
                        if subq_i == 1 or not subq_i % 100:
                            print('%i of %i' % (subq_i, subq_total))
                        f = getattr(row, field.name)
                        if f is None:
                            continue
                        if not f.name:
                            continue
                        names.add(f.name)
            # Find all database files with names not in our list.
            print('Finding orphaned files...')
            orphan_files = File.objects.exclude(name__in=names)
            if filenames:
                orphan_files = orphan_files.filter(name__in=filenames)
            orphan_files = orphan_files.only('name', 'size')
            total_bytes = 0
            orphan_total = orphan_files.count()
            orphan_i = 0
            print('Deleting %i orphaned files...' % (orphan_total,))
            for f in orphan_files.iterator():
                orphan_i += 1
                if orphan_i == 1 or not orphan_i % 100:
                    print('%i of %i' % (orphan_i, orphan_total))
                total_bytes += f.size
                if dryrun:
                    print('File %s is orphaned.' % (f.name,))
                else:
                    print('Deleting orphan file %s...' % (f.name,))
                    default_storage.delete(f.name)
            print('%i total bytes in orphan files.' % total_bytes)
        finally:
            settings.DEBUG = tmp_debug
| 42.184211 | 100 | 0.523082 | 2,910 | 0.907673 | 0 | 0 | 0 | 0 | 0 | 0 | 597 | 0.186213 |
dc5b8bfcbc60defc15eb187ab1112d80fd84dce2 | 3,814 | py | Python | tests/test_utils.py | rahultesla/spectacles | dae1e938805fcdae7dea422801916322c0da5608 | [
"MIT"
] | 1 | 2019-05-08T14:31:24.000Z | 2019-05-08T14:31:24.000Z | tests/test_utils.py | rahultesla/spectacles | dae1e938805fcdae7dea422801916322c0da5608 | [
"MIT"
] | 60 | 2019-05-12T09:37:53.000Z | 2019-09-24T21:06:14.000Z | tests/test_utils.py | dbanalyticsco/spectacles | 93dea44f14d38b8441f21264b968a4f7845cb690 | [
"MIT"
] | 1 | 2019-07-16T16:53:10.000Z | 2019-07-16T16:53:10.000Z | from spectacles import utils
from spectacles.logger import GLOBAL_LOGGER as logger
from unittest.mock import MagicMock
import pytest
import unittest
TEST_BASE_URL = "https://test.looker.com"
def test_compose_url_one_path_component():
    """A single path component is appended to the base URL."""
    url = utils.compose_url(TEST_BASE_URL, ["api"])
    assert url == "https://test.looker.com/api"
def test_compose_url_multiple_path_components():
    """Multiple path components are joined with '/' in order."""
    url = utils.compose_url(TEST_BASE_URL, ["api", "3.0", "login", "42", "auth", "27"])
    assert url == "https://test.looker.com/api/3.0/login/42/auth/27"
def test_compose_url_multiple_path_components_and_multiple_field_params():
    """Multiple 'fields' params are comma-joined and URL-encoded."""
    url = utils.compose_url(
        TEST_BASE_URL,
        ["api", "3.0", "login", "42", "auth", "27"],
        {"fields": ["joins", "id"]},
    )
    assert url == "https://test.looker.com/api/3.0/login/42/auth/27?fields=joins%2Cid"
def test_compose_url_multiple_path_components_and_one_field_params():
    """A single 'fields' param is appended as a query string."""
    url = utils.compose_url(
        TEST_BASE_URL,
        ["api", "3.0", "login", "42", "auth", "27"],
        {"fields": ["joins"]},
    )
    assert url == "https://test.looker.com/api/3.0/login/42/auth/27?fields=joins"
def test_compose_url_with_extra_slashes():
    """Redundant slashes in base URL and components are collapsed."""
    url = utils.compose_url(TEST_BASE_URL + "/", ["/api//", "3.0/login/"])
    assert url == "https://test.looker.com/api/3.0/login"
# (elapsed seconds, expected human-readable string) pairs
human_readable_testcases = [
    (0.000002345, "0 seconds"),
    (0.02, "0 seconds"),
    (60, "1 minute"),
    (61.002, "1 minute and 1 second"),
    (62, "1 minute and 2 seconds"),
    (2790, "46 minutes and 30 seconds"),
]
@pytest.mark.parametrize("elapsed,expected", human_readable_testcases)
def test_human_readable(elapsed, expected):
    """human_readable() renders elapsed seconds as the expected phrase."""
    human_readable = utils.human_readable(elapsed)
    assert human_readable == expected
# (decorated function name, expected validator-detail prefix) pairs
get_detail_testcases = [
    ("run_sql", "SQL "),
    ("run_assert", "data test "),
    ("run_content", "content "),
    ("OtherClass.validate", ""),
]
@pytest.mark.parametrize("fn_name,expected", get_detail_testcases)
def test_get_detail(fn_name, expected):
    """get_detail() maps function names to their log detail prefix."""
    detail = utils.get_detail(fn_name)
    assert detail == expected
class TestLogDurationDecorator(unittest.TestCase):
    """log_duration() logs a completion message naming the validator type."""
    def test_log_SQL(self):
        """Functions named run_sql log as SQL validation."""
        with self.assertLogs(logger=logger, level="INFO") as cm:
            func = MagicMock()
            func.__name__ = "run_sql"
            decorated_func = utils.log_duration(func)
            decorated_func()
        self.assertIn("INFO:spectacles:Completed SQL validation in", cm.output[0])
    def test_log_assert(self):
        """Functions named run_assert log as data test validation."""
        with self.assertLogs(logger=logger, level="INFO") as cm:
            func = MagicMock()
            func.__name__ = "run_assert"
            decorated_func = utils.log_duration(func)
            decorated_func()
        self.assertIn("INFO:spectacles:Completed data test validation in", cm.output[0])
    def test_log_content(self):
        """Functions named run_content log as content validation."""
        with self.assertLogs(logger=logger, level="INFO") as cm:
            func = MagicMock()
            func.__name__ = "run_content"
            decorated_func = utils.log_duration(func)
            decorated_func()
        self.assertIn("INFO:spectacles:Completed content validation in", cm.output[0])
    def test_log_other(self):
        """Any other function name logs the generic validation message."""
        with self.assertLogs(logger=logger, level="INFO") as cm:
            func = MagicMock()
            func.__name__ = "OtherValidator.validate"
            decorated_func = utils.log_duration(func)
            decorated_func()
        self.assertIn("INFO:spectacles:Completed validation in", cm.output[0])
def test_chunks_returns_expected_results():
    """chunks() yields ceil(len/size) batches for each chunk size."""
    to_chunk = list(range(10)) # has length of 10
    assert len(list(utils.chunks(to_chunk, 5))) == 2
    assert len(list(utils.chunks(to_chunk, 9))) == 2
    assert len(list(utils.chunks(to_chunk, 10))) == 1
    assert len(list(utils.chunks(to_chunk, 11))) == 1
dc5b9dc07c1239f392e41d7198be67b491f60afc | 210 | py | Python | Language_Proficiency/Python/06_Itertools/04_itertools-combinations-with-replacement-English.py | canbecerik/HackerRank_solutions | 7f90a145fed29103401be94adc069a2731926d26 | [
"MIT"
] | 1 | 2020-04-20T10:27:37.000Z | 2020-04-20T10:27:37.000Z | Language_Proficiency/Python/06_Itertools/04_itertools-combinations-with-replacement-English.py | canbecerik/HackerRank_solutions | 7f90a145fed29103401be94adc069a2731926d26 | [
"MIT"
] | null | null | null | Language_Proficiency/Python/06_Itertools/04_itertools-combinations-with-replacement-English.py | canbecerik/HackerRank_solutions | 7f90a145fed29103401be94adc069a2731926d26 | [
"MIT"
] | null | null | null | from itertools import combinations_with_replacement
# Read the string S and integer k from one space-separated input line.
S, k = [i for i in input().split(" ")]
k = int(k)
# All size-k combinations (with replacement) of S's sorted characters.
combs = list(combinations_with_replacement(sorted(S), k))
combs.sort()
# NOTE(review): list comprehension used purely for its print side effect.
[print("".join(i)) for i in combs]
dc5b9fd61dbefb1b306952f9b1298519ae01f873 | 1,839 | py | Python | Python files/Utilities.py | Dawlau/adugo | 6f20731990c0f7774313d3aa63b783cb5b202c5f | [
"MIT"
] | null | null | null | Python files/Utilities.py | Dawlau/adugo | 6f20731990c0f7774313d3aa63b783cb5b202c5f | [
"MIT"
] | null | null | null | Python files/Utilities.py | Dawlau/adugo | 6f20731990c0f7774313d3aa63b783cb5b202c5f | [
"MIT"
] | null | null | null | '''
Service class for utility functions that I need throught the app
'''
class Utilities:
    """Static helper functions for the board game (grid/node math, hit
    testing and end-of-game detection)."""

    @staticmethod
    def clickedOn(onScreenCoordinates, grid, cell, clickCoords):
        """Return True when the click falls inside the given cell's hit area.

        Empty cells use the square hit radius, occupied cells the circle
        radius (both from the constants module).
        """
        row, col = cell
        centerX, centerY = onScreenCoordinates[row][col]
        clickX, clickY = clickCoords
        import math, constants
        dx = centerX - clickX
        dy = centerY - clickY
        distance = round(math.sqrt(dx * dx + dy * dy), 5)
        if grid[row][col] is None:
            return distance <= constants.SQUARE_SIDE_WIDTH
        return distance <= constants.CIRCLE_RADIUS

    @staticmethod
    def clickCoordsToCell(onScreenCoordinates, grid, clickCoords):
        """Return the (row, col) of a cell hit by the click, or (None, None)."""
        hitRow, hitCol = None, None
        for row in range(len(grid)):
            for col in range(len(grid[0])):
                if Utilities.clickedOn(onScreenCoordinates, grid, (row, col), clickCoords):
                    hitRow, hitCol = row, col
                    # Only the inner loop is broken, so a hit in a later row
                    # would overwrite this one — mirrors the original logic.
                    break
        return hitRow, hitCol

    @staticmethod
    def cellToNode(grid, cell):
        """Map a (row, col) cell to its flat node index."""
        row, col = cell
        return row * len(grid[0]) + col

    @staticmethod
    def nodeToCell(grid, node):
        """Map a flat node index back to its (row, col) cell."""
        return divmod(node, len(grid[0]))

    @staticmethod
    def sign(x):
        """Return 0, -1 or 1 according to the sign of x."""
        if x == 0:
            return 0
        return -1 if x < 0 else 1

    @staticmethod
    def endGame(grid):
        """Check whether the game has finished.

        Returns "Jaguar won" when fewer than 10 dogs remain, "Dogs won" when
        every cell adjacent to the jaguar holds a dog, otherwise None.
        """
        dogCount = 0
        for row in range(len(grid)):
            for col in range(len(grid[0])):
                if grid[row][col] == 'd':
                    dogCount += 1
                elif grid[row][col] == 'j':
                    jaguarPos = (row, col)
        if dogCount < 10:
            return "Jaguar won"
        import constants
        neighbors = constants.ADJACENCY_LIST[Utilities.cellToNode(grid, jaguarPos)]
        blocked = 0
        for neighbor in neighbors:
            nRow, nCol = Utilities.nodeToCell(grid, neighbor)
            if grid[nRow][nCol] == 'd':
                blocked += 1
        if blocked == len(neighbors):
            return "Dogs won"
        return None

    @staticmethod
    def inGrid(grid, cell):
        """Return True when the cell lies within the grid bounds."""
        row, col = cell
        return 0 <= row < len(grid) and 0 <= col < len(grid[0])
| 20.897727 | 86 | 0.641109 | 1,765 | 0.959761 | 0 | 0 | 1,728 | 0.939641 | 0 | 0 | 103 | 0.056009 |
dc5c1d3d73bfddff57ac6572f11808dd361bd889 | 512 | py | Python | tests/unit_tests/conftest.py | kurumuz/datacrunch-python | 94b02c68da48b1017c0c837b3b37a97b4b2543a5 | [
"MIT"
] | 9 | 2021-01-07T17:56:11.000Z | 2022-02-05T01:42:42.000Z | tests/unit_tests/conftest.py | kurumuz/datacrunch-python | 94b02c68da48b1017c0c837b3b37a97b4b2543a5 | [
"MIT"
] | 3 | 2021-05-26T16:17:33.000Z | 2021-12-17T09:25:06.000Z | tests/unit_tests/conftest.py | kurumuz/datacrunch-python | 94b02c68da48b1017c0c837b3b37a97b4b2543a5 | [
"MIT"
] | 3 | 2021-05-16T00:47:40.000Z | 2021-12-17T08:59:16.000Z | import pytest
from unittest.mock import Mock
from datacrunch.http_client.http_client import HTTPClient
# Fake API endpoint and credentials used by the mocked auth service below.
BASE_URL = "https://api-testing.datacrunch.io/v1"
ACCESS_TOKEN = "test-token"
CLIENT_ID = "0123456789xyz"
@pytest.fixture
def http_client():
    """Return an HTTPClient wired to a fully mocked auth service."""
    auth_service = Mock()
    auth_service._access_token = ACCESS_TOKEN
    # Report the token as expired so the refresh path is exercised.
    auth_service.is_expired = Mock(return_value=True)
    auth_service.refresh = Mock(return_value=None)
    auth_service._client_id = CLIENT_ID
    return HTTPClient(auth_service, BASE_URL)
| 25.6 | 57 | 0.777344 | 0 | 0 | 0 | 0 | 298 | 0.582031 | 0 | 0 | 65 | 0.126953 |
dc600239b48d84e8a3c314d3367ea64d86c3cba7 | 414 | py | Python | apps/cadastro/models/__init__.py | AlcindoSchleder/ERPi-City | 63f8871cbe0be372dae81e2e363e6733d5d34210 | [
"MIT"
] | null | null | null | apps/cadastro/models/__init__.py | AlcindoSchleder/ERPi-City | 63f8871cbe0be372dae81e2e363e6733d5d34210 | [
"MIT"
] | 11 | 2019-11-06T09:24:51.000Z | 2021-03-24T19:18:12.000Z | apps/cadastro/models/__init__.py | AlcindoSchleder/ERPi-City | 63f8871cbe0be372dae81e2e363e6733d5d34210 | [
"MIT"
] | 1 | 2020-06-27T02:29:19.000Z | 2020-06-27T02:29:19.000Z | # -*- coding: utf-8 -*-
from .base import (
Pessoa,
PessoaFisica,
PessoaJuridica,
Endereco,
Telefone,
Email,
Site,
Banco,
Documento,
COD_UF,
UF_SIGLA,
)
from .empresa import Empresa, MinhaEmpresa
from .cliente import Cliente
from .fornecedor import Fornecedor
from .transportadora import Transportadora, Veiculo
from .produto import Produto, Unidade, Marca, Categoria
| 18.818182 | 55 | 0.698068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.055556 |
dc618ebead1a642930e82af83102e0b4aac293a7 | 69 | py | Python | dnfal/persons/__init__.py | altest-com/dnfal | d1fb15508c5583aeaa0957fcc3e37634d36bf237 | [
"MIT"
] | null | null | null | dnfal/persons/__init__.py | altest-com/dnfal | d1fb15508c5583aeaa0957fcc3e37634d36bf237 | [
"MIT"
] | 1 | 2020-03-31T17:04:09.000Z | 2020-03-31T17:04:09.000Z | dnfal/persons/__init__.py | altest-com/dnfal | d1fb15508c5583aeaa0957fcc3e37634d36bf237 | [
"MIT"
] | null | null | null | from .detection import BodyDetector
from .encoding import BodyEncoder | 34.5 | 35 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
dc61901098ab9709f063a98f0cc73f20f6a51887 | 3,113 | py | Python | irc.py | entuland/fogibot | e3afe14d53fe9d47178161d9311301c47c960507 | [
"MIT"
] | null | null | null | irc.py | entuland/fogibot | e3afe14d53fe9d47178161d9311301c47c960507 | [
"MIT"
] | null | null | null | irc.py | entuland/fogibot | e3afe14d53fe9d47178161d9311301c47c960507 | [
"MIT"
] | null | null | null | import asyncio
import re
from base64 import b64encode
# pattern taken from:
# https://mybuddymichael.com/writings/a-regular-expression-for-irc-messages.html
IRC_MSG_PATTERN = "^(?:[:](\S+) )?(\S+)(?: (?!:)(.+?))?(?: [:](.+))?$"
# class adapted from a sample kindly provided by https://github.com/emersonveenstra
class IRCClientProtocol(asyncio.Protocol):
    """asyncio protocol handling IRC connection setup, SASL authentication
    and line parsing; unhandled commands go to the external handler."""
    def __init__(self, conf, message_handler, logger):
        """Store the config object, the handler callback and the logger."""
        self._read_buffer = ""
        self._conf = conf
        self._log = logger
        self._message_handler = message_handler
    """ ========================================================================
            protocol required methods
        ======================================================================== """
    def connection_made(self, transport):
        """On connect: request the SASL capability and register NICK/USER."""
        self.transport = transport
        self.send_message("CAP REQ :sasl")
        self.send_message(f"NICK {self._conf.botname}")
        self.send_message(f"USER {self._conf.username} 8 * :{self._conf.realname}")
    def data_received(self, data):
        """Buffer incoming bytes and dispatch each complete CRLF line."""
        # NOTE(review): decode() on a chunk boundary could split a multi-byte
        # UTF-8 character and raise — confirm whether that matters here.
        self._read_buffer += data.decode()
        messages = re.split("\r\n", self._read_buffer)
        # put back incomplete message, if any
        self._read_buffer = messages.pop()
        for msg in messages:
            self.parse_message(msg)
    """ ========================================================================
            own methods
        ======================================================================== """
    def send_message(self, message, log_this = True):
        """Write one CRLF-terminated message, optionally logging it first."""
        if log_this:
            self._log("--> " + message)
        self.transport.write(f"{message}\r\n".encode())
    #---------------------------------------------------------------------------
    # parse the message and process it directly in some cases
    # pass it over to the external _message_handler() in all other cases
    def parse_message(self, msg):
        """Parse one raw IRC line; answer PING/SASL/registration replies
        directly and forward everything else to the message handler."""
        match = re.search(IRC_MSG_PATTERN, msg)
        if not match:
            return
        sender = match.group(1)
        if sender:
            # keep only the nick part of nick!user@host
            sender = sender.split("!")[0]
        irc_command = match.group(2)
        channel = match.group(3)
        message = match.group(4)
        if irc_command == "PING":
            self.send_message(f"PONG :{message}", log_this = False)
            # bail out immediately to avoid logging pings
            return
        self._log("<-- " + msg)
        if irc_command == "CAP":
            self.send_message("AUTHENTICATE PLAIN")
        elif irc_command == "AUTHENTICATE":
            # SASL PLAIN payload: base64("\0username\0password")
            authstring = b64encode(
                f"\0{self._conf.username}\0{self._conf.password}".encode()
            ).decode()
            self.send_message(f"AUTHENTICATE {authstring}", log_this = False)
        elif irc_command == "900":
            # 900 (RPL_LOGGEDIN): SASL succeeded, end capability negotiation
            self.send_message("CAP END")
        elif irc_command == "376":
            # 376 (RPL_ENDOFMOTD): registration done, join configured channels
            for channel in self._conf.channels:
                self.send_message(f"JOIN {channel}")
        else:
            self._message_handler(sender, irc_command, channel, message)
| 36.623529 | 83 | 0.512046 | 2,798 | 0.898811 | 0 | 0 | 0 | 0 | 0 | 0 | 1,201 | 0.385801 |
dc61c240ace363029e65db44548f6e19544dc644 | 5,077 | py | Python | NaiveNeurals/MLP/activation_functions.py | stovorov/NaiveNeurals | 88d91f3d4d39859eef372285f093643a447571a4 | [
"MIT"
] | 1 | 2019-01-16T13:45:47.000Z | 2019-01-16T13:45:47.000Z | NaiveNeurals/MLP/activation_functions.py | stovorov/NaiveNeurals | 88d91f3d4d39859eef372285f093643a447571a4 | [
"MIT"
] | 2 | 2020-03-24T16:17:06.000Z | 2020-03-30T23:53:16.000Z | NaiveNeurals/MLP/activation_functions.py | stovorov/NaiveNeurals | 88d91f3d4d39859eef372285f093643a447571a4 | [
"MIT"
] | null | null | null | """Module containing definitions of arithmetic functions used by perceptrons"""
from abc import ABC, abstractmethod
import numpy as np
from NaiveNeurals.utils import ErrorAlgorithm
class ActivationFunction(ABC):
    """Abstract base class for activation functions.

    Subclasses provide a ``label``, the forward ``function`` and its first
    derivative ``prime``; both operate element-wise on numpy arrays.
    """
    # Short string identifier used for lookup (see get_activation_function).
    label = ''
    @staticmethod
    @abstractmethod
    def function(arg: np.array) -> np.array:
        """Evaluate the activation function element-wise.

        :param arg: input value(s)
        :return: activation value(s)
        """
        raise NotImplementedError()
    @classmethod
    @abstractmethod
    def prime(cls, arg: np.array) -> np.array:
        """Evaluate the first derivative of the activation element-wise.

        :param arg: input value(s)
        :return: derivative value(s)
        """
        raise NotImplementedError()
class Sigmoid(ActivationFunction):
    """Logistic sigmoid activation and its first derivative."""
    label = 'sigmoid'

    @staticmethod
    def function(arg: np.array) -> np.array:
        """Compute sigmoid(arg) = 1 / (1 + e^-arg) element-wise.

        :param arg: input value(s)
        :return: sigmoid of the input
        """
        return 1 / (1 + np.exp(-arg))

    @classmethod
    def prime(cls, arg: np.array) -> np.array:
        """Compute the sigmoid derivative sig(arg) * (1 - sig(arg)).

        :param arg: input value(s)
        :return: derivative value(s)
        """
        sig = cls.function(arg)
        return sig * (1 - sig)
class Tanh(ActivationFunction):
    """Hyperbolic tangent activation and its first derivative."""
    label = 'tanh'

    @staticmethod
    def function(arg: np.array) -> np.array:
        """Compute tanh(arg) element-wise.

        :param arg: input value(s)
        :return: tanh of the input
        """
        return np.tanh(arg)

    @classmethod
    def prime(cls, arg: np.array) -> np.array:
        """Compute the tanh derivative 1 - tanh(arg)^2.

        :param arg: input value(s)
        :return: derivative value(s)
        """
        t = np.tanh(arg)
        return 1 - t ** 2
class Linear(ActivationFunction):
    """Identity (linear) activation function."""
    label = 'lin'

    @staticmethod
    def function(arg: np.array) -> np.array:
        """Return the input unchanged (lin(x) = x).

        :param arg: input value(s)
        :return: the same value(s)
        """
        return arg

    @classmethod
    def prime(cls, arg: np.array) -> np.array:
        """Return the derivative of the identity: ones shaped like the input.

        Uses np.ones_like instead of the previous copy-then-fill
        (``np.array(arg); ones[::] = 1.0``) — same shape and dtype, and it
        also handles 0-d inputs, which the slice assignment could not.

        :param arg: input value(s)
        :return: array of ones with the shape and dtype of ``arg``
        """
        return np.ones_like(arg)
class SoftMax(ActivationFunction):
    """Scaled softmax activation.

    Maps an N-dimensional vector of arbitrary reals to values in (0, 1)
    that sum to 1.0; the maximum is subtracted before exponentiation for
    numerical stability.

    source: https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
    """
    label = 'softmax'

    @staticmethod
    def function(arg: np.array, beta: int = 20) -> np.array:  # pylint: disable=arguments-differ
        """Compute the softmax of ``arg`` with sharpness ``beta``.

        :param arg: input vector
        :param beta: scaling parameter
        :return: normalized exponentials of the scaled input
        """
        exps = np.exp(beta * arg - beta * arg.max())
        return exps / np.sum(exps)

    @classmethod
    def prime(cls, arg: np.array) -> np.array:
        """Compute the element-wise derivative form s * (1 - s).

        :param arg: input vector
        :return: derivative value(s)
        """
        s = cls.function(arg)
        return s * (1 - s)
class SoftPlus(ActivationFunction):
    """Softplus activation ln(1 + e^x); its derivative is the sigmoid."""
    label = 'softplus'

    @staticmethod
    def function(arg: np.array) -> np.array:
        """Compute softplus(arg) = ln(1 + e^arg) element-wise.

        :param arg: input value(s)
        :return: softplus of the input
        """
        return np.log(1 + np.exp(arg))

    @classmethod
    def prime(cls, arg: np.array) -> np.array:
        """Compute the softplus derivative, the logistic sigmoid.

        :param arg: input value(s)
        :return: derivative value(s)
        """
        return 1 / (1 + np.exp(-arg))
def get_activation_function(label: str) -> ActivationFunction:
    """Get an activation function instance by its label.

    Unknown labels fall back to Sigmoid, preserving the previous behavior.

    :param label: one of 'lin', 'sigmoid', 'tanh', 'softmax', 'softplus'
    :return: activation function instance
    """
    # BUG FIX: 'softmax' and 'softplus' were previously missing and
    # silently resolved to the Sigmoid fallback.
    functions = {
        'lin': Linear,
        'sigmoid': Sigmoid,
        'tanh': Tanh,
        'softmax': SoftMax,
        'softplus': SoftPlus,
    }
    return functions.get(label, Sigmoid)()
def calculate_error(target: np.array, actual: np.array,
                    func_type: ErrorAlgorithm = ErrorAlgorithm.SQR) -> np.array:
    """Calculates error for provided actual and targeted data.

    :param target: target data
    :param actual: actual training data
    :param func_type: denotes type of used function for error
    :return: per-sample error (summed over axis 1)
    :raises NotImplementedError: for unsupported ``func_type`` values
    """
    if func_type == ErrorAlgorithm.SQR:
        # Squared error: 0.5 * (actual - target)^2, summed per row.
        return np.sum(0.5 * np.power(actual - target, 2), axis=1)
    elif func_type == ErrorAlgorithm.CE:
        # Cross-entropy style error; abs() guards log() against negatives.
        return -1 * np.sum(target * np.log(abs(actual)), axis=1)
    raise NotImplementedError()
| 26.035897 | 102 | 0.614142 | 3,903 | 0.768761 | 0 | 0 | 2,965 | 0.584006 | 0 | 0 | 2,548 | 0.501871 |
dc622f16d8375f421bc347edace50d1e05565b4c | 3,221 | py | Python | alevel.py | youxinweizhi/micropython-nano-gui | 630810de0dbc77e0416f1746240da1efaf68078b | [
"MIT"
] | null | null | null | alevel.py | youxinweizhi/micropython-nano-gui | 630810de0dbc77e0416f1746240da1efaf68078b | [
"MIT"
] | null | null | null | alevel.py | youxinweizhi/micropython-nano-gui | 630810de0dbc77e0416f1746240da1efaf68078b | [
"MIT"
] | 1 | 2021-07-17T09:57:08.000Z | 2021-07-17T09:57:08.000Z | # alevel.py Test/demo program for Adafruit ssd1351-based OLED displays
# Adafruit 1.5" 128*128 OLED display: https://www.adafruit.com/product/1431
# Adafruit 1.27" 128*96 display https://www.adafruit.com/product/1673
# The MIT License (MIT)
# Copyright (c) 2018 Peter Hinch
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# WIRING
# Pyb SSD
# 3v3 Vin
# Gnd Gnd
# X1 DC
# X2 CS
# X3 Rst
# X6 CLK
# X8 DATA
height = 96 # 1.27 inch 96*128 (rows*cols) display
# height = 128 # 1.5 inch 128*128 display
# Demo of initialisation procedure designed to minimise risk of memory fail
# when instantiating the frame buffer. The aim is to do this as early as
# possible before importing other modules.
import machine
import gc
from ssd1351 import SSD1351 as SSD
# Initialise hardware
pdc = machine.Pin('X1', machine.Pin.OUT_PP, value=0)
pcs = machine.Pin('X2', machine.Pin.OUT_PP, value=1)
prst = machine.Pin('X3', machine.Pin.OUT_PP, value=1)
spi = machine.SPI(1)
gc.collect()  # Precaution before instantiating framebuf
ssd = SSD(spi, pcs, pdc, prst, height)  # Create a display instance
from nanogui import Dial, Pointer, refresh
refresh(ssd)  # Initialise and clear display.
# Now import other modules
import utime
import pyb
from writer import CWriter
import arial10  # Font
# Colour constants built via the display driver's rgb() helper.
GREEN = SSD.rgb(0, 255, 0)
RED = SSD.rgb(255, 0, 0)
BLUE = SSD.rgb(0, 0, 255)
YELLOW = SSD.rgb(255, 255, 0)
BLACK = 0
def main():
    """Run the accelerometer demo: draw a compass-style dial and keep
    pointing its needle in the direction the Pyboard is tilted.

    Loops forever; interrupt to stop.
    """
    print('alevel test is running.')
    # Reset text position in case a previous test moved it.
    CWriter.set_textpos(ssd, 0, 0)
    writer = CWriter(ssd, arial10, GREEN, BLACK, verbose=False)
    writer.set_clip(True, True, False)
    accel = pyb.Accel()
    dial = Dial(writer, 5, 5, height=75, ticks=12, bdcolor=None,
                label='Tilt Pyboard', style=Dial.COMPASS, pip=YELLOW)
    arrow = Pointer(dial)
    scale = 1 / 40
    while True:
        x, y, z = accel.filtered_xyz()
        # Map the filtered reading onto the complex plane used by Pointer.
        # Depending on how display and board are mounted relative to each
        # other, x/y may need swapping or sign changes so the arrow points
        # in the tilt direction.
        vector = -y * scale + 1j * x * scale
        arrow.value(vector, YELLOW)
        refresh(ssd)
        utime.sleep_ms(200)
main()
| 35.01087 | 93 | 0.714064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,095 | 0.650419 |
dc65314f9ccc4d000bf72b8fe66d6e754777b9ee | 502 | py | Python | postmark_incoming/models.py | hkhanna/django-postmark-incoming | aa49b08ae27abf25bd164ac145de20e28b978725 | [
"MIT"
] | null | null | null | postmark_incoming/models.py | hkhanna/django-postmark-incoming | aa49b08ae27abf25bd164ac145de20e28b978725 | [
"MIT"
] | null | null | null | postmark_incoming/models.py | hkhanna/django-postmark-incoming | aa49b08ae27abf25bd164ac145de20e28b978725 | [
"MIT"
] | null | null | null | import logging
from django.db import models
logger = logging.getLogger(__name__)
class PostmarkWebhook(models.Model):
    """One row per inbound webhook POST received from Postmark."""
    # Set automatically when the row is first saved.
    received_at = models.DateTimeField(auto_now_add=True)
    # Raw JSON payload of the webhook request.
    body = models.JSONField()
    # HTTP headers of the webhook request, stored as JSON.
    headers = models.JSONField()
    # Optional free-form note (e.g. why processing failed) — TODO confirm use.
    note = models.TextField(blank=True)
    class Status(models.TextChoices):
        # Processing lifecycle of a received webhook.
        NEW = "new"
        PROCESSED = "processed"
        ERROR = "error"
    # Current processing state; new rows start as NEW.
    status = models.CharField(
        max_length=127, choices=Status.choices, default=Status.NEW
    )
| 23.904762 | 66 | 0.687251 | 417 | 0.830677 | 0 | 0 | 0 | 0 | 0 | 0 | 23 | 0.045817 |
dc6661641c3f324b5d546ee624c7243aacfaf151 | 1,372 | py | Python | models/ClassicNetwork/blocks/resnext_block.py | Dou-Yu-xuan/deep-learning-visal | 82978f454c7f2662d0eb972b5a4a1e5d5961b232 | [
"Apache-2.0"
] | 150 | 2021-12-10T01:21:06.000Z | 2022-03-30T08:13:42.000Z | models/ClassicNetwork/blocks/resnext_block.py | Curdboycc/torch-template-for-deep-learning | da1ebc527d44c8c5a524e757a1d784ba37ec2d5c | [
"Apache-2.0"
] | 2 | 2021-12-23T04:59:54.000Z | 2021-12-23T06:23:24.000Z | models/ClassicNetwork/blocks/resnext_block.py | Curdboycc/torch-template-for-deep-learning | da1ebc527d44c8c5a524e757a1d784ba37ec2d5c | [
"Apache-2.0"
] | 54 | 2021-12-10T03:36:27.000Z | 2022-03-22T11:57:12.000Z | # -*- coding: UTF-8 -*-
"""
@Cai Yichao 2020_09_08
"""
import torch.nn as nn
import torch.nn.functional as F
from models.blocks.SE_block import SE
from models.blocks.conv_bn import BN_Conv2d
class ResNeXt_Block(nn.Module):
    """ResNeXt bottleneck block: 1x1 reduce -> grouped 3x3 -> 1x1 expand,
    with a projection shortcut and optional squeeze-and-excitation.
    """

    def __init__(self, in_chnls, cardinality, group_depth, stride, is_se=False):
        super(ResNeXt_Block, self).__init__()
        self.is_se = is_se
        width = cardinality * group_depth
        self.group_chnls = width
        # Main path: the stride is applied in the grouped 3x3 convolution;
        # the final 1x1 expands to twice the grouped width.
        self.conv1 = BN_Conv2d(in_chnls, width, 1, stride=1, padding=0)
        self.conv2 = BN_Conv2d(width, width, 3, stride=stride, padding=1,
                               groups=cardinality)
        self.conv3 = nn.Conv2d(width, width * 2, 1, stride=1, padding=0)
        self.bn = nn.BatchNorm2d(width * 2)
        if self.is_se:
            # Squeeze-and-excitation with reduction ratio 16.
            self.se = SE(width * 2, 16)
        # Projection shortcut matching the main path's channels and stride.
        self.short_cut = nn.Sequential(
            nn.Conv2d(in_chnls, width * 2, 1, stride, 0, bias=False),
            nn.BatchNorm2d(width * 2)
        )

    def forward(self, x):
        h = self.conv2(self.conv1(x))
        h = self.bn(self.conv3(h))
        if self.is_se:
            # Channel-wise rescaling by the SE attention coefficients.
            h *= self.se(h)
        h += self.short_cut(x)
        return F.relu(h)
| 33.463415 | 115 | 0.630466 | 1,177 | 0.857872 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.077259 |
dc66e87ae5e869bb818c926e0d037764b9733b9b | 2,948 | py | Python | email_api/api/migrations/0001_initial.py | PawlikMateusz/DjangoEmailRestApi | 1701033de109432a36252ce88389111c65e77249 | [
"MIT"
] | null | null | null | email_api/api/migrations/0001_initial.py | PawlikMateusz/DjangoEmailRestApi | 1701033de109432a36252ce88389111c65e77249 | [
"MIT"
] | null | null | null | email_api/api/migrations/0001_initial.py | PawlikMateusz/DjangoEmailRestApi | 1701033de109432a36252ce88389111c65e77249 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.13 on 2019-02-28 19:18
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration for the ``api`` app.
    # NOTE: edit with care — Django tracks applied migrations by name/content.
    initial = True
    dependencies = [
    ]
    operations = [
        # Email: one outgoing message; its mailbox/template foreign keys are
        # added by the AddField operations at the bottom.
        migrations.CreateModel(
            name='Email',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('to', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(max_length=254), size=None)),
                ('cc', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(blank=True, max_length=254, null=True), size=None)),
                ('bcc', django.contrib.postgres.fields.ArrayField(base_field=models.EmailField(blank=True, max_length=254, null=True), size=None)),
                ('reply_to', models.EmailField(blank=True, default=None, max_length=254, null=True)),
                ('send_date', models.DateTimeField()),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Mailbox: SMTP account used to send emails.
        migrations.CreateModel(
            name='Mailbox',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('host', models.CharField(max_length=100)),
                ('port', models.IntegerField(default=465)),
                ('login', models.CharField(max_length=30)),
                ('password', models.CharField(max_length=30)),
                ('email_from', models.CharField(max_length=50)),
                ('use_ssl', models.BooleanField(default=True)),
                ('is_active', models.BooleanField(default=False)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('last_update', models.DateTimeField(auto_now=True)),
            ],
        ),
        # Template: reusable subject/body (with optional attachment).
        migrations.CreateModel(
            name='Template',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('subject', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('attachment', models.FileField(blank=True, null=True, upload_to='')),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('last_update', models.DateTimeField(auto_now=True)),
            ],
        ),
        # PROTECT prevents deleting a mailbox/template that emails still use.
        migrations.AddField(
            model_name='email',
            name='mailbox',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails', to='api.Mailbox'),
        ),
        migrations.AddField(
            model_name='email',
            name='template',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='emails', to='api.Template'),
        ),
    ]
| 44.666667 | 147 | 0.592605 | 2,771 | 0.939959 | 0 | 0 | 0 | 0 | 0 | 0 | 330 | 0.11194 |
dc68739da52483fd2292c811c9370f529610773a | 1,985 | py | Python | snekcord/objects/base.py | asleep-cult/snekcord | 04302b0c65bad01c00fb047df3040d3234773689 | [
"MIT"
] | 9 | 2021-07-26T00:25:51.000Z | 2022-02-23T16:00:10.000Z | snekcord/objects/base.py | asleep-cult/snekcord | 04302b0c65bad01c00fb047df3040d3234773689 | [
"MIT"
] | 37 | 2021-05-29T16:16:22.000Z | 2022-02-13T13:57:25.000Z | snekcord/objects/base.py | asleep-cult/snekcord | 04302b0c65bad01c00fb047df3040d3234773689 | [
"MIT"
] | 4 | 2021-06-02T16:45:41.000Z | 2022-02-10T14:57:16.000Z | from .. import json
from ..exceptions import UnknownObjectError
from ..snowflake import Snowflake
__all__ = ('BaseObject', 'ObjectWrapper')
class _IDField(json.JSONField):
    """JSON field mapping the raw 'id' payload key to a Snowflake.

    NOTE: ``json`` here is the package-local ``snekcord.json`` module,
    not the standard library.
    """
    def __init__(self) -> None:
        # Keyed by 'id' in the payload; repr=True presumably marks the field
        # for inclusion in the object's repr — confirm against json.JSONField.
        super().__init__('id', repr=True)
    def construct(self, value: str) -> Snowflake:
        """Deserialize the raw string id into a Snowflake."""
        return Snowflake(value)
    def deconstruct(self, value: Snowflake) -> str:
        """Serialize a Snowflake back to its string form."""
        return str(value)
class _ObjectMixin:
    """Shared behavior for cached objects and their wrappers.

    Subclasses must implement ``_get_id`` and provide storage for the
    ``state`` attribute (this mixin declares no slots of its own).
    """

    __slots__ = ()

    def __init__(self, *, state) -> None:
        self.state = state

    def _get_id(self):
        """Return the object's id; subclasses must override."""
        raise NotImplementedError

    @property
    def client(self):
        """The client that owns the underlying state."""
        return self.state.client

    def is_cached(self) -> bool:
        """Return True if the object's id is present in the state's cache."""
        # Go through _get_id() (the declared abstraction point) instead of
        # reading self.id directly, so the mixin works for any subclass
        # regardless of how it stores its id.
        return self._get_id() in self.state.keys()

    async def fetch(self):
        """Fetch a fresh copy of the object from its state."""
        return await self.state.fetch(self._get_id())
class BaseObject(json.JSONObject, _ObjectMixin):
    """A cached object deserialized from a JSON payload."""
    __slots__ = ('__weakref__', 'state')
    # Snowflake id deserialized from the payload's 'id' key.
    id = _IDField()
    def __init__(self, *, state) -> None:
        super().__init__(state=state)
        # Constructing a BaseObject registers it in its state's cache.
        # NOTE(review): assumes self.id is already populated by the
        # JSONObject machinery before __init__ runs — confirm in snekcord.json.
        self.state.cache[self.id] = self
    def _get_id(self):
        return self.id
class ObjectWrapper(_ObjectMixin):
    """Lightweight reference to an object that might not be cached.

    Attributes:
        state: The state that the wrapped object belongs to.
        id: The id of the wrapped object (may be None).
    """

    __slots__ = ('__weakref__', 'state', 'id')

    def __init__(self, *, state, id) -> None:
        super().__init__(state=state)
        self.set_id(id)

    def __repr__(self) -> str:
        return f'<ObjectWrapper id={self.id}>'

    def _get_id(self):
        return self.id

    def set_id(self, id):
        """Change the id of the wrapper, normalizing it through the state."""
        self.id = None if id is None else self.state.unwrap_id(id)

    def unwrap(self):
        """Equivalent to ``self.state.get(self.id)``, raising on a miss."""
        cached = self.state.get(self.id)
        if cached is None:
            raise UnknownObjectError(self.id)
        return cached
| 22.303371 | 56 | 0.605038 | 1,832 | 0.922922 | 0 | 0 | 64 | 0.032242 | 69 | 0.034761 | 360 | 0.18136 |
dc6a50ddafe2adca5937a2495c5ef8b71db9e620 | 4,098 | py | Python | pyseus/ui/sidebar.py | impergator493/PySeus | faa7e5741acea9c3b8e0acad066905fa3b1c301b | [
"X11"
] | 2 | 2020-02-17T09:20:50.000Z | 2022-03-22T13:05:22.000Z | pyseus/ui/sidebar.py | impergator493/PySeus | faa7e5741acea9c3b8e0acad066905fa3b1c301b | [
"X11"
] | null | null | null | pyseus/ui/sidebar.py | impergator493/PySeus | faa7e5741acea9c3b8e0acad066905fa3b1c301b | [
"X11"
] | 1 | 2021-05-26T08:14:58.000Z | 2021-05-26T08:14:58.000Z | """GUI elements for use in the sidebar of the main window.
Classes
-------
**InfoWidget** - Sidebar widget for basic file information.
**MetaWidget** - Sidebar widget for basic metadata.
**ConsoleWidget** - Sidebar widget for basic text output.
"""
from PySide2.QtCore import QSize
from PySide2.QtWidgets import QFormLayout, QFrame, QLabel, QLineEdit, \
QScrollArea, QSizePolicy, QTextEdit, QVBoxLayout
from pyseus.settings import settings
class InfoWidget(QFrame):
    """Sidebar widget for basic file info: path, scan ID and slice index."""

    def __init__(self, app):
        super().__init__()
        self.app = app

        outer = QVBoxLayout()
        outer.setContentsMargins(0, 0, 0, 0)
        self.setLayout(outer)

        # Editable fields for path and scan; plain label for the slice index.
        self.path = QLineEdit("")
        self.scan = QLineEdit("")
        self.slice = QLabel("")

        form = QFrame()
        form_layout = QFormLayout()
        form.setLayout(form_layout)
        form_layout.addRow("Path:", self.path)
        form_layout.addRow("Scan:", self.scan)
        form_layout.addRow("Slice:", self.slice)
        outer.addWidget(form)

        self.setSizePolicy(QSizePolicy.Policy.Fixed,
                           QSizePolicy.Policy.Fixed)
        self.updateGeometry()

    def minimumSizeHint(self):  # pylint: disable=C0103,R0201
        """Return widget size to ensure uniform sidebar width."""
        width = int(settings["ui"]["sidebar_size"])
        return QSize(width, 80)

    def update_slice(self, current, slices):
        """Update the displayed slice index (shown 1-based)."""
        self.slice.setText("{} / {}".format(current+1, slices))

    def update_scan(self, scan):
        """Update the displayed scan ID."""
        self.scan.setText("{}".format(scan))

    def update_path(self, path):
        """Update the displayed path."""
        self.path.setText(path)
class MetaWidget(QScrollArea):
    """Scrollable sidebar widget listing metadata key/value pairs."""

    def __init__(self, app):
        QScrollArea.__init__(self)
        self.app = app
        self._reset_ui()

    def _reset_ui(self):
        """Discard all rows and install a fresh, empty form layout."""
        container = QFrame()
        container.setLayout(QFormLayout())
        # Keep a handle to the layout so rows can be appended later.
        self.table = container.layout()
        self.setWidgetResizable(True)
        self.setWidget(container)
        self.setSizePolicy(QSizePolicy.Policy.Fixed,
                           QSizePolicy.Policy.MinimumExpanding)
        self.updateGeometry()

    def minimumSizeHint(self):  # pylint: disable=C0103,R0201
        """Return widget size to ensure uniform sidebar width."""
        width = int(settings["ui"]["sidebar_size"])
        return QSize(width, 100)

    def update_meta(self, data, more=True):
        """Rebuild the table from *data*; if *more* is True, append a
        clickable link that opens the full metadata window."""
        self._reset_ui()
        if data:
            for key in sorted(data.keys()):
                self.table.addRow(key, QLineEdit(str(data[key])))
            if more:
                more_label = QLabel("more ...")
                more_label.mouseReleaseEvent = self._show_more
                self.table.addRow(more_label, None)
        else:
            self.table.addRow("No metadata available", None)

    def _show_more(self, event):  # pylint: disable=W0613
        """Open the window showing all available metadata."""
        self.app.show_metadata_window()
class ConsoleWidget(QTextEdit):
    """Read-only sidebar text area for generic console-style output."""

    def __init__(self, app):
        QTextEdit.__init__(self)
        self.app = app
        self.setReadOnly(True)
        self.setSizePolicy(QSizePolicy.Policy.Fixed,
                           QSizePolicy.Policy.MinimumExpanding)
        self.updateGeometry()

    def minimumSizeHint(self):  # pylint: disable=C0103,R0201
        """Return widget size to ensure uniform sidebar width."""
        width = int(settings["ui"]["sidebar_size"])
        return QSize(width, 100)

    def print(self, text):
        """Append *text* and scroll so the newest line is visible."""
        self.append(text)
        scrollbar = self.verticalScrollBar()
        scrollbar.setValue(scrollbar.maximum())
| 31.282443 | 77 | 0.623963 | 3,641 | 0.888482 | 0 | 0 | 0 | 0 | 0 | 0 | 1,169 | 0.285261 |
dc6a5a8186b1238e84ceb0aa65bbef6db414344e | 59 | py | Python | problems/bfs/Solution909.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/bfs/Solution909.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/bfs/Solution909.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | """
BFS
"""
class Solution909:
    """Empty placeholder — presumably for LeetCode problem 909
    (BFS-based, per the module docstring); no implementation yet."""
    pass
| 5.9 | 18 | 0.389831 | 27 | 0.457627 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.491525 |
dc6b05ea547d23e6274f4183f97bd2f598c016e9 | 713 | py | Python | class4/exercise7.py | linkdebian/pynet_course | e0de498078ab914b3535fa2ea5c2e71d6e3799fb | [
"Apache-2.0"
] | null | null | null | class4/exercise7.py | linkdebian/pynet_course | e0de498078ab914b3535fa2ea5c2e71d6e3799fb | [
"Apache-2.0"
] | null | null | null | class4/exercise7.py | linkdebian/pynet_course | e0de498078ab914b3535fa2ea5c2e71d6e3799fb | [
"Apache-2.0"
] | null | null | null | # Use Netmiko to change the logging buffer size (logging buffered <size>) on pynet-rtr2.
from getpass import getpass
from netmiko import ConnectHandler
def main():
    """Set the logging buffer size on pynet-rtr2 and verify the change.

    Prompts for the SSH password, enters config mode, applies
    ``logging buffered 20031``, then prints the size read back from the
    running configuration.
    """
    password = getpass()
    pynet_rtr2 = {'device_type': 'cisco_ios', 'ip': '50.76.53.27', 'username': 'pyclass', 'password': password, 'port': 8022}
    ssh_connection = ConnectHandler(**pynet_rtr2)
    try:
        ssh_connection.config_mode()
        logging_command = ['logging buffered 20031']
        ssh_connection.send_config_set(logging_command)
        output = ssh_connection.send_command('show run | inc logging buffered')
        # Output looks like "logging buffered 20031"; the size is field 3.
        outp = output.split()
        # print() call form (not the Python 2 print statement) keeps the
        # script runnable under both Python 2 and Python 3.
        print("The new size of logging buffered is %s" % outp[2])
    finally:
        # Always close the SSH session, even if a command fails.
        ssh_connection.disconnect()
if __name__ == "__main__":
    main()
| 24.586207 | 125 | 0.701262 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.380084 |
dc6d51bc529682aa9cdad4fdc52398d8540304e3 | 2,106 | py | Python | textX-LS/core/setup.py | ipa-mdl/textX-LS | 26f6dac5794cbb142d40c26b074883237d6ff42f | [
"MIT"
] | 30 | 2019-03-04T14:59:34.000Z | 2022-03-12T11:10:06.000Z | textX-LS/core/setup.py | ipa-mdl/textX-LS | 26f6dac5794cbb142d40c26b074883237d6ff42f | [
"MIT"
] | 26 | 2018-12-31T00:31:57.000Z | 2022-02-26T09:04:44.000Z | textX-LS/core/setup.py | ipa-mdl/textX-LS | 26f6dac5794cbb142d40c26b074883237d6ff42f | [
"MIT"
] | 8 | 2019-01-10T19:36:23.000Z | 2021-12-12T07:59:47.000Z | # flake8: noqa
import codecs
import os
from platform import python_version
from setuptools import find_packages, setup
# Package identity / PyPI metadata.
PACKAGE_NAME = "textx-ls-core"
VERSION = "0.2.0"
AUTHOR = "Daniel Elero"
AUTHOR_EMAIL = "danixeee@gmail.com"
DESCRIPTION = (
    "a core language server logic for domain specific languages based on textX"
)
KEYWORDS = "textX DSL python domain specific languages"
LICENSE = "MIT"
URL = "https://github.com/textX/textX-LS/core"
packages = find_packages()
# NOTE(review): this debug print runs on every build/install; consider removing.
print("packages:", packages)
# Long description for PyPI, read from the README next to this file.
README = codecs.open(
    os.path.join(os.path.dirname(__file__), "README.md"), "r", encoding="utf-8"
).read()
# Optional dependency sets, exposed via extras_require below.
dev_require = ["bandit==1.5.1", "flake8==3.7.7", "textx_gen_vscode>=0.1.3"]
tests_require = ["coverage==4.5.3", "pytest==4.3.1", "pytest-cov==2.6.1"]
# pip install textx_ls_core[vscode]
vscode_require = ["textx_gen_vscode>=0.1.3"]
if python_version().startswith("3.6"):  # For python 3.6
    # black is added only on 3.6 — presumably to keep a formatter available
    # on that interpreter; confirm before changing.
    dev_require.append("black")
setup(
    name=PACKAGE_NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=README,
    long_description_content_type="text/markdown",
    url=URL,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    keywords=KEYWORDS,
    license=LICENSE,
    packages=packages,
    include_package_data=True,
    # Ship textX grammar files (*.tx) found inside the packages.
    package_data={"": ["*.tx"]},
    install_requires=["textX>=2.1.0", "wheel_inspect==1.3.0"],
    extras_require={
        "dev": dev_require,
        "test": tests_require,
        "vscode": vscode_require,
    },
    tests_require=tests_require,
    classifiers=[
        "Development Status :: 2 - Pre-Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Information Technology",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| 28.459459 | 79 | 0.65812 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 992 | 0.471035 |