Dataset schema (column, type, and observed range; ⌀ marks columns that may be null):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 | 1 – 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 | 1 – 67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 | 1 – 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
hexsha: 385f371ceb5509451926af50b85caa241478e630 | size: 53 | ext: py | lang: Python
max_stars: tdict/__init__.py @ royf/tdict (a54a33859587ff3324c8e306608d6c4306c569bc) | licenses: ["MIT"] | stars: null | events: null – null
max_issues: tdict/__init__.py @ royf/tdict (a54a33859587ff3324c8e306608d6c4306c569bc) | licenses: ["MIT"] | issues: null | events: null – null
max_forks: tdict/__init__.py @ royf/tdict (a54a33859587ff3324c8e306608d6c4306c569bc) | licenses: ["MIT"] | forks: null | events: null – null
from .tdict import Tdict
from .tdict import tdictify
avg_line_length: 17.666667 | max_line_length: 27 | alphanum_fraction: 0.811321
hexsha: e8e163663b10ed8283b9a925abb00911a6b5967c | size: 1,644 | ext: py | lang: Python
max_stars: xlsxwriter/test/comparison/test_chart_format30.py @ eddiechapman/XlsxWriter (c636117ab30e64e4b7b824c9105595c42887c2c9) | licenses: ["BSD-2-Clause-FreeBSD"] | stars: 2,766 | events: 2015-01-02T17:36:42.000Z – 2022-03-31T09:23:30.000Z
max_issues: xlsxwriter/test/comparison/test_chart_format30.py @ xiaolanmeng86/XlsxWriter (6c3ea23a410e8216eab8f5751e5544ffb444b3da) | licenses: ["BSD-2-Clause-FreeBSD"] | issues: 683 | events: 2015-01-03T09:55:02.000Z – 2022-03-31T07:18:15.000Z
max_forks: xlsxwriter/test/comparison/test_chart_format30.py @ xiaolanmeng86/XlsxWriter (6c3ea23a410e8216eab8f5751e5544ffb444b3da) | licenses: ["BSD-2-Clause-FreeBSD"] | forks: 636 | events: 2015-01-05T01:57:08.000Z – 2022-03-25T18:42:41.000Z
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_format30.xlsx')
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'line'})
chart.axis_ids = [108652416, 108655744]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
'trendline': {'type': 'linear',
'intercept': 0.8,
'display_equation': True,
'display_r_squared': True},
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.set_legend({'delete_series': [0, 2]})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
avg_line_length: 25.6875 | max_line_length: 79 | alphanum_fraction: 0.51764
hexsha: 8ba5888ddaf4ee0cbeba11010b9be7fe40a520ae | size: 1,770 | ext: py | lang: Python
max_stars: app/user/serializers.py @ paulovitorweb/recipe-app-api (3477c32c445afbb66fce566b9ae66918b21c28df) | licenses: ["MIT"] | stars: null | events: null – null
max_issues: app/user/serializers.py @ paulovitorweb/recipe-app-api (3477c32c445afbb66fce566b9ae66918b21c28df) | licenses: ["MIT"] | issues: null | events: null – null
max_forks: app/user/serializers.py @ paulovitorweb/recipe-app-api (3477c32c445afbb66fce566b9ae66918b21c28df) | licenses: ["MIT"] | forks: null | events: null – null
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from core.models import User
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data: dict):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user: User = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
avg_line_length: 31.052632 | max_line_length: 74 | alphanum_fraction: 0.649153
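A short sketch of how the two serializers in the file above are typically driven, assuming the project's custom user model uses the email field as its username (as the serializer code implies); the import path, email, and password are placeholder values:

```python
# Hypothetical sketch: exercising the serializers from a Django shell or view.
from user.serializers import UserSerializer, AuthTokenSerializer  # path assumed from app/user/serializers.py

# Create a user; the password is write-only and hashed by create_user().
serializer = UserSerializer(data={"email": "new@example.com",
                                  "password": "s3cretpw",
                                  "name": "New User"})
serializer.is_valid(raise_exception=True)
user = serializer.save()

# Authenticate; validate() calls authenticate() and attaches the user to the validated data.
auth = AuthTokenSerializer(data={"email": "new@example.com", "password": "s3cretpw"},
                           context={"request": None})
auth.is_valid(raise_exception=True)
print(auth.validated_data["user"] == user)
```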
hexsha: 947787263acb375d700d754eb7f81e28087f789b | size: 775 | ext: py | lang: Python
max_stars: opencvPYTesting/denseFlow.py @ Dwight-Englewood/0207-Rover-Ruckus (bf97c86644af33d601857142c59df4b714c625cd) | licenses: ["MIT"] | stars: 2 | events: 2018-12-15T04:20:04.000Z – 2018-12-15T04:20:09.000Z
max_issues: opencvPYTesting/denseFlow.py @ Dwight-Englewood-Robotics/0207-Rover-Ruckus (bf97c86644af33d601857142c59df4b714c625cd) | licenses: ["MIT"] | issues: 10 | events: 2018-10-29T17:12:17.000Z – 2018-11-08T12:45:13.000Z
max_forks: opencvPYTesting/denseFlow.py @ Dwight-Englewood-Robotics/0207-Rover-Ruckus (bf97c86644af33d601857142c59df4b714c625cd) | licenses: ["MIT"] | forks: 1 | events: 2018-10-05T20:14:36.000Z – 2018-10-05T20:14:36.000Z
import cv2 as cv
import numpy as np
cap = cv.VideoCapture("wood.mp4")
ret, frame1 = cap.read()
prvs = cv.cvtColor(frame1,cv.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[...,1] = 255
while(1):
ret, frame2 = cap.read()
next = cv.cvtColor(frame2,cv.COLOR_BGR2GRAY)
flow = cv.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)
mag, ang = cv.cartToPolar(flow[...,0], flow[...,1])
hsv[...,0] = ang*180/np.pi/2
hsv[...,2] = cv.normalize(mag,None,0,255,cv.NORM_MINMAX)
bgr = cv.cvtColor(hsv,cv.COLOR_HSV2BGR)
cv.imshow('frame2',bgr)
k = cv.waitKey(30) & 0xff
if k == 27:
break
elif k == ord('s'):
cv.imwrite('opticalfb.png',frame2)
cv.imwrite('opticalhsv.png',bgr)
prvs = next
cap.release()
avg_line_length: 31 | max_line_length: 81 | alphanum_fraction: 0.616774
hexsha: 4aa72a0be43791b8ef2c9dfd3d02a6f543da3a7f | size: 5,581 | ext: py | lang: Python
max_stars: src/textpruner/pruners/vocabulary_pruner.py @ zchu-hit-scir/TextPruner (77871ce2bc525630d69c5bb4db48e68a2e532757) | licenses: ["Apache-2.0"] | stars: null | events: null – null
max_issues: src/textpruner/pruners/vocabulary_pruner.py @ zchu-hit-scir/TextPruner (77871ce2bc525630d69c5bb4db48e68a2e532757) | licenses: ["Apache-2.0"] | issues: null | events: null – null
max_forks: src/textpruner/pruners/vocabulary_pruner.py @ zchu-hit-scir/TextPruner (77871ce2bc525630d69c5bb4db48e68a2e532757) | licenses: ["Apache-2.0"] | forks: null | events: null – null
import torch
from torch import nn
import os
from ..model_map import MODEL_MAP
from ..configurations import VocabularyPruningConfig, GeneralConfig
from .utils import infer_model_type
import logging
from tqdm import tqdm
from collections import abc
from typing import Optional
logger = logging.getLogger(__name__)
class VocabularyPruner:
"""
Args:
model : The model to be pruned.
tokenizer : The tokenizer for the model.
vocabulary_pruning_config : a :class:`~textpruner.configurations.VocabularyPruningConfig` object.
general_config : a :class:`~textpruner.configurations.GeneralConfig` object.
base_model_prefix : The prefix of the base model, i.e., the name of the base model as a member in the model. \
For example, if ``model.bert_encoder = BertModel(...)``, then the ``base_model_prefix`` is ``bert_encoder``. \
TextPruner will infer the ``base_model_prefix`` so we can leave its value as ``None``. But if it fails, users have to set its value explicitly.
"""
def __init__(self,
model : nn.Module,
tokenizer,
vocabulary_pruning_config : Optional[VocabularyPruningConfig] = None,
general_config : Optional[GeneralConfig] = None,
base_model_prefix : Optional[str] = None):
self.model = model
self.tokenizer = tokenizer
#infer model type
base_model, model_type = infer_model_type(model, base_model_prefix)
assert model_type in MODEL_MAP, \
f"Model type {self.model_type} is not supported, or not understood. Model type must be one of {list(MODEL_MAP.keys())}"
self.base_model = base_model
self.model_type = model_type
self.general_config = GeneralConfig() if general_config is None else general_config
self.vocabulary_pruning_config = VocabularyPruningConfig() if vocabulary_pruning_config is None else vocabulary_pruning_config
self.model.to(self.general_config.device)
self.model_vocab_resizer = MODEL_MAP[self.model_type]['resizer']
self.tokenizer_helper = MODEL_MAP[self.model_type]['tokenizer_helper']
self.pruned_token_ids = []
os.makedirs(self.general_config.output_dir, exist_ok=True)
self.save_dir = None
def prune(self, dataiter=None, additional_tokens=None,
additional_token_ids=None, save_model=True) -> Optional[str]:
'''
        Prunes the vocabulary of the model and the tokenizer. The pruner will only keep the tokens in ``dataiter``, ``additional_tokens`` and ``additional_token_ids``.
* Use ``dataiter`` to generate a set of tokens from the raw texts.
* Use ``additional_tokens`` or ``additional_token_ids`` to specify the tokens or token_ids directly without running the tokenization.
Args:
dataiter : a list of pre-tokenized strings. These strings will be tokenized by the tokenizer to generate a set of tokens.
            additional_tokens : a list of tokens. These tokens must exist in the original vocabulary.
additional_token_ids : a list of ints representing the token ids.
save_model : whether to save the model when the pruning is finished.
'''
min_count = self.vocabulary_pruning_config.min_count
lm_head_pruning= self.vocabulary_pruning_config.prune_lm_head
pruned_token_ids = self.tokenizer_helper.get_token_ids(tokenizer=self.tokenizer,
dataiter=dataiter,
additional_tokens=additional_tokens,
additional_token_ids=additional_token_ids,
min_count=min_count)
self.model_vocab_resizer.set_embeddings(model=self.base_model, token_ids=pruned_token_ids)
if lm_head_pruning == 'auto' or lm_head_pruning is True:
is_success = self.model_vocab_resizer.set_lm_head(self.model, pruned_token_ids)
if is_success is False:
if lm_head_pruning is True:
logger.info("Cannot get output embeddings! Is your model has a MLM prediction head?")
else:
logger.info("Cannot get output embeddings. No LM head pruning.")
self.pruned_token_ids = pruned_token_ids
if save_model is True:
self.save_dir = self.save_model()
return self.save_dir
def save_model(self, dir_name = None) -> str:
if self.model_type.lower() in ['t5', 'mt5']:
vocab_size = self.base_model.shared.weight.shape[0]
else:
vocab_size = len(self.pruned_token_ids)
self.base_model.config.vocab_size = vocab_size
if dir_name is None:
save_dir = os.path.join(self.general_config.output_dir, f'pruned_V{vocab_size}')
else:
save_dir = os.path.join(self.general_config.output_dir, dir_name)
os.makedirs(save_dir, exist_ok=True)
# save tokenizer
self.tokenizer_helper.save_vocab(self.tokenizer, self.pruned_token_ids, save_dir)
# save weights
torch.save(self.model.state_dict(),os.path.join(save_dir,f'pytorch_model.bin'))
# save config
config_dir = os.path.join(save_dir)
self.base_model.config.save_pretrained(config_dir)
logger.info(f"Model and configuration have been saved to {save_dir}")
return save_dir
avg_line_length: 47.700855 | max_line_length: 166 | alphanum_fraction: 0.658126
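A minimal usage sketch for the VocabularyPruner defined above, assuming TextPruner is installed and re-exports the class at the package top level (`from textpruner import VocabularyPruner`) and that a Hugging Face transformers model and tokenizer are available; the model name and sample texts are placeholders:

```python
# Hypothetical sketch: prune a multilingual vocabulary down to the tokens seen in a small corpus.
from transformers import AutoModelForMaskedLM, AutoTokenizer
from textpruner import VocabularyPruner  # assumed top-level re-export of the class shown above

model = AutoModelForMaskedLM.from_pretrained("bert-base-multilingual-cased")
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

texts = ["only English sentences appear in this corpus",
         "so tokens unused by English can be dropped"]

pruner = VocabularyPruner(model, tokenizer)   # default VocabularyPruningConfig / GeneralConfig
save_dir = pruner.prune(dataiter=texts)       # keeps tokens from `texts`, saves pruned model + vocab
print("pruned model written to", save_dir)
```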
hexsha: 9a3d9e76acb80d35e7bb5844800ddaf71b2b712a | size: 3,769 | ext: py | lang: Python
max_stars: data/states/audio.py @ harryninja/Ping-Pong-Doen-a (eabc052872544f1acf53be67415d5fd2e233bc5d) | licenses: ["Unlicense"] | stars: null | events: null – null
max_issues: data/states/audio.py @ harryninja/Ping-Pong-Doen-a (eabc052872544f1acf53be67415d5fd2e233bc5d) | licenses: ["Unlicense"] | issues: null | events: null – null
max_forks: data/states/audio.py @ harryninja/Ping-Pong-Doen-a (eabc052872544f1acf53be67415d5fd2e233bc5d) | licenses: ["Unlicense"] | forks: null | events: null – null
import pygame as pg
from .. import tools
'''this imports the audio handling from the Tools code, which points to the resources'''
class Audio(tools.States):
def __init__(self, screen_rect):
tools.States.__init__(self)
self.screen_rect = screen_rect
self.listings = [
'Background Music',
'-/+'
]
self.options = ['Back']
self.next_list = ['MENU']
self.title, self.title_rect = self.make_text('Audio', (75,75,75), (self.screen_rect.centerx, 75), 150)
self.pre_render_options()
self.pre_render_listings()
self.from_bottom = 400
self.from_bottom_listings = 225
self.spacer = 25
self.bg_music_modify(0)
def bg_music_modify(self, amount, sound=None):
self.background_music_volume += amount
if self.background_music_volume > .9:
self.background_music_volume = 1.0
volume_display = 'Max'
elif self.background_music_volume < .1:
self.background_music_volume = 0.0
volume_display = 'Mute'
else:
if sound:
self.button_sound.sound.play()
volume_display = '{:.1f}'.format(self.background_music_volume)
self.bg_music_num, self.bg_music_num_rect = self.make_text(
volume_display, (75,75,75), (self.screen_rect.centerx + 125, 250), 30)
self.background_music.setup(self.background_music_volume)
def get_event(self, event, keys):
if event.type == pg.QUIT:
self.quit = True
elif event.type == pg.KEYDOWN:
if event.key in [pg.K_UP, pg.K_w]:
self.change_selected_option(-1)
elif event.key in [pg.K_DOWN, pg.K_s]:
self.change_selected_option(1)
elif event.key == pg.K_RETURN:
self.select_option(self.selected_index)
elif event.key == self.controller_dict['back']:
#self.button_sound.sound.play()
self.done = True
self.next = 'MENU'
elif event.key in [pg.K_PLUS, pg.K_EQUALS]:
self.bg_music_modify(.1, 'play')
elif event.key in [pg.K_MINUS, pg.K_UNDERSCORE]:
self.bg_music_modify(-.1, 'play')
self.mouse_menu_click(event)
def update(self, now, keys):
#pg.mouse.set_visible(True)
self.mouse_hover_sound()
self.change_selected_option()
def render(self, screen):
screen.fill(self.bg_color)
screen.blit(self.title, self.title_rect)
screen.blit(self.bg_music_num, self.bg_music_num_rect)
for i,opt in enumerate(self.rendered["des"]):
opt[1].center = (self.screen_rect.centerx, self.from_bottom+i*self.spacer)
if i == self.selected_index:
rend_img,rend_rect = self.rendered["sel"][i]
rend_rect.center = opt[1].center
screen.blit(rend_img,rend_rect)
else:
screen.blit(opt[0],opt[1])
for i,opt in enumerate(self.rendered_listing['des']):
opt[1].center = (self.screen_rect.centerx, self.from_bottom_listings + i * self.spacer)
screen.blit(opt[0],opt[1])
def pre_render_listings(self):
listing_text = tools.Font.load('impact.ttf', 25)
rendered_msg = {"des":[],"sel":[]}
for listing in self.listings:
text = listing_text.render(listing, 1, (255,255,255))
text_rect = text.get_rect()
rendered_msg["des"].append((text, text_rect))
self.rendered_listing = rendered_msg
def cleanup(self):
pass
def entry(self):
pass
avg_line_length: 38.85567 | max_line_length: 110 | alphanum_fraction: 0.580525
hexsha: c38190d5b840b28dfcd9600dd7b4e93618f960b4 | size: 10,268 | ext: bzl | lang: Python
max_stars: debian_versions.bzl @ ursinnDev/GoogleContainerTools_distroless (10aa518a3b9aabcf4a6401e226eff81c9602e654) | licenses: ["Apache-2.0"] | stars: null | events: null – null
max_issues: debian_versions.bzl @ ursinnDev/GoogleContainerTools_distroless (10aa518a3b9aabcf4a6401e226eff81c9602e654) | licenses: ["Apache-2.0"] | issues: null | events: null – null
max_forks: debian_versions.bzl @ ursinnDev/GoogleContainerTools_distroless (10aa518a3b9aabcf4a6401e226eff81c9602e654) | licenses: ["Apache-2.0"] | forks: null | events: null – null
# AUTO GENERATED
DEBIAN_PACKAGE_VERSIONS = {
"amd64": {
"debian10": {
"base-files": "10.3+deb10u12",
"ca-certificates": "20200601~deb10u2",
"libbz2-1.0": "1.0.6-9.2~deb10u1",
"libc-bin": "2.28-10+deb10u1",
"libc6": "2.28-10+deb10u1",
"libdb5.3": "5.3.28+dfsg1-0.5",
"libffi6": "3.2.1-9",
"libgcc1": "1:8.3.0-6",
"libgomp1": "8.3.0-6",
"liblzma5": "5.2.4-1+deb10u1",
"libreadline7": "7.0-5",
"libsqlite3-0": "3.27.2-3+deb10u1",
"libssl1.1": "1.1.1n-0+deb10u2",
"libstdcpp6": "8.3.0-6",
"mime-support": "3.62",
"netbase": "5.6",
"openssl": "1.1.1n-0+deb10u2",
"readline-common": "7.0-5",
"tzdata": "2021a-0+deb10u5",
},
"debian11": {
"base-files": "11.1+deb11u3",
"ca-certificates": "20210119",
"dash": "0.5.11+git20200708+dd9ef66-5",
"fontconfig-config": "2.13.1-4.2",
"fonts-dejavu-core": "2.37-2",
"libbrotli1": "1.0.9-2+b2",
"libbz2-1.0": "1.0.8-4",
"libc-bin": "2.31-13+deb11u3",
"libc6": "2.31-13+deb11u3",
"libcom-err2": "1.46.2-2",
"libcrypt1": "1:4.4.18-4",
"libdb5.3": "5.3.28+dfsg1-0.8",
"libexpat1": "2.2.10-2+deb11u3",
"libffi7": "3.3-6",
"libfontconfig1": "2.13.1-4.2",
"libfreetype6": "2.10.4+dfsg-1",
"libgcc-s1": "10.2.1-6",
"libglib2.0-0": "2.66.8-1",
"libgomp1": "10.2.1-6",
"libgraphite2-3": "1.3.14-1",
"libgssapi-krb5-2": "1.18.3-6+deb11u1",
"libharfbuzz0b": "2.7.4-1",
"libjpeg62-turbo": "1:2.0.6-4",
"libk5crypto3": "1.18.3-6+deb11u1",
"libkeyutils1": "1.6.1-2",
"libkrb5-3": "1.18.3-6+deb11u1",
"libkrb5support0": "1.18.3-6+deb11u1",
"liblcms2-2": "2.12~rc1-2",
"liblzma5": "5.2.5-2.1~deb11u1",
"libmpdec3": "2.5.1-1",
"libncursesw6": "6.2+20201114-2",
"libnsl2": "1.3.0-2",
"libpcre3": "2:8.39-13",
"libpng16-16": "1.6.37-3",
"libpython3.9-minimal": "3.9.2-1",
"libpython3.9-stdlib": "3.9.2-1",
"libreadline8": "8.1-1",
"libsqlite3-0": "3.34.1-3",
"libssl1.1": "1.1.1n-0+deb11u2",
"libstdcpp6": "10.2.1-6",
"libtinfo6": "6.2+20201114-2",
"libtirpc3": "1.3.1-1",
"libuuid1": "2.36.1-8+deb11u1",
"netbase": "6.3",
"openjdk-11-jdk-headless": "11.0.15+10-1~deb11u1",
"openjdk-11-jre-headless": "11.0.15+10-1~deb11u1",
"openjdk-17-jdk-headless": "17.0.3+7-1~deb11u1",
"openjdk-17-jre-headless": "17.0.3+7-1~deb11u1",
"openssl": "1.1.1n-0+deb11u2",
"python3-distutils": "3.9.2-1",
"python3.9-minimal": "3.9.2-1",
"tzdata": "2021a-1+deb11u4",
"zlib1g": "1:1.2.11.dfsg-2+deb11u1",
},
},
"arm": {
"debian10": {
"base-files": "10.3+deb10u12",
"ca-certificates": "20200601~deb10u2",
"libbz2-1.0": "1.0.6-9.2~deb10u1",
"libc-bin": "2.28-10+deb10u1",
"libc6": "2.28-10+deb10u1",
"libdb5.3": "5.3.28+dfsg1-0.5",
"libffi6": "3.2.1-9",
"libgcc1": "1:8.3.0-6",
"libgomp1": "8.3.0-6",
"liblzma5": "5.2.4-1+deb10u1",
"libreadline7": "7.0-5",
"libsqlite3-0": "3.27.2-3+deb10u1",
"libssl1.1": "1.1.1n-0+deb10u2",
"libstdcpp6": "8.3.0-6",
"mime-support": "3.62",
"netbase": "5.6",
"openssl": "1.1.1n-0+deb10u2",
"readline-common": "7.0-5",
"tzdata": "2021a-0+deb10u5",
},
"debian11": {
"base-files": "11.1+deb11u3",
"ca-certificates": "20210119",
"libc-bin": "2.31-13+deb11u3",
"libc6": "2.31-13+deb11u3",
"libgcc-s1": "10.2.1-6",
"libgomp1": "10.2.1-6",
"libssl1.1": "1.1.1n-0+deb11u2",
"libstdcpp6": "10.2.1-6",
"netbase": "6.3",
"openssl": "1.1.1n-0+deb11u2",
"tzdata": "2021a-1+deb11u4",
},
},
"arm64": {
"debian10": {
"base-files": "10.3+deb10u12",
"ca-certificates": "20200601~deb10u2",
"libbz2-1.0": "1.0.6-9.2~deb10u1",
"libc-bin": "2.28-10+deb10u1",
"libc6": "2.28-10+deb10u1",
"libdb5.3": "5.3.28+dfsg1-0.5",
"libffi6": "3.2.1-9",
"libgcc1": "1:8.3.0-6",
"libgomp1": "8.3.0-6",
"liblzma5": "5.2.4-1+deb10u1",
"libreadline7": "7.0-5",
"libsqlite3-0": "3.27.2-3+deb10u1",
"libssl1.1": "1.1.1n-0+deb10u2",
"libstdcpp6": "8.3.0-6",
"mime-support": "3.62",
"netbase": "5.6",
"openssl": "1.1.1n-0+deb10u2",
"readline-common": "7.0-5",
"tzdata": "2021a-0+deb10u5",
},
"debian11": {
"base-files": "11.1+deb11u3",
"ca-certificates": "20210119",
"dash": "0.5.11+git20200708+dd9ef66-5",
"fontconfig-config": "2.13.1-4.2",
"fonts-dejavu-core": "2.37-2",
"libbrotli1": "1.0.9-2+b2",
"libbz2-1.0": "1.0.8-4",
"libc-bin": "2.31-13+deb11u3",
"libc6": "2.31-13+deb11u3",
"libcom-err2": "1.46.2-2",
"libcrypt1": "1:4.4.18-4",
"libdb5.3": "5.3.28+dfsg1-0.8",
"libexpat1": "2.2.10-2+deb11u3",
"libffi7": "3.3-6",
"libfontconfig1": "2.13.1-4.2",
"libfreetype6": "2.10.4+dfsg-1",
"libgcc-s1": "10.2.1-6",
"libglib2.0-0": "2.66.8-1",
"libgomp1": "10.2.1-6",
"libgraphite2-3": "1.3.14-1",
"libgssapi-krb5-2": "1.18.3-6+deb11u1",
"libharfbuzz0b": "2.7.4-1",
"libjpeg62-turbo": "1:2.0.6-4",
"libk5crypto3": "1.18.3-6+deb11u1",
"libkeyutils1": "1.6.1-2",
"libkrb5-3": "1.18.3-6+deb11u1",
"libkrb5support0": "1.18.3-6+deb11u1",
"liblcms2-2": "2.12~rc1-2",
"liblzma5": "5.2.5-2.1~deb11u1",
"libmpdec3": "2.5.1-1",
"libncursesw6": "6.2+20201114-2",
"libnsl2": "1.3.0-2",
"libpcre3": "2:8.39-13",
"libpng16-16": "1.6.37-3",
"libpython3.9-minimal": "3.9.2-1",
"libpython3.9-stdlib": "3.9.2-1",
"libreadline8": "8.1-1",
"libsqlite3-0": "3.34.1-3",
"libssl1.1": "1.1.1n-0+deb11u2",
"libstdcpp6": "10.2.1-6",
"libtinfo6": "6.2+20201114-2",
"libtirpc3": "1.3.1-1",
"libuuid1": "2.36.1-8+deb11u1",
"netbase": "6.3",
"openjdk-11-jdk-headless": "11.0.15+10-1~deb11u1",
"openjdk-11-jre-headless": "11.0.15+10-1~deb11u1",
"openjdk-17-jdk-headless": "17.0.3+7-1~deb11u1",
"openjdk-17-jre-headless": "17.0.3+7-1~deb11u1",
"openssl": "1.1.1n-0+deb11u2",
"python3-distutils": "3.9.2-1",
"python3.9-minimal": "3.9.2-1",
"tzdata": "2021a-1+deb11u4",
"zlib1g": "1:1.2.11.dfsg-2+deb11u1",
},
},
"ppc64le": {
"debian10": {
"base-files": "10.3+deb10u12",
"ca-certificates": "20200601~deb10u2",
"libbz2-1.0": "1.0.6-9.2~deb10u1",
"libc-bin": "2.28-10+deb10u1",
"libc6": "2.28-10+deb10u1",
"libdb5.3": "5.3.28+dfsg1-0.5",
"libffi6": "3.2.1-9",
"libgcc1": "1:8.3.0-6",
"libgomp1": "8.3.0-6",
"liblzma5": "5.2.4-1+deb10u1",
"libreadline7": "7.0-5",
"libsqlite3-0": "3.27.2-3+deb10u1",
"libssl1.1": "1.1.1n-0+deb10u2",
"libstdcpp6": "8.3.0-6",
"mime-support": "3.62",
"netbase": "5.6",
"openssl": "1.1.1n-0+deb10u2",
"readline-common": "7.0-5",
"tzdata": "2021a-0+deb10u5",
},
"debian11": {
"base-files": "11.1+deb11u3",
"ca-certificates": "20210119",
"libc-bin": "2.31-13+deb11u3",
"libc6": "2.31-13+deb11u3",
"libgcc-s1": "10.2.1-6",
"libgomp1": "10.2.1-6",
"libssl1.1": "1.1.1n-0+deb11u2",
"libstdcpp6": "10.2.1-6",
"netbase": "6.3",
"openssl": "1.1.1n-0+deb11u2",
"tzdata": "2021a-1+deb11u4",
},
},
"s390x": {
"debian10": {
"base-files": "10.3+deb10u12",
"ca-certificates": "20200601~deb10u2",
"libbz2-1.0": "1.0.6-9.2~deb10u1",
"libc-bin": "2.28-10+deb10u1",
"libc6": "2.28-10+deb10u1",
"libdb5.3": "5.3.28+dfsg1-0.5",
"libffi6": "3.2.1-9",
"libgcc1": "1:8.3.0-6",
"libgomp1": "8.3.0-6",
"liblzma5": "5.2.4-1+deb10u1",
"libreadline7": "7.0-5",
"libsqlite3-0": "3.27.2-3+deb10u1",
"libssl1.1": "1.1.1n-0+deb10u2",
"libstdcpp6": "8.3.0-6",
"mime-support": "3.62",
"netbase": "5.6",
"openssl": "1.1.1n-0+deb10u2",
"readline-common": "7.0-5",
"tzdata": "2021a-0+deb10u5",
},
"debian11": {
"base-files": "11.1+deb11u3",
"ca-certificates": "20210119",
"libc-bin": "2.31-13+deb11u3",
"libc6": "2.31-13+deb11u3",
"libgcc-s1": "10.2.1-6",
"libgomp1": "10.2.1-6",
"libssl1.1": "1.1.1n-0+deb11u2",
"libstdcpp6": "10.2.1-6",
"netbase": "6.3",
"openssl": "1.1.1n-0+deb11u2",
"tzdata": "2021a-1+deb11u4",
},
},
}
avg_line_length: 38.313433 | max_line_length: 62 | alphanum_fraction: 0.425691
hexsha: d30a4a6656bb369dd93770d9789449b5296645b2 | size: 673 | ext: py | lang: Python
max_stars: src/uefi/BaseTools/Source/Python/AutoGen/__init__.py @ kkennett/oscore.dev (59e786f12f9af969211c95a9d2863b1767528341) | licenses: ["BSD-3-Clause"] | stars: 521 | events: 2019-03-29T15:44:08.000Z – 2022-03-22T09:46:19.000Z
max_issues: src/uefi/BaseTools/Source/Python/AutoGen/__init__.py @ kkennett/oscore.dev (59e786f12f9af969211c95a9d2863b1767528341) | licenses: ["BSD-3-Clause"] | issues: 30 | events: 2019-06-04T17:00:49.000Z – 2021-09-08T20:44:19.000Z
max_forks: src/uefi/BaseTools/Source/Python/AutoGen/__init__.py @ kkennett/oscore.dev (59e786f12f9af969211c95a9d2863b1767528341) | licenses: ["BSD-3-Clause"] | forks: 99 | events: 2019-03-29T16:04:13.000Z – 2022-03-28T16:59:34.000Z
## @file
# Python 'AutoGen' package initialization file.
#
# This file is required to make the Python interpreter treat the directory
# as a package.
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
__all__ = ["AutoGen"]
avg_line_length: 37.388889 | max_line_length: 84 | alphanum_fraction: 0.766716
hexsha: 812fb96b13d6ef94c9caf60f90b02c84fd6b3a31 | size: 2,084 | ext: py | lang: Python
max_stars: selenium_tests/test_A010_edit_password.py @ garazoli/conduit (a261f4a6c539e5294889bfecf2be01eba72203dc) | licenses: ["MIT"] | stars: null | events: null – null
max_issues: selenium_tests/test_A010_edit_password.py @ garazoli/conduit (a261f4a6c539e5294889bfecf2be01eba72203dc) | licenses: ["MIT"] | issues: null | events: null – null
max_forks: selenium_tests/test_A010_edit_password.py @ garazoli/conduit (a261f4a6c539e5294889bfecf2be01eba72203dc) | licenses: ["MIT"] | forks: null | events: null – null
# TC_020 test case
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from webdriver_manager.chrome import ChromeDriverManager
import time
import locators
# Headless mode
opt = Options()
opt.headless = True
driver = webdriver.Chrome(executable_path=ChromeDriverManager().install(), options=opt)
# Open the Conduit site
driver.get(locators.CON_URL)
time.sleep(10)
def test_edit_password():
    # Click the Sign in button
driver.find_element_by_xpath(locators.sign_in_x).click()
time.sleep(10)
    # Fill in the login credentials
def sign_in(email, password):
driver.find_element_by_xpath(locators.si_email_x).send_keys(email)
driver.find_element_by_xpath(locators.si_password_x).send_keys(password)
driver.find_element_by_xpath(locators.sign_in_button_x).click()
time.sleep(10)
sign_in('testuser3@example.com', 'Abcd123$')
    # Change the password
def edit_settings(password):
driver.find_element_by_xpath(locators.settings_x).click()
time.sleep(10)
driver.find_element_by_xpath(locators.new_password_x).send_keys(password)
driver.find_element_by_xpath(locators.update_button_x).click()
time.sleep(10)
edit_settings('Efgh456$')
assert driver.find_element_by_xpath(locators.update_success_x).text == 'Update successful!'
driver.find_element_by_xpath(locators.update_ok_button_x).click()
time.sleep(10)
    # Log out and sign in with the new password
driver.find_element_by_xpath(locators.log_out_x).click()
time.sleep(10)
driver.find_element_by_xpath(locators.sign_in_x).click()
time.sleep(10)
    # Sign in with the new credentials
sign_in('testuser3@example.com', 'Efgh456$')
assert driver.find_element_by_xpath(locators.user_x).text == 'testuser3'
    # Restore the original state:
edit_settings('Abcd123$')
assert driver.find_element_by_xpath(locators.update_success_x).text == 'Update successful!'
driver.find_element_by_xpath(locators.update_ok_button_x).click()
driver.close()
avg_line_length: 29.352113 | max_line_length: 95 | alphanum_fraction: 0.754319
hexsha: f9aff026c0bc3e9a8bb82011beb4da6859b8efc8 | size: 492 | ext: py | lang: Python
max_stars: RecoMET/METFilters/python/badGlobalMuonTaggersMiniAOD_cff.py @ ckamtsikis/cmssw (ea19fe642bb7537cbf58451dcf73aa5fd1b66250) | licenses: ["Apache-2.0"] | stars: 852 | events: 2015-01-11T21:03:51.000Z – 2022-03-25T21:14:00.000Z
max_issues: RecoMET/METFilters/python/badGlobalMuonTaggersMiniAOD_cff.py @ ckamtsikis/cmssw (ea19fe642bb7537cbf58451dcf73aa5fd1b66250) | licenses: ["Apache-2.0"] | issues: 30,371 | events: 2015-01-02T00:14:40.000Z – 2022-03-31T23:26:05.000Z
max_forks: RecoMET/METFilters/python/badGlobalMuonTaggersMiniAOD_cff.py @ ckamtsikis/cmssw (ea19fe642bb7537cbf58451dcf73aa5fd1b66250) | licenses: ["Apache-2.0"] | forks: 3,240 | events: 2015-01-02T05:53:18.000Z – 2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms
badGlobalMuonTaggerMAOD = cms.EDFilter("BadGlobalMuonTagger",
muons = cms.InputTag("slimmedMuons"),
vtx = cms.InputTag("offlineSlimmedPrimaryVertices"),
muonPtCut = cms.double(20),
selectClones = cms.bool(False),
taggingMode = cms.bool(False),
)
cloneGlobalMuonTaggerMAOD = badGlobalMuonTaggerMAOD.clone(
selectClones = True
)
noBadGlobalMuonsMAOD = cms.Sequence(~cloneGlobalMuonTaggerMAOD + ~badGlobalMuonTaggerMAOD)
avg_line_length: 30.75 | max_line_length: 90 | alphanum_fraction: 0.760163
hexsha: d67bad03c4372e1679128c5595a0c2419571273a | size: 31 | ext: py | lang: Python
max_stars: software_testing/__init__.py @ rafaelleinio/software-testing (0cb9e95b65675121360a0e47ad127666c6c5ae7d) | licenses: ["MIT"] | stars: 1 | events: 2021-02-28T02:12:35.000Z – 2021-02-28T02:12:35.000Z
max_issues: software_testing/__init__.py @ rafaelleinio/software-testing (0cb9e95b65675121360a0e47ad127666c6c5ae7d) | licenses: ["MIT"] | issues: null | events: null – null
max_forks: software_testing/__init__.py @ rafaelleinio/software-testing (0cb9e95b65675121360a0e47ad127666c6c5ae7d) | licenses: ["MIT"] | forks: null | events: null – null
"""Software testing module."""
avg_line_length: 15.5 | max_line_length: 30 | alphanum_fraction: 0.677419
hexsha: 702be65056d84d2408e87fbd324aca6a4b6d0b07 | size: 3,144 | ext: py | lang: Python
max_stars: configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco_custom.py @ Alphafrey946/Colocalization-of-fluorescent_signals-using-deep-learning-with-Manders-overlapping-coefficient (cc8386b6cc6cbe823983647119511aa87e121f23) | licenses: ["Apache-2.0"] | stars: 2 | events: 2021-02-23T04:42:07.000Z – 2021-02-23T07:22:42.000Z
max_issues: configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco_custom.py @ Alphafrey946/Colocalization-of-fluorescent_signals-using-deep-learning-with-Manders-overlapping-coefficient (cc8386b6cc6cbe823983647119511aa87e121f23) | licenses: ["Apache-2.0"] | issues: null | events: null – null
max_forks: configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco_custom.py @ Alphafrey946/Colocalization-of-fluorescent_signals-using-deep-learning-with-Manders-overlapping-coefficient (cc8386b6cc6cbe823983647119511aa87e121f23) | licenses: ["Apache-2.0"] | forks: 1 | events: 2021-02-23T04:42:08.000Z – 2021-02-23T04:42:08.000Z
_base_ = [
'../_base_/datasets/my_custom_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
type='FCOS',
pretrained='open-mmlab://detectron/resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='FCOSHead',
num_classes=3,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
total_epochs = 12
avg_line_length: 29.660377 | max_line_length: 75 | alphanum_fraction: 0.606234
hexsha: c052152f413e626eb8e3705cc403596f06d5dd12 | size: 4,011 | ext: py | lang: Python
max_stars: app/tests/v1/test_redflags.py @ timmwrite/ireporter (bc5a64740cf920010d8353985ece04a96ecdfafd) | licenses: ["MIT"] | stars: 2 | events: 2019-01-21T10:47:40.000Z – 2019-01-28T11:18:53.000Z
max_issues: app/tests/v1/test_redflags.py @ timmwrite/ireporter (bc5a64740cf920010d8353985ece04a96ecdfafd) | licenses: ["MIT"] | issues: 6 | events: 2018-11-22T10:15:09.000Z – 2021-04-20T17:55:41.000Z
max_forks: app/tests/v1/test_redflags.py @ timmwrite/ireporter (bc5a64740cf920010d8353985ece04a96ecdfafd) | licenses: ["MIT"] | forks: 3 | events: 2018-12-03T08:16:45.000Z – 2019-09-20T17:04:55.000Z
"""Tests for redflags run with pytest"""
import json
import unittest
from ... import create_app
class RedFlagTestCase(unittest.TestCase):
"""
This class represents the redflag test cases
"""
def setUp(self):
APP = create_app("testing")
self.app = APP.test_client()
self.redflag = {
"createdBy": 5,
"type": "red-flag",
"location": "66, 12",
"status": "resolved",
"images": "",
"videos": "",
"title": "NYS scandal",
"comment": "53"
}
def test_get_all_redflags(self):
"""method to test get all"""
response = self.app.get("/api/v1/red-flags")
self.assertEqual(response.status_code, 200)
def test_post_redflag(self):
response = self.app.post("/api/v1/red-flags", headers={'Content-Type': 'application/json'},
data=json.dumps(self.redflag))
result = json.loads(response.data)
self.assertEqual(response.status_code, 201)
self.assertIn('Created red-flag record', str(result))
def test_get_specific_redflag(self):
"""method to test if one can get a specific redflag"""
self.app.post("/api/v1/red-flags",
headers={'Content-Type': 'application/json'}, data=json.dumps(self.redflag))
response = self.app.get("/api/v1/red-flags/1")
json.loads(response.data)
self.assertEqual(response.status_code, 200)
def test_delete_specific_redflag(self):
"""method to test if one can delete a redflag"""
self.app.post("/api/v1/red-flags",
headers={'Content-Type': 'application/json'}, data=json.dumps(self.redflag))
response = self.app.delete("/api/v1/red-flags/1")
result = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertIn('red-flag record has been deleted', str(result))
def test_update_location_of_specific_redflag(self):
"""method to test edit of location"""
self.app.post("/api/v1/red-flags/1/location",
headers={'Content-Type': 'application/json'}, data=json.dumps(self.redflag))
response = self.app.patch("/api/v1/red-flags/1/location", headers={
'Content-Type': 'application/json'}, data=json.dumps({"location": "24.0 , 12.0"}))
result = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertIn("Updated red-flag record's location", str(result))
def test_update_comment_of_specific_redflag(self):
"""method to test edit of comment"""
self.app.post("/api/v1/red-flags/1/comment",
headers={'Content-Type': 'application/json'}, data=json.dumps(self.redflag))
response = self.app.patch("/api/v1/red-flags/1/comment", headers={'Content-Type': 'application/json'},
data=json.dumps({"comment": "hello cohart 35"}))
result = json.loads(response.data)
self.assertEqual(response.status_code, 200)
self.assertIn("Updated red-flag record's comment",
str(result))
def test_redflag_not_found(self):
"""Test a redflag not found"""
response = self.app.get("/api/v1/red-flags/100")
result = json.loads(response.data)
self.assertEqual(response.status_code, 404)
self.assertEqual(
result['error'], "red flag does not exit")
def test_wrong_comment_key(self):
"""Test wrong comment key used in redflag"""
response = self.app.patch("/api/v1/red-flags/1/comment", headers={'Content-Type': 'application/json'},
data=json.dumps({"comment1": "hello pac"}))
result = json.loads(response.data)
self.assertEqual(response.status_code, 500)
self.assertEqual(
result['error'], "KeyError Red-flag's comment not updated")
avg_line_length: 42.221053 | max_line_length: 116 | alphanum_fraction: 0.598853
hexsha: 6268f8cc116bed3a67002ab4b72b36478f37a962 | size: 210 | ext: py | lang: Python
max_stars: docker/dockerTrader/gateway/ibGateway/__init__.py @ OceanMT/vnpy_py3 (0901e9381c54e615247eb753bac476a911c9ae5d) | licenses: ["MIT"] | stars: null | events: null – null
max_issues: docker/dockerTrader/gateway/ibGateway/__init__.py @ OceanMT/vnpy_py3 (0901e9381c54e615247eb753bac476a911c9ae5d) | licenses: ["MIT"] | issues: null | events: null – null
max_forks: docker/dockerTrader/gateway/ibGateway/__init__.py @ OceanMT/vnpy_py3 (0901e9381c54e615247eb753bac476a911c9ae5d) | licenses: ["MIT"] | forks: null | events: null – null
# encoding: UTF-8
import vtConstant
from .ibGateway import IbGateway as gateway
gatewayName = 'IB'
gatewayDisplayName = gatewayName
gatewayType = vtConstant.GATEWAYTYPE_INTERNATIONAL
gatewayQryEnabled = False
avg_line_length: 23.333333 | max_line_length: 50 | alphanum_fraction: 0.833333
hexsha: fea4b5739933bd2964ac4b4e107c1a3808c56801 | size: 5,188 | ext: py | lang: Python
max_stars: aea/identity/base.py @ bryanchriswhite/agents-aea (d3f177a963eb855d9528555167255bf2b478f4ba) | licenses: ["Apache-2.0"] | stars: 126 | events: 2019-09-07T09:32:44.000Z – 2022-03-29T14:28:41.000Z
max_issues: aea/identity/base.py @ salman6049/agents-aea (d3f177a963eb855d9528555167255bf2b478f4ba) | licenses: ["Apache-2.0"] | issues: 1,814 | events: 2019-08-24T10:08:07.000Z – 2022-03-31T14:28:36.000Z
max_forks: aea/identity/base.py @ salman6049/agents-aea (d3f177a963eb855d9528555167255bf2b478f4ba) | licenses: ["Apache-2.0"] | forks: 46 | events: 2019-09-03T22:13:58.000Z – 2022-03-22T01:25:16.000Z
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the identity class."""
from typing import Dict, Optional
from aea.common import Address
from aea.configurations.constants import DEFAULT_LEDGER
from aea.exceptions import enforce
from aea.helpers.base import SimpleId, SimpleIdOrStr
class Identity:
"""
The identity holds the public elements identifying an agent.
It includes:
- the agent name
- the addresses, a map from address identifier to address (can be a single key-value pair)
"""
__slots__ = (
"_name",
"_address",
"_public_key",
"_public_keys",
"_addresses",
"_default_address_key",
)
def __init__(
self,
name: SimpleIdOrStr,
address: Optional[str] = None,
public_key: Optional[str] = None,
addresses: Optional[Dict[str, Address]] = None,
public_keys: Optional[Dict[str, str]] = None,
default_address_key: str = DEFAULT_LEDGER,
) -> None:
"""
Instantiate the identity.
:param name: the name of the agent.
:param address: the default address of the agent.
:param public_key: the public key of the agent.
:param addresses: the addresses of the agent.
:param public_keys: the public keys of the agent.
:param default_address_key: the key for the default address.
"""
self._name = SimpleId(name)
if default_address_key is None:
raise ValueError(
"Provide a key for the default address."
) # pragma: nocover
if (address is None) == (addresses is None):
raise ValueError(
"Either provide a single address or a dictionary of addresses, and not both."
)
if address is None:
if addresses is None or len(addresses) == 0: # pragma: nocover
raise ValueError("Provide at least one pair of addresses.")
if public_key is not None:
raise ValueError(
"If you provide a dictionary of addresses, you must not provide a single public key."
)
if public_keys is None:
raise ValueError(
"If you provide a dictionary of addresses, you must provide its corresponding dictionary of public keys."
)
enforce(
public_keys.keys() == addresses.keys(),
"Keys in public keys and addresses dictionaries do not match. They must be identical.",
)
enforce(
default_address_key in addresses and default_address_key in public_keys,
"The default address key must exist in both addresses and public keys dictionaries.",
)
address = addresses[default_address_key]
public_key = public_keys[default_address_key]
if addresses is None:
if public_keys is not None:
raise ValueError(
"If you provide a single address, you must not provide a dictionary of public keys."
)
if public_key is None:
raise ValueError(
"If you provide a single address, you must provide its corresponding public key."
)
addresses = {default_address_key: address}
public_keys = {default_address_key: public_key}
self._address = address
self._addresses = addresses
self._public_key = public_key
self._public_keys = public_keys
self._default_address_key = default_address_key
@property
def default_address_key(self) -> str:
"""Get the default address key."""
return self._default_address_key
@property
def name(self) -> str:
"""Get the agent name."""
return str(self._name)
@property
def addresses(self) -> Dict[str, Address]:
"""Get the addresses."""
return self._addresses
@property
def address(self) -> Address:
"""Get the default address."""
return self._address
@property
def public_keys(self) -> Dict[str, str]:
"""Get the public keys."""
return self._public_keys # type: ignore
@property
def public_key(self) -> str:
"""Get the default public key."""
return self._public_key # type: ignore
avg_line_length: 35.054054 | max_line_length: 125 | alphanum_fraction: 0.593678
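A short usage sketch for the Identity class in the file above, covering its two mutually exclusive construction modes; the agent name, addresses, and public keys are made-up placeholder strings:

```python
# Hypothetical sketch: constructing an Identity either from a single address or from a map of addresses.
from aea.identity.base import Identity

# Single-address form: give exactly one address plus its public key;
# both are registered under the default ledger key.
single = Identity(
    name="my_agent",
    address="fetch1exampleaddressplaceholder",    # placeholder
    public_key="02abcdef0123456789placeholder",   # placeholder
)
print(single.name, single.address)

# Multi-address form: addresses and public_keys must share identical keys,
# and default_address_key must be present in both.
multi = Identity(
    name="my_agent",
    addresses={"fetchai": "fetch1exampleaddressplaceholder",
               "ethereum": "0xExampleAddressPlaceholder"},
    public_keys={"fetchai": "02abcdef0123456789placeholder",
                 "ethereum": "04abcdef0123456789placeholder"},
    default_address_key="fetchai",
)
print(multi.address)  # the address registered under "fetchai"
```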
hexsha: f7bd493a4a6b9c29c45be6e230287ac9d5505f79 | size: 5,661 | ext: py | lang: Python
max_stars: links/tests.py @ timptner/farafmb.de (2b154278d8b44ea3adecafcb8554c1b0b0055e01) | licenses: ["MIT"] | stars: null | events: null – null
max_issues: links/tests.py @ timptner/farafmb.de (2b154278d8b44ea3adecafcb8554c1b0b0055e01) | licenses: ["MIT"] | issues: 1 | events: 2022-02-17T20:28:19.000Z – 2022-02-17T20:28:19.000Z
max_forks: links/tests.py @ timptner/farafmb.de (2b154278d8b44ea3adecafcb8554c1b0b0055e01) | licenses: ["MIT"] | forks: null | events: null – null
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
from django.http import HttpResponse, HttpResponseRedirect
from django.test import TestCase, RequestFactory, override_settings, SimpleTestCase
from django.urls import reverse
from .admin import LinkAdmin
from .forms import ChangeOrderForm
from .models import Link
from .views import change_order
class LinkTests(TestCase):
def test_object_representation(self):
link = Link.objects.create(url='https://www.example.org', text='Example', position=1)
self.assertEqual(str(link), link.text)
class LinkAdminTests(TestCase):
def setUp(self) -> None:
site = AdminSite()
self.factory = RequestFactory()
self.user = User.objects.create(username='john', email='john@example.org', password='secret')
self.admin = LinkAdmin(Link, site)
def test_initial_data(self):
request = self.factory.get(reverse('admin:links_link_changelist'))
request.user = self.user
initial_data = self.admin.get_changeform_initial_data(request)
self.assertEqual(initial_data, {'icon': 'fas fa-link'})
def test_mark_as_active_action(self):
request = self.factory.get(reverse('admin:links_link_changelist'))
request.user = self.user
# Required when using messages in action
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
Link.objects.bulk_create([
Link(url='https://www.example1.org', text='Example 1', position=1, is_active=False),
Link(url='https://www.example2.org', text='Example 2', position=2, is_active=False),
Link(url='https://www.example2.org', text='Example 3', position=3, is_active=False),
])
queryset = Link.objects.all()
self.admin.mark_as_active(request, queryset)
for link in queryset:
self.assertTrue(link.is_active)
def test_mark_as_inactive_action(self):
request = self.factory.get(reverse('admin:links_link_changelist'))
request.user = self.user
# Required when using messages in action
setattr(request, 'session', 'session')
messages = FallbackStorage(request)
setattr(request, '_messages', messages)
Link.objects.bulk_create([
Link(url='https://www.example1.org', text='Example 1', position=1, is_active=True),
Link(url='https://www.example2.org', text='Example 2', position=2, is_active=True),
Link(url='https://www.example2.org', text='Example 3', position=3, is_active=True),
])
queryset = Link.objects.all()
self.admin.mark_as_inactive(request, queryset)
for link in queryset:
self.assertFalse(link.is_active)
@override_settings(LANGUAGE_CODE='en-us')
class LinkViewTests(TestCase):
def test_index(self):
url = reverse('links:index')
self.assertEqual(url, '/links/')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
@override_settings(LANGUAGE_CODE='en-us')
class ChangeOrderFormTests(TestCase):
def test_clean_order_valid(self):
form = ChangeOrderForm({'order': '4,8,15,16,23,42'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['order'], [4, 8, 15, 16, 23, 42])
def test_clean_order_invalid(self):
form = ChangeOrderForm({'order': '4,8,x,23,42'})
self.assertFalse(form.is_valid())
self.assertIn('order', form.errors)
self.assertEqual(form.errors['order'], ["Not all positions are integers."])
def test_save(self):
link1 = Link.objects.create(position=0)
link2 = Link.objects.create(position=1)
link3 = Link.objects.create(position=2)
order = f'{link3.pk},{link2.pk},{link1.pk}'
form = ChangeOrderForm({'order': order})
self.assertTrue(form.is_valid())
form.save()
link1.refresh_from_db()
self.assertEqual(link1.position, 2)
link2.refresh_from_db()
self.assertEqual(link2.position, 1)
link3.refresh_from_db()
self.assertEqual(link3.position, 0)
class ChangeOrderViewTests(TestCase):
def setUp(self) -> None:
self.factory = RequestFactory()
self.user = User.objects.create_superuser(username='John', email='john@example.org', password='secret')
def test_change_order(self):
url = reverse('links:change_order')
self.assertEqual(url, '/links/change_order/')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
def test_change_order_get(self):
request = self.factory.get('links:change_order')
request.user = self.user
response = change_order(request)
self.assertEqual(response.status_code, 200)
def test_change_order_post(self):
link = Link.objects.create()
request = self.factory.post('links:change_order')
request.user = self.user
request.POST = {'order': f'{link.pk}'}
response = change_order(request)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.headers['Location'], reverse('links:change_order_done'))
class ChangeOrderDoneViewTests(SimpleTestCase):
def test_change_order_done(self):
url = reverse('links:change_order_done')
self.assertEqual(url, '/links/change_order/done/')
response = self.client.get(url)
self.assertEqual(response.status_code, 302)
avg_line_length: 39.587413 | max_line_length: 111 | alphanum_fraction: 0.671613
hexsha: 511843f2dcfdb8b7b3bd6a16b30b5f5e3871f92c | size: 1,411 | ext: py | lang: Python
max_stars: samples/openapi3/client/petstore/python-legacy/test/test_class_model.py @ JigarJoshi/openapi-generator (785535b8d6881b358463994823abbda2b26ff42e) | licenses: ["Apache-2.0"] | stars: 11,868 | events: 2018-05-12T02:58:07.000Z – 2022-03-31T21:19:39.000Z
max_issues: samples/openapi3/client/petstore/python-legacy/test/test_class_model.py @ JigarJoshi/openapi-generator (785535b8d6881b358463994823abbda2b26ff42e) | licenses: ["Apache-2.0"] | issues: 9,672 | events: 2018-05-12T14:25:43.000Z – 2022-03-31T23:59:30.000Z
max_forks: samples/openapi3/client/petstore/python-legacy/test/test_class_model.py @ JigarJoshi/openapi-generator (785535b8d6881b358463994823abbda2b26ff42e) | licenses: ["Apache-2.0"] | forks: 4,776 | events: 2018-05-12T12:06:08.000Z – 2022-03-31T19:52:51.000Z
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import petstore_api
from petstore_api.models.class_model import ClassModel # noqa: E501
from petstore_api.rest import ApiException
class TestClassModel(unittest.TestCase):
"""ClassModel unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ClassModel
        include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = petstore_api.models.class_model.ClassModel() # noqa: E501
if include_optional :
return ClassModel(
_class = ''
)
else :
return ClassModel(
)
def testClassModel(self):
"""Test ClassModel"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
avg_line_length: 27.134615 | max_line_length: 174 | alphanum_fraction: 0.666194
hexsha: 95c4be79e7b6e8cbffed4d2d0d16dbb8ad7d80c4 | size: 1,199 | ext: py | lang: Python
max_stars: tools/metrics/histograms/update_net_trust_anchors.py @ zipated/src (2b8388091c71e442910a21ada3d97ae8bc1845d3) | licenses: ["BSD-3-Clause"] | stars: 2,151 | events: 2020-04-18T07:31:17.000Z – 2022-03-31T08:39:18.000Z
max_issues: tools/metrics/histograms/update_net_trust_anchors.py @ cangulcan/src (2b8388091c71e442910a21ada3d97ae8bc1845d3) | licenses: ["BSD-3-Clause"] | issues: 395 | events: 2020-04-18T08:22:18.000Z – 2021-12-08T13:04:49.000Z
max_forks: tools/metrics/histograms/update_net_trust_anchors.py @ cangulcan/src (2b8388091c71e442910a21ada3d97ae8bc1845d3) | licenses: ["BSD-3-Clause"] | forks: 338 | events: 2020-04-18T08:03:10.000Z – 2022-03-29T12:33:22.000Z
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates NetTrustAnchors enum in histograms.xml file with values read
from net/data/ssl/root_stores/root_stores.json.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import json
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import path_util
import update_histogram_enum
NET_ROOT_CERTS_PATH = 'net/data/ssl/root_stores/root_stores.json'
def main():
if len(sys.argv) > 1:
print >>sys.stderr, 'No arguments expected!'
sys.stderr.write(__doc__)
sys.exit(1)
with open(path_util.GetInputFile(NET_ROOT_CERTS_PATH)) as f:
root_stores = json.load(f)
spki_enum = {}
spki_enum[0] = 'Unknown or locally-installed trust anchor'
for spki, spki_data in sorted(root_stores['spkis'].items()):
spki_enum[int(spki_data['id'])] = spki
update_histogram_enum.UpdateHistogramFromDict(
'NetTrustAnchors', spki_enum, NET_ROOT_CERTS_PATH,
os.path.basename(__file__))
if __name__ == '__main__':
main()
avg_line_length: 27.883721 | max_line_length: 74 | alphanum_fraction: 0.740617
hexsha: 8073802c60fe9f9b41fad29ad7abe384a7d8473e | size: 3,829 | ext: py | lang: Python
max_stars: platform/handlers/user.py @ THU-KEG/ECTE (db657c347b64ead086ccc79c2837df1235088b5d) | licenses: ["MIT"] | stars: null | events: null – null
max_issues: platform/handlers/user.py @ THU-KEG/ECTE (db657c347b64ead086ccc79c2837df1235088b5d) | licenses: ["MIT"] | issues: null | events: null – null
max_forks: platform/handlers/user.py @ THU-KEG/ECTE (db657c347b64ead086ccc79c2837df1235088b5d) | licenses: ["MIT"] | forks: 1 | events: 2021-08-31T03:23:37.000Z – 2021-08-31T03:23:37.000Z
import logging
import time
import tqdm
class User:
def __init__(self, user_id, recommender=None):
assert type(user_id) is int
self.user_id = user_id
self.answered_questions = []
self.answer_count = 0
self.diamond_count = 0
if recommender:
self.recommender = recommender
from . import main_handler
self.ignored = bool(
main_handler.data_provider.exesql(f"SELECT ignored FROM users WHERE id = {self.user_id}")[0][0])
self.blocked = bool(
main_handler.data_provider.exesql(f"SELECT blocked FROM users WHERE id = {self.user_id}")[0][0])
if self.ignored:
logging.info(f"user {self.user_id} is ignored")
if self.blocked:
logging.info(f"user {self.user_id} is blocked")
def ignore_me(self, is_init=False):
if self.ignored and not is_init:
logging.warning(f"Trying to ignore {self.user_id} which has already been ignored. Action canceled")
return
self.ignored = True
logging.info(f"exceed_max_answers_count = {self.recommender.exceed_max_answers_count} + {len(self.answered_questions)}")
self.recommender.exceed_max_answers_count += len(self.answered_questions)
logging.info(f"Ignoring user {self.user_id}")
for question in tqdm.tqdm(self.answered_questions):
question.unchoose(self)
def release_me(self):
if not self.ignored:
logging.warning(f"Trying to release {self.user_id} which has not been ignored. Action canceled")
return
self.ignored = False
logging.info(f"exceed_max_answers_count = {self.recommender.exceed_max_answers_count} - {len(self.answered_questions)}")
self.recommender.exceed_max_answers_count -= len(self.answered_questions)
logging.info(f"Releasing user {self.user_id}")
for question in tqdm.tqdm(self.answered_questions):
question.choose(self, question.answers[self.user_id])
def exploit(self):
best_question, best_match_score = None, -999999
for question in self.recommender.current_questions:
if question.is_task_valid and self.user_id not in question.answers:
question.given_time = time.time()
try:
                    # Our algorithm
score = (question.posterior @ self.alpha)
except AttributeError:
# KBIS algorithm directly return `question`
return question
logging.debug(f"{self.user_id} is not in {question.question_id}'s answers: {question.answers.keys()}")
logging.debug(f"{self.user_id} and {question.question_id}'s score is {score}'")
if score > best_match_score:
best_question = question
best_match_score = score
if not best_question:
raise RuntimeError(f"User {self.user_id} has been assigned all his/her valid questions in current question pool.\n "
f"But he/she may not answer it. Anyway there is no more to recommend.")
logging.info(f"recommending {best_question.question_id} for {self.user_id}")
return best_question
def get_partial_accuracy(self, answered_questions):
correct_count = 0
total_count = 0
for question in answered_questions:
if self.user_id in question.answers:
answer_node = question.answers[self.user_id]
if answer_node.get_subtree_path() == question.gold_answer:
correct_count += 1
else:
total_count += 1
if total_count:
self.partial_accuracy = correct_count / total_count
avg_line_length: 47.8625 | max_line_length: 128 | alphanum_fraction: 0.625751
hexsha: 936f5a5b622520c1b6863cc326e6d0cefd00b4d8 | size: 22,183 | ext: py | lang: Python
max_stars: cinderclient/tests/unit/v1/test_shell.py @ scottdangelo/cinderclient-api-microversions (a0df4c76f2959ffed08cf65fd53de03484b1c0bc) | licenses: ["CNRI-Python", "Apache-1.1"] | stars: null | events: null – null
max_issues: cinderclient/tests/unit/v1/test_shell.py @ scottdangelo/cinderclient-api-microversions (a0df4c76f2959ffed08cf65fd53de03484b1c0bc) | licenses: ["CNRI-Python", "Apache-1.1"] | issues: null | events: null – null
max_forks: cinderclient/tests/unit/v1/test_shell.py @ scottdangelo/cinderclient-api-microversions (a0df4c76f2959ffed08cf65fd53de03484b1c0bc) | licenses: ["CNRI-Python", "Apache-1.1"] | forks: null | events: null – null
# Copyright 2010 Jacob Kaplan-Moss
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from requests_mock.contrib import fixture as requests_mock_fixture
from cinderclient import client
from cinderclient import exceptions
from cinderclient import shell
from cinderclient.v1 import shell as shell_v1
from cinderclient.tests.unit.v1 import fakes
from cinderclient.tests.unit import utils
from cinderclient.tests.unit.fixture_data import keystone_client
class ShellTest(utils.TestCase):
FAKE_ENV = {
'CINDER_USERNAME': 'username',
'CINDER_PASSWORD': 'password',
'CINDER_PROJECT_ID': 'project_id',
'OS_VOLUME_API_VERSION': '1',
'CINDER_URL': keystone_client.BASE_URL,
}
# Patch os.environ to avoid required auth info.
def setUp(self):
"""Run before each test."""
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(fixtures.EnvironmentVariable(var,
self.FAKE_ENV[var]))
self.shell = shell.OpenStackCinderShell()
# HACK(bcwaldon): replace this when we start using stubs
self.old_get_client_class = client.get_client_class
client.get_client_class = lambda *_: fakes.FakeClient
self.requests = self.useFixture(requests_mock_fixture.Fixture())
self.requests.register_uri(
'GET', keystone_client.BASE_URL,
text=keystone_client.keystone_request_callback)
def tearDown(self):
        # For some methods, like test_image_meta_bad_action, we test that a
        # SystemExit is raised, so the self.shell object may not have been
        # instantiated, which is OK in this case. We therefore make sure the
        # attribute is there before using it.
if hasattr(self.shell, 'cs'):
self.shell.cs.clear_callstack()
# HACK(bcwaldon): replace this when we start using stubs
client.get_client_class = self.old_get_client_class
super(ShellTest, self).tearDown()
def run_command(self, cmd):
self.shell.main(cmd.split())
def assert_called(self, method, url, body=None, **kwargs):
return self.shell.cs.assert_called(method, url, body, **kwargs)
def assert_called_anytime(self, method, url, body=None):
return self.shell.cs.assert_called_anytime(method, url, body)
def test_extract_metadata(self):
# mimic the result of argparse's parse_args() method
class Arguments(object):
def __init__(self, metadata=None):
self.metadata = metadata or []
inputs = [
([], {}),
(["key=value"], {"key": "value"}),
(["key"], {"key": None}),
(["k1=v1", "k2=v2"], {"k1": "v1", "k2": "v2"}),
(["k1=v1", "k2"], {"k1": "v1", "k2": None}),
(["k1", "k2=v2"], {"k1": None, "k2": "v2"})
]
for input in inputs:
args = Arguments(metadata=input[0])
self.assertEqual(input[1], shell_v1._extract_metadata(args))
def test_translate_volume_keys(self):
cs = fakes.FakeClient()
v = cs.volumes.list()[0]
setattr(v, 'os-vol-tenant-attr:tenant_id', 'fake_tenant')
setattr(v, '_info', {'attachments': [{'server_id': 1234}],
'id': 1234, 'display_name': 'sample-volume',
'os-vol-tenant-attr:tenant_id': 'fake_tenant'})
shell_v1._translate_volume_keys([v])
self.assertEqual(v.tenant_id, 'fake_tenant')
def test_list(self):
self.run_command('list')
# NOTE(jdg): we default to detail currently
self.assert_called('GET', '/volumes/detail')
def test_list_filter_tenant_with_all_tenants(self):
self.run_command('list --tenant=123 --all-tenants 1')
self.assert_called('GET',
'/volumes/detail?all_tenants=1&project_id=123')
def test_list_filter_tenant_without_all_tenants(self):
self.run_command('list --tenant=123')
self.assert_called('GET',
'/volumes/detail?all_tenants=1&project_id=123')
def test_metadata_args_with_limiter(self):
self.run_command('create --metadata key1="--test1" 1')
expected = {'volume': {'snapshot_id': None,
'display_description': None,
'source_volid': None,
'status': 'creating',
'size': 1,
'volume_type': None,
'imageRef': None,
'availability_zone': None,
'attach_status': 'detached',
'user_id': None,
'project_id': None,
'metadata': {'key1': '"--test1"'},
'display_name': None}}
self.assert_called_anytime('POST', '/volumes', expected)
def test_metadata_args_limiter_display_name(self):
self.run_command('create --metadata key1="--t1" --display-name="t" 1')
expected = {'volume': {'snapshot_id': None,
'display_description': None,
'source_volid': None,
'status': 'creating',
'size': 1,
'volume_type': None,
'imageRef': None,
'availability_zone': None,
'attach_status': 'detached',
'user_id': None,
'project_id': None,
'metadata': {'key1': '"--t1"'},
'display_name': '"t"'}}
self.assert_called_anytime('POST', '/volumes', expected)
def test_delimit_metadata_args(self):
self.run_command('create --metadata key1="test1" key2="test2" 1')
expected = {'volume': {'snapshot_id': None,
'display_description': None,
'source_volid': None,
'status': 'creating',
'size': 1,
'volume_type': None,
'imageRef': None,
'availability_zone': None,
'attach_status': 'detached',
'user_id': None,
'project_id': None,
'metadata': {'key1': '"test1"',
'key2': '"test2"'},
'display_name': None}}
self.assert_called_anytime('POST', '/volumes', expected)
def test_delimit_metadata_args_display_name(self):
self.run_command('create --metadata key1="t1" --display-name="t" 1')
expected = {'volume': {'snapshot_id': None,
'display_description': None,
'source_volid': None,
'status': 'creating',
'size': 1,
'volume_type': None,
'imageRef': None,
'availability_zone': None,
'attach_status': 'detached',
'user_id': None,
'project_id': None,
'metadata': {'key1': '"t1"'},
'display_name': '"t"'}}
self.assert_called_anytime('POST', '/volumes', expected)
def test_list_filter_status(self):
self.run_command('list --status=available')
self.assert_called('GET', '/volumes/detail?status=available')
def test_list_filter_display_name(self):
self.run_command('list --display-name=1234')
self.assert_called('GET', '/volumes/detail?display_name=1234')
def test_list_all_tenants(self):
self.run_command('list --all-tenants=1')
self.assert_called('GET', '/volumes/detail?all_tenants=1')
def test_list_availability_zone(self):
self.run_command('availability-zone-list')
self.assert_called('GET', '/os-availability-zone')
def test_list_limit(self):
self.run_command('list --limit=10')
self.assert_called('GET', '/volumes/detail?limit=10')
def test_show(self):
self.run_command('show 1234')
self.assert_called('GET', '/volumes/1234')
def test_delete(self):
self.run_command('delete 1234')
self.assert_called('DELETE', '/volumes/1234')
def test_delete_by_name(self):
self.run_command('delete sample-volume')
self.assert_called_anytime('GET', '/volumes/detail?all_tenants=1&'
'display_name=sample-volume')
self.assert_called('DELETE', '/volumes/1234')
def test_delete_multiple(self):
self.run_command('delete 1234 5678')
self.assert_called_anytime('DELETE', '/volumes/1234')
self.assert_called('DELETE', '/volumes/5678')
def test_backup(self):
self.run_command('backup-create 1234')
self.assert_called('POST', '/backups')
def test_restore(self):
self.run_command('backup-restore 1234')
self.assert_called('POST', '/backups/1234/restore')
def test_snapshot_list_filter_volume_id(self):
self.run_command('snapshot-list --volume-id=1234')
self.assert_called('GET', '/snapshots/detail?volume_id=1234')
def test_snapshot_list_filter_status_and_volume_id(self):
self.run_command('snapshot-list --status=available --volume-id=1234')
self.assert_called('GET', '/snapshots/detail?'
'status=available&volume_id=1234')
def test_rename(self):
# basic rename with positional arguments
self.run_command('rename 1234 new-name')
expected = {'volume': {'display_name': 'new-name'}}
self.assert_called('PUT', '/volumes/1234', body=expected)
# change description only
self.run_command('rename 1234 --display-description=new-description')
expected = {'volume': {'display_description': 'new-description'}}
self.assert_called('PUT', '/volumes/1234', body=expected)
# rename and change description
self.run_command('rename 1234 new-name '
'--display-description=new-description')
expected = {'volume': {
'display_name': 'new-name',
'display_description': 'new-description',
}}
self.assert_called('PUT', '/volumes/1234', body=expected)
# Call rename with no arguments
self.assertRaises(SystemExit, self.run_command, 'rename')
def test_rename_snapshot(self):
# basic rename with positional arguments
self.run_command('snapshot-rename 1234 new-name')
expected = {'snapshot': {'display_name': 'new-name'}}
self.assert_called('PUT', '/snapshots/1234', body=expected)
# change description only
self.run_command('snapshot-rename 1234 '
'--display-description=new-description')
expected = {'snapshot': {'display_description': 'new-description'}}
self.assert_called('PUT', '/snapshots/1234', body=expected)
# snapshot-rename and change description
self.run_command('snapshot-rename 1234 new-name '
'--display-description=new-description')
expected = {'snapshot': {
'display_name': 'new-name',
'display_description': 'new-description',
}}
self.assert_called('PUT', '/snapshots/1234', body=expected)
# Call snapshot-rename with no arguments
self.assertRaises(SystemExit, self.run_command, 'snapshot-rename')
def test_set_metadata_set(self):
self.run_command('metadata 1234 set key1=val1 key2=val2')
self.assert_called('POST', '/volumes/1234/metadata',
{'metadata': {'key1': 'val1', 'key2': 'val2'}})
def test_set_metadata_delete_dict(self):
self.run_command('metadata 1234 unset key1=val1 key2=val2')
self.assert_called('DELETE', '/volumes/1234/metadata/key1')
self.assert_called('DELETE', '/volumes/1234/metadata/key2', pos=-2)
def test_set_metadata_delete_keys(self):
self.run_command('metadata 1234 unset key1 key2')
self.assert_called('DELETE', '/volumes/1234/metadata/key1')
self.assert_called('DELETE', '/volumes/1234/metadata/key2', pos=-2)
def test_reset_state(self):
self.run_command('reset-state 1234')
expected = {'os-reset_status': {'status': 'available'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_reset_state_attach(self):
self.run_command('reset-state --state in-use 1234')
expected = {'os-reset_status': {'status': 'in-use'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_reset_state_with_flag(self):
self.run_command('reset-state --state error 1234')
expected = {'os-reset_status': {'status': 'error'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_reset_state_multiple(self):
self.run_command('reset-state 1234 5678 --state error')
expected = {'os-reset_status': {'status': 'error'}}
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
self.assert_called_anytime('POST', '/volumes/5678/action',
body=expected)
def test_reset_state_two_with_one_nonexistent(self):
cmd = 'reset-state 1234 123456789'
self.assertRaises(exceptions.CommandError, self.run_command, cmd)
expected = {'os-reset_status': {'status': 'available'}}
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_reset_state_one_with_one_nonexistent(self):
cmd = 'reset-state 123456789'
self.assertRaises(exceptions.CommandError, self.run_command, cmd)
def test_snapshot_reset_state(self):
self.run_command('snapshot-reset-state 1234')
expected = {'os-reset_status': {'status': 'available'}}
self.assert_called('POST', '/snapshots/1234/action', body=expected)
def test_snapshot_reset_state_with_flag(self):
self.run_command('snapshot-reset-state --state error 1234')
expected = {'os-reset_status': {'status': 'error'}}
self.assert_called('POST', '/snapshots/1234/action', body=expected)
def test_snapshot_reset_state_multiple(self):
self.run_command('snapshot-reset-state 1234 5678')
expected = {'os-reset_status': {'status': 'available'}}
self.assert_called_anytime('POST', '/snapshots/1234/action',
body=expected)
self.assert_called_anytime('POST', '/snapshots/5678/action',
body=expected)
def test_encryption_type_list(self):
"""
Test encryption-type-list shell command.
Verify a series of GET requests are made:
- one to get the volume type list information
- one per volume type to retrieve the encryption type information
"""
self.run_command('encryption-type-list')
self.assert_called_anytime('GET', '/types')
self.assert_called_anytime('GET', '/types/1/encryption')
self.assert_called_anytime('GET', '/types/2/encryption')
def test_encryption_type_show(self):
"""
Test encryption-type-show shell command.
Verify two GET requests are made per command invocation:
- one to get the volume type information
- one to get the encryption type information
"""
self.run_command('encryption-type-show 1')
self.assert_called('GET', '/types/1/encryption')
self.assert_called_anytime('GET', '/types/1')
def test_encryption_type_create(self):
"""
Test encryption-type-create shell command.
Verify GET and POST requests are made per command invocation:
- one GET request to retrieve the relevant volume type information
- one POST request to create the new encryption type
"""
expected = {'encryption': {'cipher': None, 'key_size': None,
'provider': 'TestProvider',
'control_location': 'front-end'}}
self.run_command('encryption-type-create 2 TestProvider')
self.assert_called('POST', '/types/2/encryption', body=expected)
self.assert_called_anytime('GET', '/types/2')
def test_encryption_type_update(self):
"""
Test encryption-type-update shell command.
Verify two GETs/one PUT requests are made per command invocation:
- one GET request to retrieve the relevant volume type information
- one GET request to retrieve the relevant encryption type information
- one PUT request to update the encryption type information
"""
self.skipTest("Not implemented")
def test_encryption_type_delete(self):
"""
Test encryption-type-delete shell command.
Verify one GET/one DELETE requests are made per command invocation:
- one GET request to retrieve the relevant volume type information
- one DELETE request to delete the encryption type information
"""
self.run_command('encryption-type-delete 1')
self.assert_called('DELETE', '/types/1/encryption/provider')
self.assert_called_anytime('GET', '/types/1')
def test_migrate_volume(self):
self.run_command('migrate 1234 fakehost --force-host-copy=True')
expected = {'os-migrate_volume': {'force_host_copy': 'True',
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_snapshot_metadata_set(self):
self.run_command('snapshot-metadata 1234 set key1=val1 key2=val2')
self.assert_called('POST', '/snapshots/1234/metadata',
{'metadata': {'key1': 'val1', 'key2': 'val2'}})
def test_snapshot_metadata_unset_dict(self):
self.run_command('snapshot-metadata 1234 unset key1=val1 key2=val2')
self.assert_called_anytime('DELETE', '/snapshots/1234/metadata/key1')
self.assert_called_anytime('DELETE', '/snapshots/1234/metadata/key2')
def test_snapshot_metadata_unset_keys(self):
self.run_command('snapshot-metadata 1234 unset key1 key2')
self.assert_called_anytime('DELETE', '/snapshots/1234/metadata/key1')
self.assert_called_anytime('DELETE', '/snapshots/1234/metadata/key2')
def test_volume_metadata_update_all(self):
self.run_command('metadata-update-all 1234 key1=val1 key2=val2')
self.assert_called('PUT', '/volumes/1234/metadata',
{'metadata': {'key1': 'val1', 'key2': 'val2'}})
def test_snapshot_metadata_update_all(self):
self.run_command('snapshot-metadata-update-all\
1234 key1=val1 key2=val2')
self.assert_called('PUT', '/snapshots/1234/metadata',
{'metadata': {'key1': 'val1', 'key2': 'val2'}})
def test_readonly_mode_update(self):
self.run_command('readonly-mode-update 1234 True')
expected = {'os-update_readonly_flag': {'readonly': True}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
self.run_command('readonly-mode-update 1234 False')
expected = {'os-update_readonly_flag': {'readonly': False}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_service_disable(self):
self.run_command('service-disable host cinder-volume')
self.assert_called('PUT', '/os-services/disable',
{"binary": "cinder-volume", "host": "host"})
def test_services_disable_with_reason(self):
cmd = 'service-disable host cinder-volume --reason no_reason'
self.run_command(cmd)
body = {'host': 'host', 'binary': 'cinder-volume',
'disabled_reason': 'no_reason'}
self.assert_called('PUT', '/os-services/disable-log-reason', body)
def test_service_enable(self):
self.run_command('service-enable host cinder-volume')
self.assert_called('PUT', '/os-services/enable',
{"binary": "cinder-volume", "host": "host"})
def test_snapshot_delete(self):
self.run_command('snapshot-delete 1234')
self.assert_called('DELETE', '/snapshots/1234')
def test_quota_delete(self):
self.run_command('quota-delete 1234')
self.assert_called('DELETE', '/os-quota-sets/1234')
def test_snapshot_delete_multiple(self):
self.run_command('snapshot-delete 1234 5678')
self.assert_called('DELETE', '/snapshots/5678')
def test_list_transfer(self):
self.run_command('transfer-list')
self.assert_called('GET', '/os-volume-transfer/detail')
def test_list_transfer_all_tenants(self):
self.run_command('transfer-list --all-tenants=1')
self.assert_called('GET', '/os-volume-transfer/detail?all_tenants=1')
| 44.366
| 78
| 0.598882
|
527f4b849972092777545c6dac8e2efb9efd4eb5
| 1,100
|
py
|
Python
|
src/stage_00_template.py
|
sarikamohan08/transfer-learning-ANN
|
d41d1f8a9a8d8e22016b1fb5116cfe1a36943d38
|
[
"MIT"
] | 1
|
2021-11-24T03:46:22.000Z
|
2021-11-24T03:46:22.000Z
|
src/stage_00_template.py
|
sarikamohan08/transfer-learning-ANN
|
d41d1f8a9a8d8e22016b1fb5116cfe1a36943d38
|
[
"MIT"
] | null | null | null |
src/stage_00_template.py
|
sarikamohan08/transfer-learning-ANN
|
d41d1f8a9a8d8e22016b1fb5116cfe1a36943d38
|
[
"MIT"
] | null | null | null |
import argparse
import os
import shutil
from tqdm import tqdm
import logging
from src.utils.common import read_yaml, create_directories
import random
STAGE = "STAGE_NAME" ## <<< change stage name
logging.basicConfig(
filename=os.path.join("logs", 'running_logs.log'),
level=logging.INFO,
format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
filemode="a"
)
def main(config_path, params_path):
## read config files
config = read_yaml(config_path)
params = read_yaml(params_path)
pass
if __name__ == '__main__':
args = argparse.ArgumentParser()
args.add_argument("--config", "-c", default="configs/config.yaml")
args.add_argument("--params", "-p", default="params.yaml")
parsed_args = args.parse_args()
try:
logging.info("\n********************")
logging.info(f">>>>> stage {STAGE} started <<<<<")
main(config_path=parsed_args.config, params_path=parsed_args.params)
logging.info(f">>>>> stage {STAGE} completed!<<<<<\n")
except Exception as e:
logging.exception(e)
raise e
| 27.5
| 76
| 0.645455
|
52dc173b4bfd70fde11d49a7817ff0a7a5eed14f
| 2,582
|
py
|
Python
|
cli/aws_orbit/remote_files/cdk/lambda_sources/eks_service_handler/index.py
|
srinivasreddych/aws-orbit-workbench
|
2d154addff58d26f5459a73c06148aaf5e9fad46
|
[
"Apache-2.0"
] | 94
|
2021-03-19T19:55:11.000Z
|
2022-03-31T19:50:01.000Z
|
cli/aws_orbit/remote_files/cdk/lambda_sources/eks_service_handler/index.py
|
srinivasreddych/aws-orbit-workbench
|
2d154addff58d26f5459a73c06148aaf5e9fad46
|
[
"Apache-2.0"
] | 410
|
2021-03-19T18:04:48.000Z
|
2022-03-22T13:56:53.000Z
|
cli/aws_orbit/remote_files/cdk/lambda_sources/eks_service_handler/index.py
|
srinivasreddych/aws-orbit-workbench
|
2d154addff58d26f5459a73c06148aaf5e9fad46
|
[
"Apache-2.0"
] | 24
|
2021-03-19T23:16:23.000Z
|
2022-03-04T01:05:18.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from typing import Any, Dict, List, Optional
import boto3
logger = logging.getLogger()
logger.setLevel(logging.INFO)
REGION: str = os.environ["REGION"]
def get_nodegroups(cluster_name: str) -> List[Dict[str, Dict[str, str]]]:
return_response: List[Dict[str, Dict[str, str]]] = []
eks_client = boto3.client("eks")
try:
nodegroups_response = eks_client.list_nodegroups(clusterName=cluster_name)
for nodegroup_name in nodegroups_response["nodegroups"]:
nodegroup_details = eks_client.describe_nodegroup(clusterName=cluster_name, nodegroupName=nodegroup_name)
if "nodegroup" in nodegroup_details:
nodegroup = nodegroup_details["nodegroup"]
nodegroup_dict = {
"nodegroup_name": nodegroup_name,
"instance_types": nodegroup["instanceTypes"],
"scaling_config": json.dumps(nodegroup["scalingConfig"]).encode("utf-8"),
"status": nodegroup["status"],
"capacity_type": nodegroup["capacityType"],
}
# Get Disk size
if "diskSize" in nodegroup:
nodegroup_dict["disk_size"] = nodegroup["diskSize"]
# Get Launch Template details
if "launchTemplate" in nodegroup:
nodegroup_dict["launch_template"] = nodegroup["launchTemplate"]
return_response.append(nodegroup_dict)
except Exception as ekse:
logger.error("Error describing cluster %s nodegroups: %s", cluster_name, ekse)
raise ekse
return return_response
def handler(event: Dict[str, Any], context: Optional[Dict[str, Any]]) -> List[Dict[str, Dict[str, str]]]:
cluster_name = event["cluster_name"]
nodegroups: List[Dict[str, Dict[str, str]]] = get_nodegroups(cluster_name=cluster_name)
return nodegroups
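A minimal local smoke test for the handler above, sketched as if appended to the same module; it assumes valid AWS credentials, REGION set in the environment before import, and an existing EKS cluster (the name "example-cluster" is hypothetical):
if __name__ == "__main__":
    # Sketch only: requires AWS access and a real cluster name.
    sample_event = {"cluster_name": "example-cluster"}
    for ng in handler(sample_event, context=None):
        print(ng["nodegroup_name"], ng["status"], ng["instance_types"])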
| 41.645161
| 117
| 0.659566
|
d5a74da4c429e3b6ed11953d4f05038d4e99b050
| 2,659
|
py
|
Python
|
src/functions.py
|
waldoeGeek/ulauncher-reddit-search
|
54a67b1137c412350e6d447c15c88c0526d1e4f1
|
[
"MIT"
] | null | null | null |
src/functions.py
|
waldoeGeek/ulauncher-reddit-search
|
54a67b1137c412350e6d447c15c88c0526d1e4f1
|
[
"MIT"
] | null | null | null |
src/functions.py
|
waldoeGeek/ulauncher-reddit-search
|
54a67b1137c412350e6d447c15c88c0526d1e4f1
|
[
"MIT"
] | null | null | null |
import os
from urllib.parse import urlparse
import urllib.request as req
import requests
base_url = 'https://reddit.com/search.json?q='
class Searches:
def save_thumbnail(thumb_url):
# get thumbnail
file = urlparse(thumb_url)
# get thumbnail filename
file_path = os.path.basename(file.path)
icon_path = 'images/thumbs/' + file_path
url = thumb_url
req.urlretrieve(url, icon_path)
def result_type_url(url, query, type):
url = url
url = url + query + '&type=' + type
return url
def get_results(url, query):
type = 'all'
query = query
url = 'https://www.reddit.com/search.json?q=' + query
response = requests.get(url, headers = {'User-Agent': 'google'})
data = response.json()
data = data['data']['children']
#Check for empty results
if not data:
return 'No Results!'
else:
for child in data:
child['data']['subreddit'] = f"/r/{child['data']['subreddit']}"
return data
def get_users(url, query):
type = 'user'
query = query
# url = 'https://reddit.com/search.json?q=knives&type=user'
url = Searches.result_type_url(url, query, type)
# url = url + query + '&type=' + type
response = requests.get(url, headers = {'User-Agent': 'google'})
data = response.json()
data = data['data']['children']
# Check for empty results
if not data:
return 'No Results!'
else:
for child in data:
user_url = f"{child['data']['subreddit']['url']}"
child['data']['permalink'] = user_url
child['data']['subreddit'] = f"/{child['data']['subreddit']['display_name_prefixed']}"
child['data']['title'] = child['data']['name']
return data
def get_subs(url, query):
type = 'sr'
query = query
url = Searches.result_type_url(url, query, type)
response = requests.get(url, headers = {'User-Agent': 'google'})
data = response.json()
data = data['data']['children']
# Check for empty results
if not data:
return 'No Results'
else:
for child in data:
sub_reddit_value = child['data']['public_description']
child['data']['subreddit'] = sub_reddit_value
child['data']['title'] = child['data']['display_name_prefixed']
child['data']['permalink'] = f"/{child['data']['title']}"
return data
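A minimal usage sketch for the helpers above, assuming it is appended to the same module and that reddit.com is reachable; the query string is arbitrary:
if __name__ == "__main__":
    results = Searches.get_subs(base_url, "python")
    if results == 'No Results':
        print(results)
    else:
        # Print the first few subreddit titles and their links.
        for child in results[:5]:
            print(child['data']['title'], '->', child['data']['permalink'])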
| 27.412371
| 102
| 0.540429
|
3a5c8d5a35a6dc325b97521a19c27dfc920d5407
| 331
|
py
|
Python
|
LeetCodePython/hammingDistanceSln.py
|
YouenZeng/LeetCode
|
626da7e4fa8d84cd833355ad14ea5ea39fe0c3b9
|
[
"MIT"
] | 1
|
2019-06-04T21:40:46.000Z
|
2019-06-04T21:40:46.000Z
|
LeetCodePython/hammingDistanceSln.py
|
YouenZeng/LeetCode
|
626da7e4fa8d84cd833355ad14ea5ea39fe0c3b9
|
[
"MIT"
] | 1
|
2018-06-04T01:55:25.000Z
|
2018-06-04T01:55:25.000Z
|
LeetCodePython/hammingDistanceSln.py
|
YouenZeng/LeetCode
|
626da7e4fa8d84cd833355ad14ea5ea39fe0c3b9
|
[
"MIT"
] | null | null | null |
class Solution:
def hammingDistance(self, x, y):
"""
:type x: int
:type y: int
:rtype: int
"""
count = 0
while(x != 0 or y != 0):
if x % 2 != y % 2:
count = count + 1
x = int(x / 2)
y = int(y / 2)
return count
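The loop above compares the parity of x and y bit by bit; an equivalent sketch (not part of the original solution) XORs the inputs and counts the set bits, since a bit of x ^ y is 1 exactly where x and y differ:
def hamming_distance_xor(x: int, y: int) -> int:
    # Equivalent approach: XOR, then popcount via bin().
    return bin(x ^ y).count("1")
assert hamming_distance_xor(1, 4) == 2  # 001 vs 100 differ in two bits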
| 20.6875
| 36
| 0.356495
|
2a828ca43e892bfa54ae5e6675d4d7fb82861721
| 4,154
|
py
|
Python
|
vel_cont.py
|
npav5057/NSEpython
|
cec7f0af83e8414a0b18829c7bd923a71bea5ff3
|
[
"MIT"
] | 1
|
2021-06-29T16:30:57.000Z
|
2021-06-29T16:30:57.000Z
|
vel_cont.py
|
npav5057/NSEpython
|
cec7f0af83e8414a0b18829c7bd923a71bea5ff3
|
[
"MIT"
] | null | null | null |
vel_cont.py
|
npav5057/NSEpython
|
cec7f0af83e8414a0b18829c7bd923a71bea5ff3
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import BoundaryNorm
import numpy as np
import matplotlib as mpl
from numpy.core.fromnumeric import ptp
from skimage.transform import resize
import os
import sys
from glob import glob
# Enable looped gif by overwriting the PillowWriter class and adding loop=0
if(len(sys.argv)<2):
    print("Folder name not entered")
    print("Enter the correct folder name as a system argument")
exit()
class myPillow(anim.PillowWriter):
def finish(self):
self._frames[0].save(
self._outfile, save_all=True, append_images=self._frames[1:],
duration=int(1000 / self.fps), loop=0)
home=os.getcwd()
path=home+"/OUTPUT/"+sys.argv[1]
if os.path.exists(path):
os.chdir(path)
print(path)
else:
    print("Specified folder doesn't exist")
    print("check the path:", path)
    print("or enter the correct folder name as a system argument")
exit()
files=sorted(glob("*npz"))
print(len(files))
if(len(files)>3000):
files=files[::10]
# exit()
data = np.load(files[10])
u=data['vx']
v=data['vy']
# v=v[::10]
# u=u[::10]
del data
s = np.sqrt(u**2+ v**2)
u_red = resize(u,(11,11))
v_red = resize(v,(11,11))
print(u.shape)
print(len(files))
X,Y = np.meshgrid(np.linspace(0, 1,u.shape[0]), np.linspace(0, 1, u.shape[1]))
cmap = mpl.cm.get_cmap('viridis')
norm = BoundaryNorm(np.linspace(0, 1.5, 21), cmap.N)
ct=0
for i in range(len(files)):
ct+=1
if(ct>1):
if(ct==20):
ct=0
# for only 0.1 secs
continue
fig = plt.figure()
ax = plt.axes()
data = np.load(files[i])
u=data['vx']
v=data['vy']
s = np.sqrt(u**2+v**2)
# p=data['pre']
del data,u,v
t=i/(len(files)-1)
print("Time:",t)
plt.contourf(X, Y, s, 200,norm = norm, cmap=cmap,extent=(-1, 1, -1, 1),alpha=0.8)
plt.colorbar()
pr=plt.contour(X, Y, s, 20, colors='black')
plt.clabel(pr, inline=True, fontsize=8)
# plt.imshow(s, norm = norm,extent=(-1, 1, -1, 1), origin='lower',cmap=cmap,alpha=0.9)
ax.set_title("Velocity at time:{}".format(t))
# bar = fig.colorbar(pr, orientation='vertical')
plt.xlabel("X axis")
plt.ylabel("Y axis")
plt.savefig("Vel_{}.png".format(i))
plt.clf()
plt.close()
# ct=0
# for i in range(len(files)):
# ct+=1
# if(ct>1):
# if(ct==20):
# ct=0
# continue
# fig = plt.figure()
# ax = plt.axes()
# data = np.load(files[i])
# p=data['pre']
# del data
# t=i/(len(files)-1)
# print("Time:",t)
# pr=ax.contourf(X, Y, p,cmap='viridis')
# ax.set_title("Pressure at time:{}".format(t))
# plt.colorbar(p)
# # bar = fig.colorbar(pr, orientation='vertical')
# plt.xlabel("X axis")
# plt.ylabel("Y axis")
# plt.savefig("PRes_{}.png".format(t))
# plt.clf()
# plt.close()
# fig = plt.figure(1, [5, 5])
# ax = fig.gca()
# # Plot colormap.
# cmap = mpl.cm.get_cmap('viridis')
# norm = BoundaryNorm(np.linspace(0.0, 1.0, 21), cmap.N)
# speed = ax.imshow(s, norm = norm, cmap=cmap, origin = "lower", extent = (0, 1, 0, 1))
# # Plot colorbar.
# divider = make_axes_locatable(ax)
# cax = divider.append_axes('right', size='5%', pad=0.05)
# bar = fig.colorbar(speed, cax=cax, orientation='vertical')
# loc = mpl.ticker.MultipleLocator(0.2)
# bar.locator = loc
# bar.update_ticks()
# # Plot vector field.
# X, Y = np.mgrid[0:1:11j, 0:1:11j]
# vec = ax.quiver(Y, X, u_red, v_red, scale=1.0, color="white")
# plt.tight_layout()
# def animate(n):
# data = np.load(files[n])
# u=data['vx']
# v=data['vy']
# if(n%10==0):
# print("frame::",n)
# s = np.sqrt(u**2+v**2)
# speed.set_data(s)
# u_red = resize(u, (11, 11))
# v_red = resize(v, (11, 11))
# vec.set_UVC(u_red, v_red)
# ax.text
# return speed, vec
# writer = myPillow()
# writer.fps = 1
# animation = anim.FuncAnimation(fig, animate, frames=len(files), interval = 2, blit=True)
# animation.save('Re1_50x50_v.gif', writer=writer)
| 23.077778
| 90
| 0.598219
|
80ab8e607bff611220e77f963b4097de724fa914
| 426
|
py
|
Python
|
sdk/python/pulumi_gcp/binaryauthorization/__init__.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/binaryauthorization/__init__.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/binaryauthorization/__init__.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .attestor import *
from .attestor_iam_binding import *
from .attestor_iam_member import *
from .attestor_iam_policy import *
from .policy import *
from ._inputs import *
from . import outputs
| 32.769231
| 87
| 0.744131
|
b7155dc47e930c073b375140b1ca64d1d204b98c
| 2,402
|
py
|
Python
|
app/language_interface.py
|
Albert5544/frontend
|
dab95f2a6ba95e8521c0e439da0081d2edf69cf9
|
[
"MIT"
] | null | null | null |
app/language_interface.py
|
Albert5544/frontend
|
dab95f2a6ba95e8521c0e439da0081d2edf69cf9
|
[
"MIT"
] | 2
|
2021-02-02T22:48:24.000Z
|
2021-06-02T02:04:53.000Z
|
app/language_interface.py
|
Albert5544/frontend
|
dab95f2a6ba95e8521c0e439da0081d2edf69cf9
|
[
"MIT"
] | null | null | null |
import sys
from abc import abstractmethod, ABCMeta
import docker
import os
from app import db, celery
from app.models import User, Dataset
class language_interface(object):
    __metaclass__ = ABCMeta  # marks this as an abstract class
@abstractmethod
def preprocessing(self, preprocess, dataverse_key='', doi='', zip_file='', run_instr='', user_pkg=''):
pass
    @abstractmethod  # abstract method
def build_docker_file(self, dir_name, docker_pkgs, addtional_info):
pass
    @abstractmethod  # abstract method
def create_report(self, current_user_id, name, dir_name):
pass
    @abstractmethod  # abstract method
def clean_up_datasets(self, dir):
pass
def build_docker_img(self, docker_file_dir, current_user_id, name):
# create docker client instance
client = docker.from_env()
# build a docker image using docker file
client.login(os.environ.get('DOCKER_USERNAME'), os.environ.get('DOCKER_PASSWORD'))
# name for docker image
current_user_obj = User.query.get(current_user_id)
# image_name = ''.join(random.choice(string.ascii_lowercase) for _ in range(5))
image_name = current_user_obj.username + '-' + name
repo_name = os.environ.get('DOCKER_REPO') + '/'
client.images.build(path=docker_file_dir, tag=repo_name + image_name)
########## PUSHING IMG ######################################################################
def push_docker_img(self, dir_name,current_user_id, name, report):
client = docker.from_env()
current_user_obj = User.query.get(current_user_id)
image_name = current_user_obj.username + '-' + name
repo_name = os.environ.get('DOCKER_REPO') + '/'
print(client.images.push(repository=repo_name + image_name), file=sys.stderr)
########## UPDATING DB ######################################################################
# add dataset to database
new_dataset = Dataset(url="https://hub.docker.com/raas/" + repo_name + image_name + "/",
author=current_user_obj,
name=name,
report=report)
db.session.add(new_dataset)
db.session.commit()
########## CLEANING UP ######################################################################
self.clean_up_datasets(dir_name)
print("Returning")
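A minimal concrete subclass, sketched here only to show what the abstract interface above expects; the bodies are placeholders, not the project's real implementations (the `addtional_info` spelling follows the signature above):
class dummy_interface(language_interface):
    # Placeholder implementations of the four abstract hooks.
    def preprocessing(self, preprocess, dataverse_key='', doi='', zip_file='', run_instr='', user_pkg=''):
        return None
    def build_docker_file(self, dir_name, docker_pkgs, addtional_info):
        return None
    def create_report(self, current_user_id, name, dir_name):
        return {}
    def clean_up_datasets(self, dir):
        pass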
| 36.393939
| 106
| 0.579101
|
a528b1e82fd5fd1dff27ada8a7d49492e1d385ab
| 1,531
|
py
|
Python
|
utils/constants.py
|
Saizuo/Photon-V1
|
bf63ac4c4982c7139ea5d010884756b7c8951108
|
[
"BSD-3-Clause"
] | 9
|
2021-09-01T13:43:21.000Z
|
2022-02-23T02:46:11.000Z
|
utils/constants.py
|
ShackerFox/EpicBot
|
c911cd052b0d3c2b5d8de52b2bd704021e8f0d4d
|
[
"BSD-3-Clause"
] | null | null | null |
utils/constants.py
|
ShackerFox/EpicBot
|
c911cd052b0d3c2b5d8de52b2bd704021e8f0d4d
|
[
"BSD-3-Clause"
] | null | null | null |
brain_images = {
"no_brain": [
"https://i1.sndcdn.com/artworks-000583301069-jy5ib2-t500x500.jpg",
"https://i.pinimg.com/originals/6a/1c/84/6a1c843de4d7a5b5843ef63e6ba47e8b.jpg",
"https://cdn.discordapp.com/attachments/859335247547990026/880461702355902544/Z.png"
],
"small": [
"https://thumbs.dreamstime.com/b/small-brain-fingers-close-view-male-hand-taking-human-72334857.jpg",
"https://media.istockphoto.com/photos/xray-of-a-man-with-small-brain-inside-picture-id182163441?k=6&m=182163441&s=170667a&w=0&h=gmcvJM2LKhh37Pi9WLtXWhMwtqCRa7h98UcaWUEYJJg=",
"https://cdn.drawception.com/drawings/Gx0YdMvYOY.png",
"https://thumbs.dreamstime.com/b/x-ray-small-brain-black-background-41056681.jpg",
"https://culturedecanted.files.wordpress.com/2014/06/small.jpg?w=640"
],
"medium": [
"https://img.i-scmp.com/cdn-cgi/image/fit=contain,width=425,format=auto/sites/default/files/styles/768x768/public/d8/images/methode/2020/07/10/ad89450a-c1d5-11ea-8c85-9f30eae6654e_image_hires_194031.JPG?itok=SmtqUNGR&v=1594381242",
"https://ychef.files.bbci.co.uk/976x549/p028qsgx.jpg"
],
# just like my dic-
"big": [
"https://i1.sndcdn.com/avatars-000597831615-6q438f-t500x500.jpg",
"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRgERYJeiv_ozoqu2sXNlINA6PXnoa3yfuCus7fpDIZ4ka2bZG1oL0vfnWbfqN8ElQN-ZY&usqp=CAU",
"https://www.pngitem.com/pimgs/m/523-5238503_pepe-the-frog-big-brain-hd-png-download.png"
]
}
| 61.24
| 239
| 0.717178
|
1f1935ac0f60cbab385d17a6cb557af116fc1132
| 17,619
|
py
|
Python
|
src/wstool/config_yaml.py
|
asaba96/wstool
|
9c44843e3fc1904ef1af94fde0fe519dc410abc8
|
[
"BSD-3-Clause"
] | null | null | null |
src/wstool/config_yaml.py
|
asaba96/wstool
|
9c44843e3fc1904ef1af94fde0fe519dc410abc8
|
[
"BSD-3-Clause"
] | null | null | null |
src/wstool/config_yaml.py
|
asaba96/wstool
|
9c44843e3fc1904ef1af94fde0fe519dc410abc8
|
[
"BSD-3-Clause"
] | null | null | null |
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import yaml
from vcstools.common import urlopen_netrc
from wstool.common import MultiProjectException
__REPOTYPES__ = ['svn', 'bzr', 'hg', 'git', 'tar']
__ALLTYPES__ = __REPOTYPES__ + ['other', 'setup-file']
## The Path spec is a lightweight object to transport the
## specification of a config element between functions,
## independently of yaml structure.
## Specifications are persisted in yaml, this file deals
## with manipulations of any such structures representing configs as
## yaml.
## get_path_spec_from_yaml turns yaml into a PathSpec, and
## PathSpec.get_legacy_yaml turns a PathSpec back into yaml.
def get_yaml_from_uri(uri):
"""reads and parses yaml from a local file or remote uri"""
stream = None
try:
try:
if os.path.isfile(uri):
try:
stream = open(uri, 'r')
except IOError as ioe:
raise MultiProjectException(
"Unable open file [%s]: %s" % (uri, ioe))
else:
try:
stream = urlopen_netrc(uri)
except IOError as ioe2:
raise MultiProjectException(
"Unable to download URL [%s]: %s" % (uri, ioe2))
except ValueError as vae:
raise MultiProjectException(
"Is not a local file, nor a valid URL [%s] : %s" % (uri, vae))
if not stream:
raise MultiProjectException("couldn't load config uri %s" % uri)
try:
yamldata = yaml.load(stream)
except yaml.YAMLError as yame:
raise MultiProjectException(
"Invalid multiproject yaml format in [%s]: %s" % (uri, yame))
# we want a list or a dict, but pyyaml parses xml as string
        if isinstance(yamldata, str):
raise MultiProjectException(
"Invalid multiproject yaml format in [%s]: %s" % (uri, yamldata))
finally:
if stream is not None:
stream.close()
return yamldata
def get_path_specs_from_uri(uri, config_filename=None, as_is=False):
"""
Builds a list of PathSpec elements from several types of input
locations, "uris".
The function treats other workspace folders/files as special uris
to prevent mutual conflicts.
:param uri: a folder, a file, or a web url
:param config_filename: name for files to be treated special
as other workspaces
:param as_is: do not rewrite, used for loading the current
workspace config without rewriting
"""
if os.path.isdir(uri):
if (config_filename is not None and
os.path.isfile(os.path.join(uri, config_filename))):
uri = os.path.join(uri, config_filename)
else:
# plain folders returned as themselves
return [PathSpec(local_name=uri)]
yaml_spec = get_yaml_from_uri(uri)
if yaml_spec is None:
return []
specs = [get_path_spec_from_yaml(x) for x in yaml_spec]
if (config_filename is not None and
not as_is and
os.path.isfile(uri) and
os.path.basename(uri) == config_filename):
# treat config files and folders with such files special
# to prevent 2 workspaces from interacting
specs = rewrite_included_source(specs, os.path.dirname(uri))
return specs
def rewrite_included_source(source_path_specs, source_dir):
"""
assumes source_path_specs is the contents of a config file in
another directory source dir. It rewrites all elements, by changing
any relative path relative to source dir and changing vcs
    types to non-vcs types, to prevent two environments from
conflicting
"""
for index, pathspec in enumerate(source_path_specs):
local_name = os.path.normpath(os.path.join(source_dir,
pathspec.get_local_name()))
pathspec.set_local_name(local_name)
if pathspec.get_path() is not None:
path = os.path.normpath(
os.path.join(source_dir, pathspec.get_path()))
pathspec.set_path(path)
pathspec.detach_vcs_info()
source_path_specs[index] = pathspec
return source_path_specs
def aggregate_from_uris(config_uris, config_filename=None, allow_other_element=True):
"""
    Builds a list of PathSpec from a list of location strings (uris,
    paths). If a location is a folder, attempts to find config_filename
    in it and uses "folder/config_filename" instead (rewriting element
    paths and stripping SCM nature); otherwise adds the folder as a PathSpec.
Anything else, parse yaml at location, and add a PathSpec for each
element.
:param config_uris: source of yaml
:param config_filename: file to use when given a folder
:param allow_other_element: if False, discards elements
to be added without SCM information
"""
aggregate_source_yaml = []
# build up a merged list of config elements from all given config_uris
if config_uris is None:
return []
for loop_uri in config_uris:
source_path_specs = get_path_specs_from_uri(
loop_uri, config_filename)
# allow duplicates, dealt with in Config class
if not allow_other_element:
for spec in source_path_specs:
if not spec.get_scmtype():
raise MultiProjectException(
"Forbidden non-SCM element: %s (%s)" %
(spec.get_local_name(), spec.get_legacy_type()))
aggregate_source_yaml.extend(source_path_specs)
return aggregate_source_yaml
class PathSpec:
def __init__(self,
# localname is used as ID, currently also is used as path
local_name,
scmtype=None,
uri=None,
version=None,
curr_version=None,
tags=None,
revision=None,
currevision=None,
remote_revision=None,
path=None,
curr_uri=None):
"""
Fills in local properties based on dict, unifies different syntaxes
        :param local_name: to be unique within config, filesystem path to folder
        :param scmtype: one of __ALLTYPES__
        :param uri: uri from config file
        :param version: version label from config file (branchname, tagname, sha-id)
        :param curr_version: version information label(s) from VCS (branchname, remote, tracking branch)
        :param tags: arbitrary meta-information (used for ROS package indexing)
        :param revision: unique id of label stored in version
        :param currevision: unique id of actual version in file system
        :param path: path to folder (currently equivalent to local_name)
        :param curr_uri: actual remote uri used in local checkout
"""
self._local_name = local_name
self._path = path
self._uri = uri
self._curr_uri = curr_uri
self._version = version
self._curr_version = curr_version
self._scmtype = scmtype
self._tags = tags or []
self._revision = revision
self._currevision = currevision
self._remote_revision = remote_revision
def __str__(self):
return str(self.get_legacy_yaml())
def __repr__(self):
return "PathSpec(%s)" % self.__str__()
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def detach_vcs_info(self):
"""if wrapper has VCS information, remove it to make it a plain folder"""
if self._scmtype is not None:
self._scmtype = None
self._uri = None
self._version = None
self._curr_version = None
self._revision = None
self._currevision = None
self._remote_revision = None
def get_legacy_type(self):
"""return one of __ALLTYPES__"""
if self._scmtype is not None:
return self._scmtype
elif self._tags is not None and 'setup-file' in self._tags:
return 'setup-file'
return 'other'
def get_legacy_yaml(self, spec=True, exact=False):
"""
:param spec: If True, the version information will come from the
workspace .rosinstall. If False, the version information will come
from the current work trees.
:param exact: If True, the versions will be set to the exact commit
UUIDs. If False, the version name will be used, which might be a
        branch name et cetera.
return something like
{hg: {local-name: common,
version: common-1.0.2,
uri: https://kforge.org/common/}}
"""
# TODO switch to new syntax
properties = {'local-name': self._local_name}
if spec:
if self._uri is not None:
properties['uri'] = self._uri
if exact:
if self._revision is not None:
properties['version'] = self._revision
else:
if self._version is not None:
properties['version'] = self._version
else:
if self._curr_uri is not None:
properties['uri'] = self._curr_uri
if exact:
if self._currevision is not None:
properties['version'] = self._currevision
else:
if self._curr_version is not None:
properties['version'] = self._curr_version
if self._tags is not None:
for tag in self._tags:
if tag != 'setup-file' and tag != []:
if type(tag) == dict:
properties.update(tag)
else:
properties[tag] = None
yaml_dict = {self.get_legacy_type(): properties}
return yaml_dict
def get_local_name(self):
return self._local_name
def set_local_name(self, local_name):
self._local_name = local_name
def get_path(self):
return self._path
def set_path(self, path):
self._path = path
def get_tags(self):
return self._tags
def get_scmtype(self):
return self._scmtype
def get_version(self):
return self._version
def get_curr_version(self):
return self._curr_version
def get_revision(self):
return self._revision
def get_current_revision(self):
return self._currevision
def get_remote_revision(self):
return self._remote_revision
def get_uri(self):
return self._uri
def get_curr_uri(self):
return self._curr_uri
def get_path_spec_from_yaml(yaml_dict):
"""
Fills in local properties based on dict, unifies different syntaxes
"""
local_name = None
uri = None
version = None
scmtype = None
tags = []
if type(yaml_dict) != dict:
raise MultiProjectException(
"Yaml for each element must be in YAML dict form: %s " % yaml_dict)
# old syntax:
# - hg: {local-name: common_rosdeps,
# version: common_rosdeps-1.0.2,
# uri: https://kforge.ros.org/common/rosdepcore}
# - setup-file: {local-name: /opt/ros/fuerte/setup.sh}
# - other: {local-name: /opt/ros/fuerte/share/ros}
# - other: {local-name: /opt/ros/fuerte/share}
# - other: {local-name: /opt/ros/fuerte/stacks}
if yaml_dict is None or len(yaml_dict) == 0:
raise MultiProjectException("no element in yaml dict.")
if len(yaml_dict) > 1:
raise MultiProjectException(
"too many keys in element dict %s" % (list(yaml_dict.keys())))
if not list(yaml_dict.keys())[0] in __ALLTYPES__:
raise MultiProjectException(
"Unknown element type '%s'" % (list(yaml_dict.keys())[0]))
firstkey = list(yaml_dict.keys())[0]
if firstkey in __REPOTYPES__:
scmtype = list(yaml_dict.keys())[0]
if firstkey == 'setup-file':
tags.append('setup-file')
values = yaml_dict[firstkey]
if values is not None:
for key, value in list(values.items()):
if key == "local-name":
local_name = value
elif key == "meta":
tags.append({key: value})
elif key == "uri":
uri = value
elif key == "version":
                # VCS tools expect version to be a
                # string; otherwise, all-integer
                # versions will break the application
if value is not None:
version = str(value)
else:
raise MultiProjectException(
"Unknown key %s in %s" % (key, yaml_dict))
# global validation
if local_name is None:
raise MultiProjectException(
"Config element without a local-name: %s" % (yaml_dict))
    if scmtype is not None:
if uri is None:
raise MultiProjectException(
"scm type without declared uri in %s" % (values))
# local_name is fixed, path may be normalized, made absolute, etc.
path = local_name
return PathSpec(local_name=local_name,
path=path,
scmtype=scmtype,
uri=uri,
version=version,
tags=tags)
def generate_config_yaml(config, filename, header, pretty=False,
sort_with_localname=False, spec=True,
exact=False, vcs_only=False):
"""
Writes file filename with header first and then the config as YAML.
:param config: The configuration containing all the entries to be included
in the generated YAML.
:param filename: If filename is not an absolute path, it will be assumed to
be relative to config.get_base_path(). If filename is None, the output will
be sent to stdout instead of a file.
:param header: A header to be included with the generated config YAML.
:param pretty: If True, the generated config YAML will be printed in
long-form YAML. If false, the default flow style will be used instead.
:param sort_with_localname: If true, config entries will be sorted by their
localname fields. If false, the order will be as passed in through config.
:param spec: If True, the version information will come from the workspace
.rosinstall. If False, the version information will come from the current
work trees.
:param exact: If True, the versions will be set to the exact commit UUIDs.
If False, the version name will be used, which might be a branch name
    et cetera.
:param vcs_only: If True, the generated config YAML will include only
version-controlled entries. If False, all entries in current workspace will
be included.
"""
if not os.path.exists(config.get_base_path()):
os.makedirs(config.get_base_path())
content = ""
if header:
content += header
# Do a pass-through if just pulling versioning information straight from
# the .rosinstall
passthrough = spec and not exact
items = config.get_source(not passthrough, vcs_only)
if sort_with_localname:
items = sorted(items, key=lambda x: x.get_local_name())
items = [x.get_legacy_yaml(spec, exact) for x in items]
if items:
if pretty:
content += yaml.safe_dump(items, allow_unicode=True,
default_flow_style=False)
else:
content += yaml.safe_dump(items)
if filename:
config_filepath = filename if os.path.isabs(filename) else \
os.path.realpath(os.path.join(config.get_base_path(), filename))
with open(config_filepath, 'w+b') as f:
f.write(content.encode('UTF-8'))
else:
print(content)
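A small round-trip sketch (not part of the original module), feeding the legacy one-element dict form shown in the docstrings above through get_path_spec_from_yaml and back out via get_legacy_yaml:
if __name__ == '__main__':
    entry = {'hg': {'local-name': 'common',
                    'version': 'common-1.0.2',
                    'uri': 'https://kforge.org/common/'}}
    spec = get_path_spec_from_yaml(entry)
    print(spec.get_local_name(), spec.get_scmtype(), spec.get_version())
    print(spec.get_legacy_yaml())  # round-trips to the same one-element dict shape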
| 38.053996
| 103
| 0.627958
|
ee65970340db3d1f2e7d5e9dd2e03535bd108580
| 583
|
py
|
Python
|
meals/urls.py
|
faix/registration
|
11b6f5f419bd39132e9a40f3366b27451a436778
|
[
"MIT"
] | 1
|
2019-12-03T05:21:56.000Z
|
2019-12-03T05:21:56.000Z
|
meals/urls.py
|
oriolclosa/registration
|
91d85d7fbbf25339aa37ed9b46c49b696a5fc6d1
|
[
"MIT"
] | 2
|
2020-10-12T15:38:57.000Z
|
2022-02-24T19:02:46.000Z
|
meals/urls.py
|
oriolclosa/registration
|
91d85d7fbbf25339aa37ed9b46c49b696a5fc6d1
|
[
"MIT"
] | 4
|
2020-10-12T15:30:46.000Z
|
2022-03-13T13:27:58.000Z
|
from django.conf.urls import url
from meals import views
urlpatterns = [
url(r'^list/$', views.MealsList.as_view(), name='meals_list'),
url(r'^add/$', views.MealAdd.as_view(), name='meal_add'),
url(r'^users/$', views.MealsUsers.as_view(), name='meals_users'),
url(r'^(?P<id>[\w-]+)$', views.MealDetail.as_view(), name='meal_detail'),
url(r'^scan/(?P<id>[\w-]+)$', views.MealsCheckin.as_view(), name='meal_checkin'),
url(r'^api/$', views.MealsApi.as_view(), name='meals_api'),
url(r'^api/checkin$', views.MealsCoolAPI.as_view(), name='cool_meals_api')
]
| 41.642857
| 85
| 0.64837
|
6f9f142fb83cd79890a4a9c5e1cd19229d3b2f3a
| 46
|
py
|
Python
|
src/adafruit_blinka/microcontroller/starfive/JH71x0/__init__.py
|
twa127/Adafruit_Blinka
|
560ab1fcabf64b9941ed57a3cfed996b836178fa
|
[
"MIT"
] | 294
|
2018-06-30T19:08:27.000Z
|
2022-03-26T21:08:47.000Z
|
src/adafruit_blinka/microcontroller/starfive/JH71x0/__init__.py
|
twa127/Adafruit_Blinka
|
560ab1fcabf64b9941ed57a3cfed996b836178fa
|
[
"MIT"
] | 421
|
2018-06-30T20:54:46.000Z
|
2022-03-31T15:08:37.000Z
|
src/adafruit_blinka/microcontroller/starfive/JH71x0/__init__.py
|
twa127/Adafruit_Blinka
|
560ab1fcabf64b9941ed57a3cfed996b836178fa
|
[
"MIT"
] | 234
|
2018-07-23T18:49:16.000Z
|
2022-03-28T16:59:48.000Z
|
"""Definition for the StarFive JH71x0 chip"""
| 23
| 45
| 0.73913
|
af23f15ed8f231b38680858b9de009a07c4dc42f
| 1,039
|
py
|
Python
|
Examples/Modules/nci_corrector/analysis_ncicorr.py
|
mrowan137/amrex
|
cafcb6bd5902fc72a4d6fa51b99fe837f5eb5381
|
[
"BSD-3-Clause-LBNL"
] | 131
|
2018-09-29T08:11:40.000Z
|
2022-03-28T23:24:22.000Z
|
Examples/Modules/nci_corrector/analysis_ncicorr.py
|
mrowan137/amrex
|
cafcb6bd5902fc72a4d6fa51b99fe837f5eb5381
|
[
"BSD-3-Clause-LBNL"
] | 1,656
|
2018-10-02T01:49:24.000Z
|
2022-03-31T21:27:31.000Z
|
Examples/Modules/nci_corrector/analysis_ncicorr.py
|
mrowan137/amrex
|
cafcb6bd5902fc72a4d6fa51b99fe837f5eb5381
|
[
"BSD-3-Clause-LBNL"
] | 100
|
2018-10-01T20:41:14.000Z
|
2022-03-10T10:30:42.000Z
|
#! /usr/bin/env python
# Copyright 2019 Jean-Luc Vay, Maxence Thevenet, Remi Lehe
# Weiqun Zhang
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import sys
import yt
import re
import numpy as np
import scipy.constants as scc
yt.funcs.mylog.setLevel(0)
fn = sys.argv[1]
use_MR = re.search('nci_correctorMR', fn) is not None
if use_MR:
energy_corrector_off = 5.e32
energy_threshold = 1.e28
else:
energy_corrector_off = 1.5e26
energy_threshold = 1.e24
# Check EB energy after 1000 timesteps
filename = sys.argv[1]
ds = yt.load( filename )
ad0 = ds.covering_grid(level=0,left_edge=ds.domain_left_edge, dims=ds.domain_dimensions)
ex = ad0['boxlib', 'Ex'].v
ez = ad0['boxlib', 'Ez'].v
by = ad0['boxlib', 'By'].v
energy = np.sum(ex**2 + ez**2 + scc.c**2*by**2)
print("use_MR: %s" %use_MR)
print("energy if corrector off (from benchmark): %s" %energy_corrector_off)
print("energy threshold (from benchmark): %s" %energy_threshold)
print("energy from this run: %s" %energy)
assert( energy < energy_threshold )
| 23.613636
| 88
| 0.706449
|
79d08b53f3f546a5fe2bffb5cbe2f56f66859a05
| 3,702
|
py
|
Python
|
utils.py
|
pavitra14/Text2Handwriting
|
c66fda8e1ba024102aee3e0cf3159ba461d41468
|
[
"MIT"
] | null | null | null |
utils.py
|
pavitra14/Text2Handwriting
|
c66fda8e1ba024102aee3e0cf3159ba461d41468
|
[
"MIT"
] | 1
|
2021-06-08T21:44:41.000Z
|
2021-06-08T21:44:41.000Z
|
utils.py
|
pavitra14/Text2Handwriting
|
c66fda8e1ba024102aee3e0cf3159ba461d41468
|
[
"MIT"
] | 1
|
2020-10-23T03:10:08.000Z
|
2020-10-23T03:10:08.000Z
|
import pytesseract
import cv2
import matplotlib.pyplot as plt
from PIL import Image
from flask import request
from pytesseract import Output
from werkzeug.utils import secure_filename
import base64
import os
import json
import pickle
# from BackgroundRemoval import processImage
import shutil
from TokenManagement import TokenManager
# allow files of a specific type
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
def get_custom_handwriting() -> list:
fp = open("custom.hw", "rb")
custom_hw = pickle.load(fp)
fp.close()
return custom_hw
def add_custom_handwriting(hw_name: str) -> list:
fp = open("custom.hw", "rb")
custom_hw = pickle.load(fp)
fp.close()
custom_hw.append(hw_name)
with open("custom.hw", "wb") as fp:
pickle.dump(custom_hw, fp)
fp.close()
return custom_hw
def createEmptyCustomHW():
data = []
with open("custom.hw", "wb") as f:
pickle.dump(data, f)
def list_to_json(l: list) -> str:
return json.dumps(l)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def load_image(image) -> Image:
try:
image = cv2.imread(image)
return image
except:
return False
def get_boxes(image: Image) -> dict:
box_raw = pytesseract.image_to_boxes(image)
boxes = {}
h, w, _ = image.shape
for b in box_raw.splitlines():
b = b.split(' ')
char = b[0]
x1 = int(b[1])
y1 = h - int(b[2])
x2 = int(b[3])
y2 = h - int(b[4])
boxes[char] = (x1, y1, x2, y2)
return boxes
def extract_letters(image: Image, boxes: dict, token: str):
tk = TokenManager()
    hw_name = tk.getTokenName(token)
path = os.path.join('./static/trained/',hw_name)
defaultPath = os.path.join('./static/trained/','defaultText2')
if not os.path.exists(path):
os.mkdir(path)
print(hw_name, "folder created at", path)
for i in range(32, 126):
status=False
try:
if i == 96:
continue
char = chr(i)
filename_t = "{}_t.png".format(i)
if char in boxes.keys():
#TODO
x1,y1,x2,y2 = boxes[char]
output = image[y1:y2, x1:x2]
# output = processImage(letter)
cv2.imwrite(os.path.join(path,filename_t), output)
status=True
else:
source = os.path.join(defaultPath,filename_t)
dest = os.path.join(path, filename_t)
print("copying {} to {}".format(source, dest))
shutil.copyfile(source,dest)
status=False
yield (char,status)
except Exception as ex:
source = os.path.join(defaultPath,filename_t)
dest = os.path.join(path, filename_t)
print("copying {} to {}".format(source, dest))
shutil.copyfile(source,dest)
yield (char,status)
return True
def get_base64(image):
retval, buffer = cv2.imencode(".png", image)
png = base64.b64encode(buffer)
return png.decode('utf-8')
def boxes_web(boxes: dict, image):
for char in boxes:
x1, y1, x2, y2 = boxes[char]
cv2.rectangle(image, (x1,y1), (x2,y2), (0,255,0), 2)
return get_base64(image)
def get_handwriting_list() -> list:
path = os.path.join("./static/", "trained")
output = set(sorted([dI for dI in os.listdir(path) if os.path.isdir(os.path.join(path,dI))]))
print(output)
print(get_custom_handwriting())
output = output - set(get_custom_handwriting())
return list(sorted(list(output)))
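A quick local check for the OCR helpers above, assuming a working Tesseract install and a sample scan saved as "sample.png" (hypothetical filename); it is meant to be appended to the same module:
if __name__ == "__main__":
    img = load_image("sample.png")
    if img is None or img is False:
        print("could not load sample.png")
    else:
        boxes = get_boxes(img)
        preview = boxes_web(boxes, img.copy())  # copy so the original image stays unmarked
        print(len(boxes), "characters boxed;", len(preview), "base64 chars in preview")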
| 29.616
| 97
| 0.594273
|
71e5bd291f2315b2bee17d963c5f1e2d97f3371a
| 99,677
|
py
|
Python
|
src/transformers/utils/dummy_pt_objects.py
|
Ravoxsg/transformers
|
3212a1d4a6fbded40daad7153f222c91acabe82d
|
[
"Apache-2.0"
] | 3
|
2022-02-22T07:06:32.000Z
|
2022-03-16T05:30:19.000Z
|
src/transformers/utils/dummy_pt_objects.py
|
Ravoxsg/transformers
|
3212a1d4a6fbded40daad7153f222c91acabe82d
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/utils/dummy_pt_objects.py
|
Ravoxsg/transformers
|
3212a1d4a6fbded40daad7153f222c91acabe82d
|
[
"Apache-2.0"
] | null | null | null |
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..file_utils import DummyObject, requires_backends
class PyTorchBenchmark(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PyTorchBenchmarkArguments(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataTrainingArguments(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineTextDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithRefDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithSOPTextDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataTrainingArguments(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDataset(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDatasetForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeamScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeamSearchScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ForcedBOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ForcedEOSTokenLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HammingDiversityLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class InfNanRemoveLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsProcessorList(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MinLengthLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NoBadWordsLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NoRepeatNGramLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PrefixConstrainedLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RepetitionPenaltyLogitsProcessor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TemperatureLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TopKLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TopPLogitsWarper(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaxLengthCriteria(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaxTimeCriteria(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class StoppingCriteria(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class StoppingCriteriaList(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def top_k_top_p_filtering(*args, **kwargs):
requires_backends(top_k_top_p_filtering, ["torch"])
class Conv1D(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def apply_chunking_to_forward(*args, **kwargs):
requires_backends(apply_chunking_to_forward, ["torch"])
def prune_layer(*args, **kwargs):
requires_backends(prune_layer, ["torch"])
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AlbertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_albert(*args, **kwargs):
requires_backends(load_tf_weights_in_albert, ["torch"])
MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = None
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = None
MODEL_FOR_CAUSAL_LM_MAPPING = None
MODEL_FOR_CTC_MAPPING = None
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = None
MODEL_FOR_MASKED_LM_MAPPING = None
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
MODEL_FOR_OBJECT_DETECTION_MAPPING = None
MODEL_FOR_PRETRAINING_MAPPING = None
MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
MODEL_FOR_VISION_2_SEQ_MAPPING = None
MODEL_MAPPING = None
MODEL_WITH_LM_HEAD_MAPPING = None
class AutoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForAudioClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForAudioXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForImageSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForObjectDetection(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSeq2SeqLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSpeechSeq2Seq(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForTableQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForVision2Seq(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelWithLMHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BART_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BartForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartPretrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedBartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BeitForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitForMaskedImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeitPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_bert(*args, **kwargs):
requires_backends(load_tf_weights_in_bert, ["torch"])
class BertGenerationDecoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertGenerationEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertGenerationPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_bert_generation(*args, **kwargs):
requires_backends(load_tf_weights_in_bert_generation, ["torch"])
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_big_bird(*args, **kwargs):
requires_backends(load_tf_weights_in_big_bird, ["torch"])
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdPegasusForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPegasusPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotSmallForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CamembertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CanineForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CanineModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CaninePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_canine(*args, **kwargs):
requires_backends(load_tf_weights_in_canine, ["torch"])
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CLIPModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPTextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CLIPVisionModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_convbert(*args, **kwargs):
requires_backends(load_tf_weights_in_convbert, ["torch"])
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CTRLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaV2ForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DeiTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTForImageClassificationWithTeacher(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DeiTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DistilBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DPRContextEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedContextEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedQuestionEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRQuestionEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ElectraForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_electra(*args, **kwargs):
requires_backends(load_tf_weights_in_electra, ["torch"])
class EncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlaubertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertWithLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FNetForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedFSMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FunnelBaseModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_funnel(*args, **kwargs):
requires_backends(load_tf_weights_in_funnel, ["torch"])
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPT2DoubleHeadsModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2LMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt2(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt2, ["torch"])
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt_neo(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt_neo, ["torch"])
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTJForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTJPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class HubertForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HubertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class IBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ImageGPTForCausalImageModeling(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ImageGPTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ImageGPTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ImageGPTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_imagegpt(*args, **kwargs):
requires_backends(load_tf_weights_in_imagegpt, ["torch"])
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMv2ForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2ForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMv2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LED_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LEDForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LongformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerSelfAttention(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LukeForEntityClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForEntityPairClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForEntitySpanClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukeModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LukePreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertVisualFeatureEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertXLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None
class M2M100ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class M2M100Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class M2M100PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianMTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MegatronBertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MegatronBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MMBTForClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MMBTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ModalEmbeddings(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForNextSentencePrediction(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_mobilebert(*args, **kwargs):
requires_backends(load_tf_weights_in_mobilebert, ["torch"])
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MPNetForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5EncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class NystromformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NystromformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OpenAIGPTDoubleHeadsModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_openai_gpt(*args, **kwargs):
requires_backends(load_tf_weights_in_openai_gpt, ["torch"])
class PegasusForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class PerceiverForImageClassificationConvProcessing(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForImageClassificationFourier(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForImageClassificationLearned(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForMultimodalAutoencoding(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForOpticalFlow(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PerceiverPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ProphetNetDecoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagSequenceForGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagTokenForGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REALM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RealmEmbedder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmForOpenQA(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmKnowledgeAugEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmReader(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmRetriever(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RealmScorer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_realm(*args, **kwargs):
requires_backends(load_tf_weights_in_realm, ["torch"])
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ReformerAttention(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModelWithLMHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RemBertForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RemBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_rembert(*args, **kwargs):
requires_backends(load_tf_weights_in_rembert, ["torch"])
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RetriBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RetriBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RobertaForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RoFormerForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RoFormerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_roformer(*args, **kwargs):
requires_backends(load_tf_weights_in_roformer, ["torch"])
SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SegformerDecodeHead(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerForSemanticSegmentation(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SegformerPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SEW_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SEWDForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SEWDPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SpeechEncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Speech2TextForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2Text2ForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2Text2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SplinterForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SplinterPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SqueezeBertForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModule(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SwinForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SwinPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
T5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class T5EncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5ForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_t5(*args, **kwargs):
requires_backends(load_tf_weights_in_t5, ["torch"])
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AdaptiveEmbedding(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_transfo_xl(*args, **kwargs):
requires_backends(load_tf_weights_in_transfo_xl, ["torch"])
TROCR_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TrOCRForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TrOCRPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
UNISPEECH_SAT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class UniSpeechSatForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class UniSpeechSatPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViltForImageAndTextRetrieval(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForImagesAndTextClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViltPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisionEncoderDecoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisionTextDualEncoderModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VISUAL_BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class VisualBertForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForRegionToPhraseAlignment(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertForVisualReasoning(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class VisualBertPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTForImageClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTMAEForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMAELayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMAEModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTMAEPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Wav2Vec2ForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForPreTraining(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2Model(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2PreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class WavLMForAudioFrameClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMForCTC(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMForXVector(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class WavLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XGLMForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XGLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XGLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMWithLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMProphetNetDecoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetEncoder(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForConditionalGeneration(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMRobertaForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMRobertaXLForCausalLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaXLPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLNetForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForQuestionAnsweringSimple(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetLMHeadModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_xlnet(*args, **kwargs):
requires_backends(load_tf_weights_in_xlnet, ["torch"])
YOSO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class YosoForMaskedLM(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForMultipleChoice(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForQuestionAnswering(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForSequenceClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoForTokenClassification(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoLayer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class YosoPreTrainedModel(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
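# Optimizers, learning-rate schedulers, and Trainer utilities below are dummies too:
# constructing or calling any of them just invokes requires_backends(), which fails
# with an informative error when PyTorch is not installed.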
class Adafactor(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AdamW(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def get_constant_schedule(*args, **kwargs):
requires_backends(get_constant_schedule, ["torch"])
def get_constant_schedule_with_warmup(*args, **kwargs):
requires_backends(get_constant_schedule_with_warmup, ["torch"])
def get_cosine_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_schedule_with_warmup, ["torch"])
def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])
def get_linear_schedule_with_warmup(*args, **kwargs):
requires_backends(get_linear_schedule_with_warmup, ["torch"])
def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])
def get_scheduler(*args, **kwargs):
requires_backends(get_scheduler, ["torch"])
class Trainer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def torch_distributed_zero_first(*args, **kwargs):
requires_backends(torch_distributed_zero_first, ["torch"])
class Seq2SeqTrainer(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
| 23.34356
| 84
| 0.68745
|
587aee2a1f2d8cd34ee03c481a71ad57af15ae7f
| 2,829
|
py
|
Python
|
alipay/aop/api/response/AlipayTradeCustomsDeclareResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayTradeCustomsDeclareResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayTradeCustomsDeclareResponse.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayTradeCustomsDeclareResponse(AlipayResponse):
def __init__(self):
super(AlipayTradeCustomsDeclareResponse, self).__init__()
self._alipay_declare_no = None
self._currency = None
self._identity_check = None
self._pay_code = None
self._pay_transaction_id = None
self._total_amount = None
self._trade_no = None
self._ver_dept = None
@property
def alipay_declare_no(self):
return self._alipay_declare_no
@alipay_declare_no.setter
def alipay_declare_no(self, value):
self._alipay_declare_no = value
@property
def currency(self):
return self._currency
@currency.setter
def currency(self, value):
self._currency = value
@property
def identity_check(self):
return self._identity_check
@identity_check.setter
def identity_check(self, value):
self._identity_check = value
@property
def pay_code(self):
return self._pay_code
@pay_code.setter
def pay_code(self, value):
self._pay_code = value
@property
def pay_transaction_id(self):
return self._pay_transaction_id
@pay_transaction_id.setter
def pay_transaction_id(self, value):
self._pay_transaction_id = value
@property
def total_amount(self):
return self._total_amount
@total_amount.setter
def total_amount(self, value):
self._total_amount = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def ver_dept(self):
return self._ver_dept
@ver_dept.setter
def ver_dept(self, value):
self._ver_dept = value
def parse_response_content(self, response_content):
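        # Copy each declared field from the parsed gateway response into its property, if present.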
response = super(AlipayTradeCustomsDeclareResponse, self).parse_response_content(response_content)
if 'alipay_declare_no' in response:
self.alipay_declare_no = response['alipay_declare_no']
if 'currency' in response:
self.currency = response['currency']
if 'identity_check' in response:
self.identity_check = response['identity_check']
if 'pay_code' in response:
self.pay_code = response['pay_code']
if 'pay_transaction_id' in response:
self.pay_transaction_id = response['pay_transaction_id']
if 'total_amount' in response:
self.total_amount = response['total_amount']
if 'trade_no' in response:
self.trade_no = response['trade_no']
if 'ver_dept' in response:
self.ver_dept = response['ver_dept']
| 29.46875
| 106
| 0.663132
|
9d420fa6a6fa1baeda66992d3ce18a3d9c5a6464
| 7,949
|
py
|
Python
|
sdk/python/pulumi_azure_native/netapp/v20200801/backup.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/v20200801/backup.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/netapp/v20200801/backup.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['Backup']
class Backup(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
backup_name: Optional[pulumi.Input[str]] = None,
label: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
pool_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
volume_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Backup of a Volume
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[str] backup_name: The name of the backup
:param pulumi.Input[str] label: Label for backup
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] pool_name: The name of the capacity pool
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] volume_name: The name of the volume
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__['account_name'] = account_name
__props__['backup_name'] = backup_name
__props__['label'] = label
__props__['location'] = location
if pool_name is None and not opts.urn:
raise TypeError("Missing required property 'pool_name'")
__props__['pool_name'] = pool_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if volume_name is None and not opts.urn:
raise TypeError("Missing required property 'volume_name'")
__props__['volume_name'] = volume_name
__props__['backup_id'] = None
__props__['backup_type'] = None
__props__['creation_date'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['size'] = None
__props__['type'] = None
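        # Register aliases for the other API versions of this resource so stacks created
        # with those versions keep resolving to the same resource type.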
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Backup"), pulumi.Alias(type_="azure-native:netapp:Backup"), pulumi.Alias(type_="azure-nextgen:netapp:Backup"), pulumi.Alias(type_="azure-native:netapp/latest:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200501:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200601:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200701:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Backup"), pulumi.Alias(type_="azure-native:netapp/v20200901:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Backup"), pulumi.Alias(type_="azure-native:netapp/v20201101:Backup"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Backup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Backup, __self__).__init__(
'azure-native:netapp/v20200801:Backup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Backup':
"""
Get an existing Backup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["backup_id"] = None
__props__["backup_type"] = None
__props__["creation_date"] = None
__props__["label"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["size"] = None
__props__["type"] = None
return Backup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="backupId")
def backup_id(self) -> pulumi.Output[str]:
"""
UUID v4 used to identify the Backup
"""
return pulumi.get(self, "backup_id")
@property
@pulumi.getter(name="backupType")
def backup_type(self) -> pulumi.Output[str]:
"""
Type of backup adhoc or scheduled
"""
return pulumi.get(self, "backup_type")
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> pulumi.Output[str]:
"""
The creation date of the backup
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def label(self) -> pulumi.Output[Optional[str]]:
"""
Label for backup
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def size(self) -> pulumi.Output[float]:
"""
Size of backup
"""
return pulumi.get(self, "size")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 41.186528
| 935
| 0.630771
|
0c14456feea8602ddf535eb26e0fdfbcc2feb5c0
| 699
|
py
|
Python
|
swift/context.py
|
PdrLins/swift-morm
|
36f38cfd8d431d4530ead35ef2bf87eb46ee76ba
|
[
"MIT"
] | 2
|
2018-08-21T07:49:37.000Z
|
2019-01-18T20:58:57.000Z
|
swift/context.py
|
PdrLins/swift-morm
|
36f38cfd8d431d4530ead35ef2bf87eb46ee76ba
|
[
"MIT"
] | 1
|
2021-04-30T20:42:36.000Z
|
2021-04-30T20:42:36.000Z
|
swift/context.py
|
PdrLins/swift-morm
|
36f38cfd8d431d4530ead35ef2bf87eb46ee76ba
|
[
"MIT"
] | null | null | null |
from flask import current_app
from .sql_mapper import *
class Context(SwiftSqlMapper):
__providers_keys = []
    def __init__(self, providers: list):
self.setProviders(providers)
@staticmethod
def get_context():
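        # Build a Context wired to both the PostgreSQL and SQL Server providers
        # configured on the current Flask app.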
post_connString = current_app.config.get('POSTGRESQL_DATABASE_URI') # need to define the connection string
sqlServer_connString = current_app.config.get('SQLSERVER_DATABASE_URI') # need to define the connection string
ins_Post = InstanceProvide(post_connString, PostgresSqlProvider)
ins_sqlServer = InstanceProvide(sqlServer_connString, SqlServerProvider)
context = Context([ins_Post, ins_sqlServer])
return context
| 36.789474
| 118
| 0.736767
|
6a39fcc2ea37df572c7a571ac385cfe66f708a88
| 347
|
py
|
Python
|
passGen.py
|
diegofer11/RandomPassGenerator
|
ea6bba5b1b723d19675e61f22fe7ba353afa33d7
|
[
"MIT"
] | null | null | null |
passGen.py
|
diegofer11/RandomPassGenerator
|
ea6bba5b1b723d19675e61f22fe7ba353afa33d7
|
[
"MIT"
] | null | null | null |
passGen.py
|
diegofer11/RandomPassGenerator
|
ea6bba5b1b723d19675e61f22fe7ba353afa33d7
|
[
"MIT"
] | null | null | null |
from random import choice, choices
from string import ascii_letters, digits
def new_pass(paswlength):
"""Function that generates random password based on user given length."""
special = '#$%&()*+<=>?@_'
lists = [ascii_letters, digits, special]
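    # For each character: pick one of the three pools at random, then a random character from that pool.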
    pasw = ''.join(choice(choices(lists)[0]) for _ in range(paswlength))
return pasw
| 31.545455
| 77
| 0.688761
|
4ea236c83e32e645d4f2e1d0d2ec2108126bb35b
| 2,481
|
py
|
Python
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_delete_security_group_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_delete_security_group_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/neutron_delete_security_group_response.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class NeutronDeleteSecurityGroupResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""NeutronDeleteSecurityGroupResponse - a model defined in huaweicloud sdk"""
super(NeutronDeleteSecurityGroupResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
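        # Recursively serialize nested models, lists, and dicts; mask attributes listed in sensitive_list.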
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NeutronDeleteSecurityGroupResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.848837
| 85
| 0.559452
|
49cc12ac429c8bf2fa88482731a36912580b6c2b
| 2,111
|
py
|
Python
|
python/tests/test_circumradius.py
|
jorgensd/asimov-contact
|
08704ade6343c346bc54dfd38186983cc7ab4485
|
[
"MIT"
] | null | null | null |
python/tests/test_circumradius.py
|
jorgensd/asimov-contact
|
08704ade6343c346bc54dfd38186983cc7ab4485
|
[
"MIT"
] | null | null | null |
python/tests/test_circumradius.py
|
jorgensd/asimov-contact
|
08704ade6343c346bc54dfd38186983cc7ab4485
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2021 Sarah Roggendorf and Jørgen S. Dokken
#
# SPDX-License-Identifier: MIT
import numpy as np
import pytest
import ufl
from dolfinx.fem import Function, FunctionSpace, IntegralType
from dolfinx.fem.petsc import LinearProblem
from dolfinx.mesh import create_unit_cube, create_unit_square, locate_entities_boundary
from mpi4py import MPI
import dolfinx_contact.cpp
@pytest.mark.parametrize("dim", [2, 3])
def test_circumradius(dim):
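    # Perturb the mesh geometry, then check that the per-facet circumradius packed by
    # dolfinx_contact matches ufl.Circumradius projected onto a DG-0 space and sampled
    # on the cell owning each boundary facet.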
if dim == 3:
N = 5
mesh = create_unit_cube(MPI.COMM_WORLD, N, N, N)
else:
N = 25
mesh = create_unit_square(MPI.COMM_WORLD, N, N)
# Perturb geometry to get spatially varying circumradius
V = FunctionSpace(mesh, ufl.VectorElement("CG", mesh.ufl_cell(), 1))
u = Function(V)
if dim == 3:
u.interpolate(lambda x: (0.1 * (x[1] > 0.5), 0.1 * np.sin(2 * np.pi * x[2]), np.zeros(x.shape[1])))
else:
u.interpolate(lambda x: (0.1 * np.cos(x[1]) * (x[0] > 0.2), 0.1 * x[1] + 0.1 * np.sin(2 * np.pi * x[0])))
dolfinx_contact.update_geometry(u._cpp_object, mesh)
mesh.topology.create_connectivity(dim - 1, dim)
f_to_c = mesh.topology.connectivity(dim - 1, dim)
# Find facets on boundary to integrate over
facets = locate_entities_boundary(
mesh, mesh.topology.dim - 1, lambda x: np.full(x.shape[1], True, dtype=bool))
    order = np.argsort(facets)
    facets = facets[order]
h1 = ufl.Circumradius(mesh)
V = FunctionSpace(mesh, ("DG", 0))
v = ufl.TestFunction(V)
u = ufl.TrialFunction(V)
dx = ufl.Measure("dx", domain=mesh)
a = u * v * dx
L = h1 * v * dx
problem = LinearProblem(a, L, bcs=[], petsc_options={"ksp_type": "preonly", "pc_type": "lu"})
uh = problem.solve()
h2 = np.zeros(facets.size)
for i, facet in enumerate(facets):
cell = f_to_c.links(facet)[0]
h2[i] = uh.vector[cell]
active_facets = dolfinx_contact.cpp.compute_active_entities(mesh, facets, IntegralType.exterior_facet)
h = dolfinx_contact.pack_circumradius(mesh, active_facets).reshape(-1)
assert np.allclose(h, h2)
| 34.048387
| 113
| 0.657982
|
ac7967ab20a5c8843c54d560b1e54124771a2e02
| 2,768
|
py
|
Python
|
lab/tests/test_templatetags.py
|
lajarre/euphrosyne
|
14050097774b088e7345f9488ce74b205f7bd338
|
[
"MIT"
] | 1
|
2022-03-09T19:47:29.000Z
|
2022-03-09T19:47:29.000Z
|
lab/tests/test_templatetags.py
|
lajarre/euphrosyne
|
14050097774b088e7345f9488ce74b205f7bd338
|
[
"MIT"
] | null | null | null |
lab/tests/test_templatetags.py
|
lajarre/euphrosyne
|
14050097774b088e7345f9488ce74b205f7bd338
|
[
"MIT"
] | null | null | null |
# pylint: disable=redefined-outer-name
# pylint: disable=invalid-name
from unittest.mock import patch
import pytest
from django.contrib.admin.helpers import AdminForm
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.utils import flatten_fieldsets
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.urls import reverse
from lab.widgets import DisabledSelectWithHidden
from ..admin.run import RunAdmin
from ..models import Run
from ..templatetags.methods import (
_get_adminfield_name,
detector_fields,
filters_field,
method_fields,
)
@pytest.fixture(scope="module", autouse=True)
def patch_disabledselectwithhidden_optgroups():
with patch.object(
DisabledSelectWithHidden, "optgroups", return_value=[]
) as _fixture:
yield _fixture
@pytest.fixture(scope="module", autouse=True)
def adminform():
# pylint: disable=line-too-long
# Raincoat: pypi package: Django==4.0 path: django/contrib/admin/options.py element: ModelAdmin._changeform_view
run_admin = RunAdmin(model=Run, admin_site=AdminSite())
request = RequestFactory().get(reverse("admin:lab_run_add"))
request.user = AnonymousUser()
fieldsets = run_admin.get_fieldsets(request, None)
ModelForm = run_admin.get_form( # pylint: disable=invalid-name
request, None, change=False, fields=flatten_fieldsets(fieldsets)
)
initial = run_admin.get_changeform_initial_data(request)
form = ModelForm(initial=initial)
return AdminForm(
form,
list(fieldsets),
{},
readonly_fields=run_admin.get_readonly_fields(request, None),
model_admin=run_admin,
)
def test_method_fields_returns_all_methods_in_order(adminform):
assert [admin_field.field.name for admin_field in method_fields(adminform)] == [
f.name for f in Run.get_method_fields()
]
def test_method_fields_returns_the_same_at_second_instanciation(adminform):
assert method_fields(adminform)
assert [_get_adminfield_name(f) for f in method_fields(adminform)] == [
_get_adminfield_name(f) for f in method_fields(adminform)
]
def test_PIXE_detector_fields_returns_corresponding_detector_fieldnames_in_order(
adminform,
):
assert [
detector_admin_field.field.name
for detector_admin_field in detector_fields(adminform, "method_PIXE")
] == [
"detector_LE0",
"detector_HE1",
"detector_HE2",
"detector_HE3",
"detector_HE4",
]
def test_LE0_filters_fields_returns_corresponding_filter_fieldnames_in_order(adminform):
assert (
filters_field(adminform, "detector_LE0").field.name
== "filters_for_detector_LE0"
)
| 30.755556
| 116
| 0.739162
|
7a28b56325a0a9c07824e427a4fac0c1cf48e5c7
| 1,783
|
py
|
Python
|
python2/tests/SMBConnectionTests/test_SMBHandler.py
|
Awingu/pysmb
|
93f442a0e4d92d0a9047184cc3e048d5b428f228
|
[
"Zlib"
] | 280
|
2015-01-21T14:34:41.000Z
|
2022-03-02T03:36:05.000Z
|
python2/tests/SMBConnectionTests/test_SMBHandler.py
|
Awingu/pysmb
|
93f442a0e4d92d0a9047184cc3e048d5b428f228
|
[
"Zlib"
] | 160
|
2015-01-09T22:05:44.000Z
|
2022-03-29T11:34:44.000Z
|
python2/tests/SMBConnectionTests/test_SMBHandler.py
|
Awingu/pysmb
|
93f442a0e4d92d0a9047184cc3e048d5b428f228
|
[
"Zlib"
] | 101
|
2015-01-17T21:12:22.000Z
|
2022-01-26T11:12:16.000Z
|
# -*- coding: utf-8 -*-
import os, urllib, urllib2, time, random
from StringIO import StringIO
from smb.SMBHandler import SMBHandler
import util
try:
import hashlib
def MD5(): return hashlib.md5()
except ImportError:
import md5
def MD5(): return md5.new()
def test_basic():
# Basic test for smb URLs
director = urllib2.build_opener(SMBHandler)
fh = director.open('smb://%(user)s:%(password)s@%(server_ip)s/smbtest/rfc1001.txt' % util.getConnectionInfo())
s = fh.read()
md = MD5()
md.update(s)
assert md.hexdigest() == '5367c2bbf97f521059c78eab65309ad3'
assert len(s) == 158437
fh.close()
def test_unicode():
# Test smb URLs with unicode paths
director = urllib2.build_opener(SMBHandler)
fh = director.open(u'smb://%(user)s:%(password)s@%(server_ip)s/smbtest/测试文件夹/垃圾文件.dat' % util.getConnectionInfo())
s = fh.read()
md = MD5()
md.update(s)
assert md.hexdigest() == '8a44c1e80d55e91c92350955cdf83442'
assert len(s) == 256000
fh.close()
TEST_FILENAME = os.path.join(os.path.dirname(__file__), os.pardir, 'SupportFiles', 'binary.dat')
TEST_FILESIZE = 256000
TEST_DIGEST = 'bb6303f76e29f354b6fdf6ef58587e48'
def test_upload():
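    # Upload a local binary file through an smb:// URL, read it back, and verify its MD5 digest and size.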
info = util.getConnectionInfo()
info['filename'] = os.sep + 'StoreTest-%d-%d.dat' % ( time.time(), random.randint(0, 10000) )
director = urllib2.build_opener(SMBHandler)
upload_fh = director.open('smb://%(user)s:%(password)s@%(server_ip)s/smbtest/%(filename)s' % info, data = open(TEST_FILENAME, 'rb'))
retr_fh = director.open('smb://%(user)s:%(password)s@%(server_ip)s/smbtest/%(filename)s' % info)
s = retr_fh.read()
md = MD5()
md.update(s)
assert md.hexdigest() == TEST_DIGEST
assert len(s) == TEST_FILESIZE
| 26.61194
| 136
| 0.667414
|
98bd8ef69536460803d892ac98811299af3d30f3
| 931
|
py
|
Python
|
troposphereWinds/Zonal/250v850.py
|
Mihir-DG/Tropospheric-Temperature-and-Zonal-Wind-Profiles
|
13e0c706faed446f10758341807c066670260e0b
|
[
"MIT"
] | null | null | null |
troposphereWinds/Zonal/250v850.py
|
Mihir-DG/Tropospheric-Temperature-and-Zonal-Wind-Profiles
|
13e0c706faed446f10758341807c066670260e0b
|
[
"MIT"
] | null | null | null |
troposphereWinds/Zonal/250v850.py
|
Mihir-DG/Tropospheric-Temperature-and-Zonal-Wind-Profiles
|
13e0c706faed446f10758341807c066670260e0b
|
[
"MIT"
] | null | null | null |
from netCDF4 import Dataset as dst
from matplotlib import pyplot as plt
nc = dst('../uwnd.mon.mean.nc', mode='r')
iterator = -120
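# Build a latitude profile of zonal wind at pressure-level index 2 (labelled 850 mbar),
# averaging each latitude row over all longitudes; the second loop below does the same
# for level index 8 (250 mbar).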
while iterator < 0:
profile850 = []
for latitude in list(nc.variables['uwnd'])[iterator][2]:
item = []
for longitude in latitude:
item.append(longitude)
iterator += 1
average = sum(item)/len(item)
profile850.append(average)
iterator_a = -120
while iterator_a < 0:
profile250 = []
for latitude in list(nc.variables['uwnd'])[iterator_a][8]:
item2 = []
for longitude in latitude:
item2.append(longitude)
iterator_a += 1
average = sum(item2)/len(item2)
profile250.append(average)
print(nc.variables['level'][:])
a = plt.plot(nc.variables['lat'][:],profile850, color='r', label='850 mbar')
b = plt.plot(nc.variables['lat'][:],profile250, color='g', label='250 mbar')
plt.xlabel("Latitude (°)")
plt.ylabel("Wind (m/s)")
plt.legend()
plt.savefig("plot_250v850.png")
plt.show()
| 28.212121
| 76
| 0.684211
|
dbee3b4e48e997eb8cb4e5b5c1c1e4f19a96d714
| 511
|
py
|
Python
|
news/migrations/0005_auto_20180824_0009.py
|
Akshita-Rastogi/Summarizer
|
b8ad34720428c906ee9b6ce8c1a3b8ec01787cd5
|
[
"MIT"
] | 9
|
2020-06-08T20:03:42.000Z
|
2021-06-24T10:52:02.000Z
|
news/migrations/0005_auto_20180824_0009.py
|
Akshita-Rastogi/Summarizer
|
b8ad34720428c906ee9b6ce8c1a3b8ec01787cd5
|
[
"MIT"
] | 7
|
2020-06-06T01:26:08.000Z
|
2022-02-10T11:26:58.000Z
|
news/migrations/0005_auto_20180824_0009.py
|
shubham1507/sumup
|
053766da121ae22b3d04c87592ffab5e9e274ea8
|
[
"MIT"
] | 7
|
2018-11-25T08:54:56.000Z
|
2021-12-21T21:12:19.000Z
|
# Generated by Django 2.0.5 on 2018-08-23 18:24
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0004_auto_20180824_0001'),
]
operations = [
migrations.AlterField(
model_name='document',
name='summary_p',
field=models.IntegerField(validators=[django.core.validators.MinValueValidator(10), django.core.validators.MaxValueValidator(50)]),
),
]
| 25.55
| 143
| 0.661448
|
f8318b4fe7374e790ed739188a05afaa66e0eff7
| 4,038
|
py
|
Python
|
custom_components/samsungtv_smart/api/upnp.py
|
hwikene/ha-samsungtv-smart
|
47a44da6a15af8084e56223e94c41e0f206327d5
|
[
"Apache-2.0"
] | null | null | null |
custom_components/samsungtv_smart/api/upnp.py
|
hwikene/ha-samsungtv-smart
|
47a44da6a15af8084e56223e94c41e0f206327d5
|
[
"Apache-2.0"
] | null | null | null |
custom_components/samsungtv_smart/api/upnp.py
|
hwikene/ha-samsungtv-smart
|
47a44da6a15af8084e56223e94c41e0f206327d5
|
[
"Apache-2.0"
] | null | null | null |
# SmartThings TV integration
# import requests
import xml.etree.ElementTree as ET
from aiohttp import ClientSession
from async_timeout import timeout
from typing import Optional
DEFAULT_TIMEOUT = 0.2
class upnp:
def __init__(self, host, session: Optional[ClientSession] = None):
self._host = host
self._mute = False
self._volume = 0
self._connected = False
if session:
self._session = session
self._managed_session = False
else:
self._session = ClientSession()
self._managed_session = True
def __enter__(self):
return self
async def _SOAPrequest(self, action, arguments, protocole):
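        # Build a SOAP envelope for the given UPnP action and POST it to the TV's control
        # endpoint; returns the raw response body, or None on failure or timeout.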
headers = {
"SOAPAction": '"urn:schemas-upnp-org:service:{protocole}:1#{action}"'.format(
action=action, protocole=protocole
),
"content-type": "text/xml",
}
body = """<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:{action} xmlns:u="urn:schemas-upnp-org:service:{protocole}:1">
<InstanceID>0</InstanceID>
{arguments}
</u:{action}>
</s:Body>
</s:Envelope>""".format(
action=action, arguments=arguments, protocole=protocole
)
response = None
try:
with timeout(DEFAULT_TIMEOUT):
async with self._session.post(
f"http://{self._host}:9197/upnp/control/{protocole}1",
headers=headers,
data=body,
raise_for_status=True,
) as resp:
response = await resp.content.read()
self._connected = True
except:
self._connected = False
return response
@property
def connected(self):
return self._connected
async def async_get_volume(self):
response = await self._SOAPrequest(
"GetVolume", "<Channel>Master</Channel>", "RenderingControl"
)
if response is not None:
volume_xml = response.decode("utf8")
tree = ET.fromstring(volume_xml)
for elem in tree.iter(tag="CurrentVolume"):
self._volume = elem.text
return self._volume
async def async_set_volume(self, volume):
await self._SOAPrequest(
"SetVolume",
"<Channel>Master</Channel><DesiredVolume>{}</DesiredVolume>".format(volume),
"RenderingControl",
)
async def async_get_mute(self):
response = await self._SOAPrequest(
"GetMute", "<Channel>Master</Channel>", "RenderingControl"
)
if response is not None:
# mute_xml = response.decode('utf8')
tree = ET.fromstring(response.decode("utf8"))
mute = 0
for elem in tree.iter(tag="CurrentMute"):
mute = elem.text
if int(mute) == 0:
self._mute = False
else:
self._mute = True
return self._mute
async def async_set_current_media(self, url):
""" Set media to playback and play it."""
try:
await self._SOAPrequest(
"SetAVTransportURI",
"<CurrentURI>{url}</CurrentURI><CurrentURIMetaData></CurrentURIMetaData>".format(
url=url
),
"AVTransport",
)
await self._SOAPrequest("Play", "<Speed>1</Speed>", "AVTransport")
except Exception:
pass
async def async_play(self):
""" Play media that was already set as current."""
try:
await self._SOAPrequest("Play", "<Speed>1</Speed>", "AVTransport")
except Exception:
pass
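# Minimal usage sketch: the address below is a placeholder and assumes a TV that
# exposes the UPnP control endpoints on port 9197, as this class expects. All
# network errors are swallowed by _SOAPrequest, so the demo also runs harmlessly
# when no TV is reachable.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        async with ClientSession() as session:
            tv = upnp("192.168.1.50", session=session)  # placeholder address
            print("volume:", await tv.async_get_volume())
            print("muted:", await tv.async_get_mute())
            print("connected:", tv.connected)

    asyncio.run(_demo())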
| 33.65
| 140
| 0.540367
|
37d641b4258b341cbae037d1f4e0e4b957f6e889
| 3,722
|
py
|
Python
|
functions/ginput.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
functions/ginput.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
functions/ginput.py
|
chiluf/visvis.dev
|
373846ea25044b7ca50f44c63dab4248e14deacd
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
import visvis as vv
from visvis.pypoints import Pointset
import time
class GinputHelper(object):
""" GinputHelper()
Helper class for ginput function.
Keeps track of things.
"""
def __init__(self):
self.axes = None
self.pp = None
self.N = 0
def Start(self, axes, pp, N):
self.Unregister()
if axes:
# Store pointset and axes
self.axes = axes
self.pp = pp
self.N = N
# Register with axes
self.axes.eventMouseDown.Bind(self.OnMouseDown)
self.axes.eventDoubleClick.Bind(self.OnDoubleClick)
# Register with figure
fig = self.axes.GetFigure()
if fig:
fig.eventKeyDown.Unbind(self.OnKeyDown)
fig.eventKeyDown.Bind(self.OnKeyDown)
def Unregister(self):
if self.axes:
# Unregister axes
self.axes.eventMouseDown.Unbind(self.OnMouseDown)
self.axes.eventDoubleClick.Unbind(self.OnDoubleClick)
            # Unregister figure
fig = self.axes.GetFigure()
if fig:
fig.eventKeyDown.Unbind(self.OnKeyDown)
fig.eventKeyDown.Bind(self.OnKeyDown)
# Remove references
self.axes = None
self.pp = None
self.N = 0
def OnMouseDown(self, event):
# Add point
if self.pp is not None:
self.pp.append(event.x2d, event.y2d, 0.1)
# Draw
if self.axes:
fig = self.axes.GetFigure()
if fig:
fig.DrawNow()
# Is this the last point?
if self.pp is not None and self.N and len(self.pp)==self.N:
self.Unregister()
# Accept event
return True
def OnDoubleClick(self, event):
self.Unregister()
def OnKeyDown(self, event):
if event.key == vv.KEY_ENTER:
self.Unregister()
ginputHelper = GinputHelper()
def ginput(N=0, axes=None, ms='+', **kwargs):
""" ginput(N=0, axes=None, ms='+', **kwargs)
Graphical input: select N number of points with the mouse.
Returns a 2D pointset.
Parameters
----------
N : int
The maximum number of points to capture. If N=0, will keep capturing
until the user stops it. The capturing always stops when enter is
pressed or the mouse is double clicked. In the latter case a final
point is added.
axes : Axes instance
The axes to capture the points in, or the current axes if not given.
ms : markerStyle
The marker style to use for the points. See plot.
Any other keyword arguments are passed to plot to show the selected
points and the lines between them.
"""
# Get axes
if not axes:
axes = vv.gca()
# Get figure
fig = axes.GetFigure()
if not fig:
return
# Init pointset, helper, and line object
line = vv.plot(Pointset(2), axes=axes, ms=ms, **kwargs)
pp = line._points
ginputHelper.Start(axes, pp, N)
# Enter a loop
while ginputHelper.axes:
fig._ProcessGuiEvents()
time.sleep(0.1)
# Remove line object and return points
pp = Pointset(pp[:,:2])
line.Destroy()
return pp
if __name__ == '__main__':
    print(vv.ginput(3))
| 25.847222
| 76
| 0.551316
|
58b93c6b7dea7de41e3b25ea8d52565a07f2e200
| 3,635
|
py
|
Python
|
indico/modules/events/registration/models/invitations.py
|
uxmaster/indico
|
ecd19f17ef6fdc9f5584f59c87ec647319ce5d31
|
[
"MIT"
] | 1
|
2019-11-03T11:34:16.000Z
|
2019-11-03T11:34:16.000Z
|
indico/modules/events/registration/models/invitations.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
indico/modules/events/registration/models/invitations.py
|
NP-compete/indico
|
80db7ca0ef9d1f3240a16b9ff2d84bf0bf26c549
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from uuid import uuid4
from sqlalchemy.dialects.postgresql import UUID
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.i18n import L_
from indico.util.locators import locator_property
from indico.util.string import format_repr, return_ascii
from indico.util.struct.enum import RichIntEnum
class InvitationState(RichIntEnum):
__titles__ = [L_('Pending'), L_('Accepted'), L_('Declined')]
pending = 0
accepted = 1
declined = 2
class RegistrationInvitation(db.Model):
"""An invitation for someone to register"""
__tablename__ = 'invitations'
__table_args__ = (db.CheckConstraint("(state = {state}) OR (registration_id IS NULL)"
.format(state=InvitationState.accepted), name='registration_state'),
db.UniqueConstraint('registration_form_id', 'email'),
{'schema': 'event_registration'})
#: The ID of the invitation
id = db.Column(
db.Integer,
primary_key=True
)
#: The UUID of the invitation
uuid = db.Column(
UUID,
index=True,
unique=True,
nullable=False,
default=lambda: unicode(uuid4())
)
#: The ID of the registration form
registration_form_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.forms.id'),
index=True,
nullable=False
)
#: The ID of the registration (if accepted)
registration_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.registrations.id'),
index=True,
unique=True,
nullable=True
)
#: The state of the invitation
state = db.Column(
PyIntEnum(InvitationState),
nullable=False,
default=InvitationState.pending
)
#: Whether registration moderation should be skipped
skip_moderation = db.Column(
db.Boolean,
nullable=False,
default=False
)
#: The email of the invited person
email = db.Column(
db.String,
nullable=False
)
#: The first name of the invited person
first_name = db.Column(
db.String,
nullable=False
)
#: The last name of the invited person
last_name = db.Column(
db.String,
nullable=False
)
#: The affiliation of the invited person
affiliation = db.Column(
db.String,
nullable=False
)
#: The associated registration
registration = db.relationship(
'Registration',
lazy=True,
backref=db.backref(
'invitation',
lazy=True,
uselist=False
)
)
# relationship backrefs:
# - registration_form (RegistrationForm.invitations)
@locator_property
def locator(self):
return dict(self.registration_form.locator, invitation_id=self.id)
@locator.uuid
def locator(self):
"""A locator suitable for 'display' pages.
Instead of the numeric ID it uses the UUID
"""
assert self.uuid is not None
return dict(self.registration_form.locator, invitation=self.uuid)
@return_ascii
def __repr__(self):
full_name = '{} {}'.format(self.first_name, self.last_name)
return format_repr(self, 'id', 'registration_form_id', 'email', 'state', _text=full_name)
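# Minimal query sketch: `regform` is assumed to be a RegistrationForm instance;
# the filter only uses columns declared on this model.
#   pending = (RegistrationInvitation.query
#              .filter_by(registration_form_id=regform.id,
#                         state=InvitationState.pending)
#              .all())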
| 28.178295
| 109
| 0.634388
|
8cc8517af231199933b870acd3c0d7b4bd57072a
| 1,803
|
py
|
Python
|
cubic/audio_io.py
|
cobaltspeech/examples-python
|
74a8ad1c48c46cb029ede389baedbd44fccf9dd0
|
[
"Apache-2.0"
] | null | null | null |
cubic/audio_io.py
|
cobaltspeech/examples-python
|
74a8ad1c48c46cb029ede389baedbd44fccf9dd0
|
[
"Apache-2.0"
] | 2
|
2021-07-22T15:57:53.000Z
|
2021-08-03T22:29:36.000Z
|
cubic/audio_io.py
|
cobaltspeech/examples-python
|
74a8ad1c48c46cb029ede389baedbd44fccf9dd0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright(2021) Cobalt Speech and Language Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
class Recorder(object):
"""Recorder launches an external application to handle recording audio."""
def __init__(self, cmd):
self.args = cmd.split()
self.process = None
def start(self):
"""Start the external recording application."""
# Ignore if we already started it
if self.process is not None:
return
# Start the subprocess
self.process = subprocess.Popen(args=self.args,
stdout=subprocess.PIPE)
def stop(self):
"""Stop the external recording application."""
# Ignore if it is not running
if self.process is None:
return
# Stop the subprocess
self.process.stdout.close()
self.process.terminate()
self.process = None
def read(self, bufsize):
"""Read audio data from the external recording application."""
# Raise an error if we haven't started the app
if self.process is None:
raise RuntimeError("Recording application is not running")
# Get the data from stdout
return self.process.stdout.read(bufsize)
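# Minimal usage sketch: the arecord command line is only an assumption; any
# external program that writes raw audio to stdout works with this Recorder.
if __name__ == "__main__":
    rec = Recorder("arecord -q -t raw -f S16_LE -r 16000 -c 1")
    rec.start()
    chunk = rec.read(3200)  # roughly 0.1 s of 16 kHz, 16-bit mono audio
    print("read {} bytes".format(len(chunk)))
    rec.stop()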
| 30.559322
| 78
| 0.652246
|
61b081e2c76072d99ea2283632a4f35d260cc21a
| 2,478
|
py
|
Python
|
examples/python/mesapy_echo.py
|
yc-huang/incubator-teaclave
|
f7614d12b98984febd26c327f544c04cc2b94f98
|
[
"Apache-2.0"
] | null | null | null |
examples/python/mesapy_echo.py
|
yc-huang/incubator-teaclave
|
f7614d12b98984febd26c327f544c04cc2b94f98
|
[
"Apache-2.0"
] | null | null | null |
examples/python/mesapy_echo.py
|
yc-huang/incubator-teaclave
|
f7614d12b98984febd26c327f544c04cc2b94f98
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
from teaclave import (AuthenticationService, FrontendService,
AuthenticationClient, FrontendClient)
from utils import (AUTHENTICATION_SERVICE_ADDRESS, FRONTEND_SERVICE_ADDRESS,
AS_ROOT_CA_CERT_PATH, ENCLAVE_INFO_PATH, USER_ID,
USER_PASSWORD)
class MesaPyEchoExample:
def __init__(self, user_id, user_password):
self.user_id = user_id
self.user_password = user_password
def echo(self,
payload_file="mesapy_echo_payload.py",
message="Hello, Teaclave!"):
client = AuthenticationService(
AUTHENTICATION_SERVICE_ADDRESS, AS_ROOT_CA_CERT_PATH,
ENCLAVE_INFO_PATH).connect().get_client()
print("[+] registering user")
client.user_register(self.user_id, self.user_password)
print("[+] login")
token = client.user_login(self.user_id, self.user_password)
client = FrontendService(FRONTEND_SERVICE_ADDRESS,
AS_ROOT_CA_CERT_PATH,
ENCLAVE_INFO_PATH).connect().get_client()
metadata = {"id": self.user_id, "token": token}
client.metadata = metadata
print("[+] registering function")
with open(payload_file, "rb") as f:
payload = f.read()
function_id = client.register_function(
name="mesapy-echo",
description="An echo function implemented in Python",
executor_type="python",
payload=list(payload),
arguments=["message"])
print("[+] creating task")
task_id = client.create_task(function_id=function_id,
function_arguments={"message": message},
executor="mesapy")
print("[+] invoking task")
client.invoke_task(task_id)
print("[+] getting result")
result = client.get_task_result(task_id)
print("[+] done")
return bytes(result)
def main():
example = MesaPyEchoExample(USER_ID, USER_PASSWORD)
if len(sys.argv) == 2:
message = sys.argv[1]
rt = example.echo(message=message)
elif len(sys.argv) == 3:
payload = sys.argv[1]
message = sys.argv[2]
rt = example.echo(payload, message)
else:
rt = example.echo()
print("[+] function return: ", rt)
if __name__ == '__main__':
main()
| 31.367089
| 77
| 0.592817
|
32beb35ad6e2c510eceef4909c0999c7bd172187
| 22,172
|
py
|
Python
|
grammar_induction/earley_parser/nltk/parse/featurechart.py
|
tdonca/OpenBottle
|
f03d80e7b3645232fb97f91cf7fc2dc02f101ac2
|
[
"MIT"
] | 6
|
2017-01-22T03:15:01.000Z
|
2019-12-01T16:19:36.000Z
|
grammar_induction/earley_parser/nltk/parse/featurechart.py
|
tdonca/OpenBottle
|
f03d80e7b3645232fb97f91cf7fc2dc02f101ac2
|
[
"MIT"
] | 3
|
2017-01-15T01:32:04.000Z
|
2017-01-16T00:25:46.000Z
|
grammar_induction/earley_parser/nltk/parse/featurechart.py
|
tdonca/OpenBottle
|
f03d80e7b3645232fb97f91cf7fc2dc02f101ac2
|
[
"MIT"
] | 6
|
2017-01-19T21:49:55.000Z
|
2021-04-14T09:57:17.000Z
|
# -*- coding: utf-8 -*-
# Natural Language Toolkit: Chart Parser for Feature-Based Grammars
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Rob Speer <rspeer@mit.edu>
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Extension of chart parsing implementation to handle grammars with
feature structures as nodes.
"""
from __future__ import print_function, unicode_literals
from nltk.compat import xrange, python_2_unicode_compatible
from nltk.featstruct import FeatStruct, unify, TYPE, find_variables
from nltk.sem import logic
from nltk.tree import Tree
from nltk.grammar import (Nonterminal, Production, CFG,
FeatStructNonterminal, is_nonterminal,
is_terminal)
from nltk.parse.chart import (TreeEdge, Chart, ChartParser, EdgeI,
FundamentalRule, LeafInitRule,
EmptyPredictRule, BottomUpPredictRule,
SingleEdgeFundamentalRule,
BottomUpPredictCombineRule,
CachedTopDownPredictRule,
TopDownInitRule)
#////////////////////////////////////////////////////////////
# Tree Edge
#////////////////////////////////////////////////////////////
@python_2_unicode_compatible
class FeatureTreeEdge(TreeEdge):
"""
A specialized tree edge that allows shared variable bindings
between nonterminals on the left-hand side and right-hand side.
Each ``FeatureTreeEdge`` contains a set of ``bindings``, i.e., a
dictionary mapping from variables to values. If the edge is not
complete, then these bindings are simply stored. However, if the
edge is complete, then the constructor applies these bindings to
every nonterminal in the edge whose symbol implements the
interface ``SubstituteBindingsI``.
"""
def __init__(self, span, lhs, rhs, dot=0, bindings=None):
"""
Construct a new edge. If the edge is incomplete (i.e., if
``dot<len(rhs)``), then store the bindings as-is. If the edge
is complete (i.e., if ``dot==len(rhs)``), then apply the
bindings to all nonterminals in ``lhs`` and ``rhs``, and then
clear the bindings. See ``TreeEdge`` for a description of
the other arguments.
"""
if bindings is None: bindings = {}
# If the edge is complete, then substitute in the bindings,
# and then throw them away. (If we didn't throw them away, we
# might think that 2 complete edges are different just because
# they have different bindings, even though all bindings have
# already been applied.)
if dot == len(rhs) and bindings:
lhs = self._bind(lhs, bindings)
rhs = [self._bind(elt, bindings) for elt in rhs]
bindings = {}
# Initialize the edge.
TreeEdge.__init__(self, span, lhs, rhs, dot)
self._bindings = bindings
self._comparison_key = (self._comparison_key, tuple(sorted(bindings.items())))
@staticmethod
def from_production(production, index):
"""
:return: A new ``TreeEdge`` formed from the given production.
The new edge's left-hand side and right-hand side will
be taken from ``production``; its span will be
``(index,index)``; and its dot position will be ``0``.
:rtype: TreeEdge
"""
return FeatureTreeEdge(span=(index, index), lhs=production.lhs(),
rhs=production.rhs(), dot=0)
def move_dot_forward(self, new_end, bindings=None):
"""
:return: A new ``FeatureTreeEdge`` formed from this edge.
The new edge's dot position is increased by ``1``,
and its end index will be replaced by ``new_end``.
:rtype: FeatureTreeEdge
:param new_end: The new end index.
:type new_end: int
:param bindings: Bindings for the new edge.
:type bindings: dict
"""
return FeatureTreeEdge(span=(self._span[0], new_end),
lhs=self._lhs, rhs=self._rhs,
dot=self._dot+1, bindings=bindings)
def _bind(self, nt, bindings):
if not isinstance(nt, FeatStructNonterminal): return nt
return nt.substitute_bindings(bindings)
def next_with_bindings(self):
return self._bind(self.nextsym(), self._bindings)
def bindings(self):
"""
Return a copy of this edge's bindings dictionary.
"""
return self._bindings.copy()
def variables(self):
"""
:return: The set of variables used by this edge.
:rtype: set(Variable)
"""
return find_variables([self._lhs] + list(self._rhs) +
list(self._bindings.keys()) +
list(self._bindings.values()),
fs_class=FeatStruct)
def __str__(self):
if self.is_complete():
return TreeEdge.__unicode__(self)
else:
bindings = '{%s}' % ', '.join('%s: %r' % item for item in
sorted(self._bindings.items()))
return '%s %s' % (TreeEdge.__unicode__(self), bindings)
#////////////////////////////////////////////////////////////
# A specialized Chart for feature grammars
#////////////////////////////////////////////////////////////
# TODO: subsumes check when adding new edges
class FeatureChart(Chart):
"""
A Chart for feature grammars.
:see: ``Chart`` for more information.
"""
def select(self, **restrictions):
"""
Returns an iterator over the edges in this chart.
See ``Chart.select`` for more information about the
``restrictions`` on the edges.
"""
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(self._edges)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(self._get_type_if_possible(restrictions[key])
for key in restr_keys)
return iter(self._indexes[restr_keys].get(vals, []))
def _add_index(self, restr_keys):
"""
A helper function for ``select``, which creates a new index for
a given set of attributes (aka restriction keys).
"""
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = {}
# Add all existing edges to the index.
for edge in self._edges:
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
"""
A helper function for ``insert``, which registers the new
edge with all existing indexes.
"""
for (restr_keys, index) in self._indexes.items():
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index.setdefault(vals, []).append(edge)
def _get_type_if_possible(self, item):
"""
Helper function which returns the ``TYPE`` feature of the ``item``,
if it exists, otherwise it returns the ``item`` itself
"""
if isinstance(item, dict) and TYPE in item:
return item[TYPE]
else:
return item
def parses(self, start, tree_class=Tree):
for edge in self.select(start=0, end=self._num_leaves):
if ((isinstance(edge, FeatureTreeEdge)) and
(edge.lhs()[TYPE] == start[TYPE]) and
(unify(edge.lhs(), start, rename_vars=True))
):
for tree in self.trees(edge, complete=True, tree_class=tree_class):
yield tree
#////////////////////////////////////////////////////////////
# Fundamental Rule
#////////////////////////////////////////////////////////////
class FeatureFundamentalRule(FundamentalRule):
"""
A specialized version of the fundamental rule that operates on
nonterminals whose symbols are ``FeatStructNonterminal``s. Rather
    than simply comparing the nonterminals for equality, they are
unified. Variable bindings from these unifications are collected
and stored in the chart using a ``FeatureTreeEdge``. When a
complete edge is generated, these bindings are applied to all
nonterminals in the edge.
The fundamental rule states that:
- ``[A -> alpha \* B1 beta][i:j]``
- ``[B2 -> gamma \*][j:k]``
licenses the edge:
    - ``[A -> alpha B3 \* beta][i:k]``
assuming that B1 and B2 can be unified to generate B3.
"""
def apply(self, chart, grammar, left_edge, right_edge):
# Make sure the rule is applicable.
if not (left_edge.end() == right_edge.start() and
left_edge.is_incomplete() and
right_edge.is_complete() and
isinstance(left_edge, FeatureTreeEdge)):
return
found = right_edge.lhs()
nextsym = left_edge.nextsym()
if isinstance(right_edge, FeatureTreeEdge):
if not is_nonterminal(nextsym): return
if left_edge.nextsym()[TYPE] != right_edge.lhs()[TYPE]: return
# Create a copy of the bindings.
bindings = left_edge.bindings()
# We rename vars here, because we don't want variables
# from the two different productions to match.
found = found.rename_variables(used_vars=left_edge.variables())
# Unify B1 (left_edge.nextsym) with B2 (right_edge.lhs) to
# generate B3 (result).
result = unify(nextsym, found, bindings, rename_vars=False)
if result is None: return
else:
if nextsym != found: return
# Create a copy of the bindings.
bindings = left_edge.bindings()
# Construct the new edge.
new_edge = left_edge.move_dot_forward(right_edge.end(), bindings)
# Add it to the chart, with appropriate child pointers.
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class FeatureSingleEdgeFundamentalRule(SingleEdgeFundamentalRule):
"""
A specialized version of the completer / single edge fundamental rule
that operates on nonterminals whose symbols are ``FeatStructNonterminal``s.
Rather than simply comparing the nonterminals for equality, they are
unified.
"""
_fundamental_rule = FeatureFundamentalRule()
def _apply_complete(self, chart, grammar, right_edge):
fr = self._fundamental_rule
for left_edge in chart.select(end=right_edge.start(),
is_complete=False,
nextsym=right_edge.lhs()):
for new_edge in fr.apply(chart, grammar, left_edge, right_edge):
yield new_edge
def _apply_incomplete(self, chart, grammar, left_edge):
fr = self._fundamental_rule
for right_edge in chart.select(start=left_edge.end(),
is_complete=True,
lhs=left_edge.nextsym()):
for new_edge in fr.apply(chart, grammar, left_edge, right_edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Top-Down Prediction
#////////////////////////////////////////////////////////////
class FeatureTopDownInitRule(TopDownInitRule):
def apply(self, chart, grammar):
for prod in grammar.productions(lhs=grammar.start()):
new_edge = FeatureTreeEdge.from_production(prod, 0)
if chart.insert(new_edge, ()):
yield new_edge
class FeatureTopDownPredictRule(CachedTopDownPredictRule):
"""
A specialized version of the (cached) top down predict rule that operates
on nonterminals whose symbols are ``FeatStructNonterminal``s. Rather
than simply comparing the nonterminals for equality, they are
unified.
The top down expand rule states that:
- ``[A -> alpha \* B1 beta][i:j]``
licenses the edge:
- ``[B2 -> \* gamma][j:j]``
for each grammar production ``B2 -> gamma``, assuming that B1
and B2 can be unified.
"""
def apply(self, chart, grammar, edge):
if edge.is_complete(): return
nextsym, index = edge.nextsym(), edge.end()
if not is_nonterminal(nextsym): return
# If we've already applied this rule to an edge with the same
# next & end, and the chart & grammar have not changed, then
# just return (no new edges to add).
nextsym_with_bindings = edge.next_with_bindings()
done = self._done.get((nextsym_with_bindings, index), (None, None))
if done[0] is chart and done[1] is grammar:
return
for prod in grammar.productions(lhs=nextsym):
# If the left corner in the predicted production is
# leaf, it must match with the input.
if prod.rhs():
first = prod.rhs()[0]
if is_terminal(first):
if index >= chart.num_leaves(): continue
if first != chart.leaf(index): continue
# We rename vars here, because we don't want variables
# from the two different productions to match.
if unify(prod.lhs(), nextsym_with_bindings, rename_vars=True):
new_edge = FeatureTreeEdge.from_production(prod, edge.end())
if chart.insert(new_edge, ()):
yield new_edge
# Record the fact that we've applied this rule.
self._done[nextsym_with_bindings, index] = (chart, grammar)
#////////////////////////////////////////////////////////////
# Bottom-Up Prediction
#////////////////////////////////////////////////////////////
class FeatureBottomUpPredictRule(BottomUpPredictRule):
def apply(self, chart, grammar, edge):
if edge.is_incomplete(): return
for prod in grammar.productions(rhs=edge.lhs()):
if isinstance(edge, FeatureTreeEdge):
_next = prod.rhs()[0]
if not is_nonterminal(_next): continue
new_edge = FeatureTreeEdge.from_production(prod, edge.start())
if chart.insert(new_edge, ()):
yield new_edge
class FeatureBottomUpPredictCombineRule(BottomUpPredictCombineRule):
def apply(self, chart, grammar, edge):
if edge.is_incomplete(): return
found = edge.lhs()
for prod in grammar.productions(rhs=found):
bindings = {}
if isinstance(edge, FeatureTreeEdge):
_next = prod.rhs()[0]
if not is_nonterminal(_next): continue
# We rename vars here, because we don't want variables
# from the two different productions to match.
used_vars = find_variables((prod.lhs(),) + prod.rhs(),
fs_class=FeatStruct)
found = found.rename_variables(used_vars=used_vars)
result = unify(_next, found, bindings, rename_vars=False)
if result is None: continue
new_edge = (FeatureTreeEdge.from_production(prod, edge.start())
.move_dot_forward(edge.end(), bindings))
if chart.insert(new_edge, (edge,)):
yield new_edge
class FeatureEmptyPredictRule(EmptyPredictRule):
def apply(self, chart, grammar):
for prod in grammar.productions(empty=True):
for index in xrange(chart.num_leaves() + 1):
new_edge = FeatureTreeEdge.from_production(prod, index)
if chart.insert(new_edge, ()):
yield new_edge
#////////////////////////////////////////////////////////////
# Feature Chart Parser
#////////////////////////////////////////////////////////////
TD_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureTopDownPredictRule(),
FeatureSingleEdgeFundamentalRule()]
BU_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictRule(),
FeatureSingleEdgeFundamentalRule()]
BU_LC_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictCombineRule(),
FeatureSingleEdgeFundamentalRule()]
class FeatureChartParser(ChartParser):
def __init__(self, grammar,
strategy=BU_LC_FEATURE_STRATEGY,
trace_chart_width=20,
chart_class=FeatureChart,
**parser_args):
ChartParser.__init__(self, grammar,
strategy=strategy,
trace_chart_width=trace_chart_width,
chart_class=chart_class,
**parser_args)
class FeatureTopDownChartParser(FeatureChartParser):
def __init__(self, grammar, **parser_args):
FeatureChartParser.__init__(self, grammar, TD_FEATURE_STRATEGY, **parser_args)
class FeatureBottomUpChartParser(FeatureChartParser):
def __init__(self, grammar, **parser_args):
FeatureChartParser.__init__(self, grammar, BU_FEATURE_STRATEGY, **parser_args)
class FeatureBottomUpLeftCornerChartParser(FeatureChartParser):
def __init__(self, grammar, **parser_args):
FeatureChartParser.__init__(self, grammar, BU_LC_FEATURE_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Instantiate Variable Chart
#////////////////////////////////////////////////////////////
class InstantiateVarsChart(FeatureChart):
"""
A specialized chart that 'instantiates' variables whose names
start with '@', by replacing them with unique new variables.
In particular, whenever a complete edge is added to the chart, any
variables in the edge's ``lhs`` whose names start with '@' will be
replaced by unique new ``Variable``s.
"""
def __init__(self, tokens):
FeatureChart.__init__(self, tokens)
def initialize(self):
self._instantiated = set()
FeatureChart.initialize(self)
def insert(self, edge, child_pointer_list):
if edge in self._instantiated: return False
self.instantiate_edge(edge)
return FeatureChart.insert(self, edge, child_pointer_list)
def instantiate_edge(self, edge):
"""
If the edge is a ``FeatureTreeEdge``, and it is complete,
then instantiate all variables whose names start with '@',
by replacing them with unique new variables.
Note that instantiation is done in-place, since the
parsing algorithms might already hold a reference to
the edge for future use.
"""
# If the edge is a leaf, or is not complete, or is
# already in the chart, then just return it as-is.
if not isinstance(edge, FeatureTreeEdge): return
if not edge.is_complete(): return
if edge in self._edge_to_cpls: return
# Get a list of variables that need to be instantiated.
# If there are none, then return as-is.
inst_vars = self.inst_vars(edge)
if not inst_vars: return
# Instantiate the edge!
self._instantiated.add(edge)
edge._lhs = edge.lhs().substitute_bindings(inst_vars)
def inst_vars(self, edge):
return dict((var, logic.unique_variable())
for var in edge.lhs().variables()
if var.name.startswith('@'))
#////////////////////////////////////////////////////////////
# Demo
#////////////////////////////////////////////////////////////
def demo_grammar():
from nltk.grammar import FeatureGrammar
return FeatureGrammar.fromstring("""
S -> NP VP
PP -> Prep NP
NP -> NP PP
VP -> VP PP
VP -> Verb NP
VP -> Verb
NP -> Det[pl=?x] Noun[pl=?x]
NP -> "John"
NP -> "I"
Det -> "the"
Det -> "my"
Det[-pl] -> "a"
Noun[-pl] -> "dog"
Noun[-pl] -> "cookie"
Verb -> "ate"
Verb -> "saw"
Prep -> "with"
Prep -> "under"
""")
def demo(print_times=True, print_grammar=True,
print_trees=True, print_sentence=True,
trace=1,
parser=FeatureChartParser,
sent='I saw John with a dog with my cookie'):
import sys, time
print()
grammar = demo_grammar()
if print_grammar:
print(grammar)
print()
print("*", parser.__name__)
if print_sentence:
print("Sentence:", sent)
tokens = sent.split()
t = time.clock()
cp = parser(grammar, trace=trace)
chart = cp.chart_parse(tokens)
trees = list(chart.parses(grammar.start()))
if print_times:
print("Time: %s" % (time.clock() - t))
if print_trees:
for tree in trees: print(tree)
else:
print("Nr trees:", len(trees))
def run_profile():
import profile
profile.run('for i in range(1): demo()', '/tmp/profile.out')
import pstats
p = pstats.Stats('/tmp/profile.out')
p.strip_dirs().sort_stats('time', 'cum').print_stats(60)
p.strip_dirs().sort_stats('cum', 'time').print_stats(60)
if __name__ == '__main__':
from nltk.data import load
demo()
print()
grammar = load('grammars/book_grammars/feat0.fcfg')
cp = FeatureChartParser(grammar, trace=2)
sent = 'Kim likes children'
tokens = sent.split()
trees = cp.parse(tokens)
for tree in trees:
print(tree)
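# A minimal parse with the toy grammar from demo_grammar(), sketched here
# without the tracing machinery used by demo():
#   cp = FeatureBottomUpLeftCornerChartParser(demo_grammar())
#   for tree in cp.parse('I saw John with a dog'.split()):
#       print(tree)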
| 38.29361
| 89
| 0.583619
|
ef797e8b7060ed81343a048dd7a40516ba42a305
| 4,915
|
py
|
Python
|
research/skip_thoughts/skip_thoughts/ops/gru_cell.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | 1
|
2019-09-11T09:41:11.000Z
|
2019-09-11T09:41:11.000Z
|
research/skip_thoughts/skip_thoughts/ops/gru_cell.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
research/skip_thoughts/skip_thoughts/ops/gru_cell.py
|
vincentcheny/models
|
afb1a59fc1bc792ac72d1a3e22e2469020529788
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GRU cell implementation for the skip-thought vectors model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_layer_norm = tf.contrib.layers.layer_norm
class LayerNormGRUCell(tf.contrib.rnn.RNNCell):
"""GRU cell with layer normalization.
The layer normalization implementation is based on:
https://arxiv.org/abs/1607.06450.
"Layer Normalization"
Jimmy Lei Ba, Jamie Ryan Kiros, Geoffrey E. Hinton
"""
def __init__(self,
num_units,
w_initializer,
u_initializer,
b_initializer,
activation=tf.nn.tanh):
"""Initializes the cell.
Args:
num_units: Number of cell units.
w_initializer: Initializer for the "W" (input) parameter matrices.
u_initializer: Initializer for the "U" (recurrent) parameter matrices.
b_initializer: Initializer for the "b" (bias) parameter vectors.
activation: Cell activation function.
"""
self._num_units = num_units
self._w_initializer = w_initializer
self._u_initializer = u_initializer
self._b_initializer = b_initializer
self._activation = activation
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def _w_h_initializer(self):
"""Returns an initializer for the "W_h" parameter matrix.
See equation (23) in the paper. The "W_h" parameter matrix is the
concatenation of two parameter submatrices. The matrix returned is
[U_z, U_r].
Returns:
A Tensor with shape [num_units, 2 * num_units] as described above.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
num_units = self._num_units
assert shape == [num_units, 2 * num_units]
u_z = self._u_initializer([num_units, num_units], dtype, partition_info)
u_r = self._u_initializer([num_units, num_units], dtype, partition_info)
return tf.concat([u_z, u_r], 1)
return _initializer
def _w_x_initializer(self, input_dim):
"""Returns an initializer for the "W_x" parameter matrix.
See equation (23) in the paper. The "W_x" parameter matrix is the
concatenation of two parameter submatrices. The matrix returned is
[W_z, W_r].
Args:
input_dim: The dimension of the cell inputs.
Returns:
A Tensor with shape [input_dim, 2 * num_units] as described above.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
num_units = self._num_units
assert shape == [input_dim, 2 * num_units]
w_z = self._w_initializer([input_dim, num_units], dtype, partition_info)
w_r = self._w_initializer([input_dim, num_units], dtype, partition_info)
return tf.concat([w_z, w_r], 1)
return _initializer
def __call__(self, inputs, state, scope=None):
"""GRU cell with layer normalization."""
input_dim = inputs.get_shape().as_list()[1]
num_units = self._num_units
with tf.variable_scope(scope or "gru_cell"):
with tf.variable_scope("gates"):
w_h = tf.get_variable(
"w_h", [num_units, 2 * num_units],
initializer=self._w_h_initializer())
w_x = tf.get_variable(
"w_x", [input_dim, 2 * num_units],
initializer=self._w_x_initializer(input_dim))
z_and_r = (_layer_norm(tf.matmul(state, w_h), scope="layer_norm/w_h") +
_layer_norm(tf.matmul(inputs, w_x), scope="layer_norm/w_x"))
z, r = tf.split(tf.sigmoid(z_and_r), 2, 1)
with tf.variable_scope("candidate"):
w = tf.get_variable(
"w", [input_dim, num_units], initializer=self._w_initializer)
u = tf.get_variable(
"u", [num_units, num_units], initializer=self._u_initializer)
h_hat = (r * _layer_norm(tf.matmul(state, u), scope="layer_norm/u") +
_layer_norm(tf.matmul(inputs, w), scope="layer_norm/w"))
new_h = (1 - z) * state + z * self._activation(h_hat)
return new_h, new_h
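# Minimal wiring sketch, assuming a TensorFlow 1.x environment (tf.contrib and
# tf.placeholder available, matching the imports above); the initializers are
# arbitrary choices for illustration only.
if __name__ == "__main__":
    inputs = tf.placeholder(tf.float32, shape=[None, None, 64])  # [batch, time, dim]
    cell = LayerNormGRUCell(
        num_units=128,
        w_initializer=tf.random_uniform_initializer(-0.1, 0.1),
        u_initializer=tf.orthogonal_initializer(),
        b_initializer=tf.constant_initializer(0.0))
    outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
    print(outputs.get_shape())  # (?, ?, 128)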
| 36.407407
| 81
| 0.652492
|
5732e5b11700273781b162c6f1181ff05ff3e977
| 2,630
|
py
|
Python
|
src/convert_sparse_to_patoh.py
|
mywoodstock/HAMPtOn
|
43565122a0009e6b33c7131072281df312cb488b
|
[
"MIT"
] | null | null | null |
src/convert_sparse_to_patoh.py
|
mywoodstock/HAMPtOn
|
43565122a0009e6b33c7131072281df312cb488b
|
[
"MIT"
] | null | null | null |
src/convert_sparse_to_patoh.py
|
mywoodstock/HAMPtOn
|
43565122a0009e6b33c7131072281df312cb488b
|
[
"MIT"
] | null | null | null |
##
# Project:
#
# File: convert_sparse_to_patoh.py
# Created: Mar 12, 2014
#
# Author: Abhinav Sarje <asarje@lbl.gov>
#
# Description: Convert sparse graph representation (row, col, val) into hypergraph representation
# for input to PATOH.
##
import sys, getopt
def parse_arguments(argv):
try:
opts, args = getopt.getopt(argv, "i:o:")
if len(opts) < 2:
raise getopt.GetoptError("Give arguments")
except getopt.GetoptError:
		print "convert_sparse_to_patoh.py -i <inputfile> -o <outputfile>"
sys.exit(2)
for opt, arg in opts:
if opt in ("-i"):
inputfile = arg
elif opt in ("-o"):
outputfile = arg
else:
print "wrong argument"
sys.exit(2)
return inputfile, outputfile
def read_sparse_file(filename, data):
ff = open(filename)
head = ff.readline()
header = ""
while head.startswith("%"):
header += head
head = ff.readline()
headwords = head.strip().split()
if len(headwords) == 2:
nodes = int(headwords[0])
edges = int(headwords[1])
elif len(headwords) == 3:
nodes = int(headwords[0])
nodes2 = int(headwords[1])
edges = int(headwords[2])
if nodes != nodes2:
print "error: input matrix is not square"
ff.close()
sys.exit(2)
else:
print "wrong header in input file"
ff.close()
sys.exit(2)
count = 0
while True:
line = ff.readline()
if not line: break
count += 1
words = line.strip().split()
data += [(int(words[0]), int(words[1]), float(words[2]))]
if count != edges:
print "error: mismatch in number of edges and data in input file"
ff.close()
sys.exit(2)
ff.close()
return nodes, edges
## assuming symmetric data is already present
def convert_sparse_to_hyper(sparse_data, hyper_data):
pins = 0
for row, col, val in sparse_data:
if col not in hyper_data:
hyper_data[col] = []
hyper_data[col] += [row]
pins += 1
return pins
def write_hyper_file(filename, nnodes, nedges, hyper_data):
ff = open(filename, 'w')
head = "%d\t%d\t%d\t%d\n" % (1, nnodes, nnodes, nedges)
ff.write(head)
count = 1
for net, nodes in sorted(hyper_data.items()):
if count != net:
print "error: missing data?"
ff.close()
sys.exit(2)
record = ""
for node in nodes:
record += "%d\t" % node
record += "\n"
ff.write(record)
count += 1
ff.close()
infile, outfile = parse_arguments(sys.argv[1:])
sparse_data = []
nnodes, nedges = read_sparse_file(infile, sparse_data)
hyper_data = {} ## { net/hyperedge = [ nodeid, ... ], ... }
npins = convert_sparse_to_hyper(sparse_data, hyper_data)
print "nodes = %d, edges = %d, pins = %d" % (nnodes, nedges, npins)
write_hyper_file(outfile, nnodes, npins, hyper_data)
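## Example invocation (illustrative; the file names are placeholders). The input
## is a whitespace-separated "row col value" listing preceded by a header line of
## either "nodes edges" or "nodes nodes edges", as read by read_sparse_file above:
##   python convert_sparse_to_patoh.py -i matrix.txt -o matrix.patoh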
| 24.12844
| 98
| 0.658555
|
2efef8ac56adcfa21664afd54711c87b6f2142c3
| 7,996
|
py
|
Python
|
detect.py
|
2aliveDeepfake/face_5landmark
|
2db0edce855deea145f4ac80b6166faa0df04d2e
|
[
"MIT"
] | null | null | null |
detect.py
|
2aliveDeepfake/face_5landmark
|
2db0edce855deea145f4ac80b6166faa0df04d2e
|
[
"MIT"
] | null | null | null |
detect.py
|
2aliveDeepfake/face_5landmark
|
2db0edce855deea145f4ac80b6166faa0df04d2e
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
import time
from data import cfg_mnet, cfg_slim, cfg_rfb
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from models.net_slim import Slim
from models.net_rfb import RFB
from utils.box_utils import decode, decode_landm
from utils.timer import Timer
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('-m', '--trained_model', default='./weights/RBF_Final.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='RFB', help='Backbone network mobile0.25 or slim or RFB')
parser.add_argument('--origin_size', default=True, type=str, help='Whether use origin image size to evaluate')
parser.add_argument('--long_side', default=640, help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')
parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('--save_image', action="store_true", default=True, help='show detection results')
parser.add_argument('--vis_thres', default=0.6, type=float, help='visualization_threshold')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
net = None
if args.network == "mobile0.25":
cfg = cfg_mnet
net = RetinaFace(cfg = cfg, phase = 'test')
elif args.network == "slim":
cfg = cfg_slim
net = Slim(cfg = cfg, phase = 'test')
elif args.network == "RFB":
cfg = cfg_rfb
net = RFB(cfg = cfg, phase = 'test')
else:
print("Don't support network!")
exit(0)
net = load_model(net, args.trained_model, args.cpu)
net.eval()
print('Finished loading model!')
print(net)
cudnn.benchmark = True
device = torch.device("cpu" if args.cpu else "cuda")
net = net.to(device)
# testing begin
for i in range(100):
image_path = "./img/sample.jpg"
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
# testing scale
target_size = args.long_side
max_size = args.long_side
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
resize = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(resize * im_size_max) > max_size:
resize = float(max_size) / float(im_size_max)
if args.origin_size:
resize = 1
if resize != 1:
img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
im_height, im_width, _ = img.shape
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
tic = time.time()
loc, conf, landms = net(img) # forward pass
print('net forward time: {:.4f}'.format(time.time() - tic))
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > args.confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:args.top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(dets, args.nms_threshold)
# keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
dets = dets[keep, :]
landms = landms[keep]
# keep top-K faster NMS
dets = dets[:args.keep_top_k, :]
landms = landms[:args.keep_top_k, :]
dets = np.concatenate((dets, landms), axis=1)
# show image
if args.save_image:
for b in dets:
if b[4] < args.vis_thres:
continue
text = "{:.4f}".format(b[4])
b = list(map(int, b))
cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
cx = b[0]
cy = b[1] + 12
cv2.putText(img_raw, text, (cx, cy),
cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# landms
cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
# save image
name = "test.jpg"
cv2.imwrite(name, img_raw)
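# Example invocation, using only the arguments defined above (the weights path
# is the script's default); the annotated result is written to test.jpg:
#   python detect.py --network RFB -m ./weights/RBF_Final.pth --cpu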
| 39.98
| 133
| 0.626188
|
55dd2677a81c9d281b79f1d5bdd38cd4fbfbb22c
| 301
|
py
|
Python
|
338.counting-bits.py
|
W-KE/Leetcode-Solutions
|
b6f40e45ddfa1a61a05f832b79581a34dab832cd
|
[
"MIT"
] | null | null | null |
338.counting-bits.py
|
W-KE/Leetcode-Solutions
|
b6f40e45ddfa1a61a05f832b79581a34dab832cd
|
[
"MIT"
] | null | null | null |
338.counting-bits.py
|
W-KE/Leetcode-Solutions
|
b6f40e45ddfa1a61a05f832b79581a34dab832cd
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def countBits(self, num: int) -> List[int]:
result = [0]
index = 0
for i in range(1, num + 1):
if i != 0 and (i & (i - 1)) == 0:
index = 0
result.append(1 + result[index])
index += 1
return result
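if __name__ == "__main__":
    # Quick illustrative check of the DP above: the set-bit counts for 0..5
    # should be [0, 1, 1, 2, 1, 2].
    print(Solution().countBits(5))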
| 27.363636
| 47
| 0.431894
|
8c5774e573f2ea46603649c12902671dc02cd451
| 10,875
|
py
|
Python
|
source/features/successor.py
|
mike-gimelfarb/deep-successor-features-for-transfer
|
42a547fdc6c9e34e2e92735d615cdb9db0059aee
|
[
"MIT"
] | 13
|
2021-04-24T23:21:09.000Z
|
2022-03-23T06:23:51.000Z
|
source/features/successor.py
|
mike-gimelfarb/successor-features-for-transfer
|
42a547fdc6c9e34e2e92735d615cdb9db0059aee
|
[
"MIT"
] | null | null | null |
source/features/successor.py
|
mike-gimelfarb/successor-features-for-transfer
|
42a547fdc6c9e34e2e92735d615cdb9db0059aee
|
[
"MIT"
] | 3
|
2021-02-20T09:21:21.000Z
|
2021-03-29T05:38:54.000Z
|
# -*- coding: UTF-8 -*-
import numpy as np
class SF:
def __init__(self, learning_rate_w, *args, use_true_reward=False, **kwargs):
"""
Creates a new abstract successor feature representation.
Parameters
----------
learning_rate_w : float
the learning rate to use for learning the reward weights using gradient descent
use_true_reward : boolean
whether or not to use the true reward weights from the environment, or learn them
using gradient descent
"""
self.alpha_w = learning_rate_w
self.use_true_reward = use_true_reward
if len(args) != 0 or len(kwargs) != 0:
print(self.__class__.__name__ + ' ignoring parameters ' + str(args) + ' and ' + str(kwargs))
def build_successor(self, task, source=None):
"""
Builds a new successor feature map for the specified task. This method should not be called directly.
Instead, add_task should be called instead.
Parameters
----------
task : Task
a new MDP environment for which to learn successor features
source : integer
if specified and not None, the parameters of the successor features for the task at the source
index should be copied to the new successor features, as suggested in [1]
Returns
-------
object : the successor feature representation for the new task, which can be a Keras model,
a lookup table (dictionary) or another learning representation
"""
raise NotImplementedError
def get_successor(self, state, policy_index):
"""
Evaluates the successor features in given states for the specified task.
Parameters
----------
state : object
a state or collection of states of the MDP
policy_index : integer
the index of the task whose successor features to evaluate
Returns
-------
np.ndarray : the evaluation of the successor features, which is of shape
[n_batch, n_actions, n_features], where
n_batch is the number of states in the state argument
n_actions is the number of actions of the MDP
n_features is the number of features in the SF representation
"""
raise NotImplementedError
def get_successors(self, state):
"""
Evaluates the successor features in given states for all tasks.
Parameters
----------
state : object
a state or collection of states of the MDP
Returns
-------
np.ndarray : the evaluation of the successor features, which is of shape
[n_batch, n_tasks, n_actions, n_features], where
n_batch is the number of states in the state argument
n_tasks is the number of tasks
n_actions is the number of actions of the MDP
n_features is the number of features in the SF representation
"""
raise NotImplementedError
def update_successor(self, transitions, policy_index):
"""
Updates the successor representation by training it on the given transition.
Parameters
----------
transitions : object
collection of transitions
policy_index : integer
the index of the task whose successor features to update
"""
raise NotImplementedError
def reset(self):
"""
Removes all trained successor feature representations from the current object, all learned rewards,
and all task information.
"""
self.n_tasks = 0
self.psi = []
self.true_w = []
self.fit_w = []
self.gpi_counters = []
def add_training_task(self, task, source=None):
"""
Adds a successor feature representation for the specified task.
Parameters
----------
task : Task
a new MDP environment for which to learn successor features
source : integer
if specified and not None, the parameters of the successor features for the task at the source
index should be copied to the new successor features, as suggested in [1]
"""
# add successor features to the library
self.psi.append(self.build_successor(task, source))
self.n_tasks = len(self.psi)
# build new reward function
true_w = task.get_w()
self.true_w.append(true_w)
if self.use_true_reward:
fit_w = true_w
else:
n_features = task.feature_dim()
fit_w = np.random.uniform(low=-0.01, high=0.01, size=(n_features, 1))
self.fit_w.append(fit_w)
# add statistics
for i in range(len(self.gpi_counters)):
self.gpi_counters[i] = np.append(self.gpi_counters[i], 0)
self.gpi_counters.append(np.zeros((self.n_tasks,), dtype=int))
def update_reward(self, phi, r, task_index, exact=False):
"""
Updates the reward parameters for the given task based on the observed reward sample
from the environment.
Parameters
----------
phi : np.ndarray
the state features
r : float
the observed reward from the MDP
task_index : integer
the index of the task from which this reward was sampled
exact : boolean
if True, validates the true reward from the environment and the linear representation
"""
# update reward using linear regression
w = self.fit_w[task_index]
phi = phi.reshape(w.shape)
r_fit = np.sum(phi * w)
self.fit_w[task_index] = w + self.alpha_w * (r - r_fit) * phi
# validate reward
r_true = np.sum(phi * self.true_w[task_index])
if exact and not np.allclose(r, r_true):
raise Exception('sampled reward {} != linear reward {} - please check task {}!'.format(
r, r_true, task_index))
def GPE_w(self, state, policy_index, w):
"""
Implements generalized policy evaluation according to [1]. In summary, this uses the
learned reward parameters of one task and successor features of a policy to estimate the Q-values of
the policy if it were executed in that task.
Parameters
----------
state : object
a state or collection of states of the MDP
policy_index : integer
the index of the task whose policy to evaluate
w : numpy array
reward parameters of the task in which to evaluate the policy
Returns
-------
np.ndarray : the estimated Q-values of shape [n_batch, n_actions], where
n_batch is the number of states in the state argument
n_actions is the number of actions in the MDP
"""
psi = self.get_successor(state, policy_index)
q = psi @ w # shape (n_batch, n_actions)
return q
def GPE(self, state, policy_index, task_index):
"""
Implements generalized policy evaluation according to [1]. In summary, this uses the
learned reward parameters of one task and successor features of a policy to estimate the Q-values of
the policy if it were executed in that task.
Parameters
----------
state : object
a state or collection of states of the MDP
policy_index : integer
the index of the task whose policy to evaluate
task_index : integer
the index of the task (e.g. reward) to use to evaluate the policy
Returns
-------
        np.ndarray : the estimated Q-values of shape [n_batch, n_actions], where
n_batch is the number of states in the state argument
n_actions is the number of actions in the MDP
"""
return self.GPE_w(state, policy_index, self.fit_w[task_index])
def GPI_w(self, state, w):
"""
Implements generalized policy improvement according to [1].
Parameters
----------
state : object
a state or collection of states of the MDP
w : numpy array
the reward parameters of the task to control
Returns
-------
np.ndarray : the maximum Q-values computed by GPI for selecting actions
of shape [n_batch, n_tasks, n_actions], where:
n_batch is the number of states in the state argument
n_tasks is the number of tasks
n_actions is the number of actions in the MDP
np.ndarray : the tasks that are active in each state of state_batch in GPi
"""
psi = self.get_successors(state)
q = (psi @ w)[:,:,:, 0] # shape (n_batch, n_tasks, n_actions)
task = np.squeeze(np.argmax(np.max(q, axis=2), axis=1)) # shape (n_batch,)
return q, task
def GPI(self, state, task_index, update_counters=False):
"""
Implements generalized policy improvement according to [1].
Parameters
----------
state : object
a state or collection of states of the MDP
task_index : integer
the index of the task in which the GPI action will be used
update_counters : boolean
whether or not to keep track of which policies are active in GPI
Returns
-------
np.ndarray : the maximum Q-values computed by GPI for selecting actions
of shape [n_batch, n_tasks, n_actions], where:
n_batch is the number of states in the state argument
n_tasks is the number of tasks
n_actions is the number of actions in the MDP
np.ndarray : the tasks that are active in each state of state_batch in GPi
"""
q, task = self.GPI_w(state, self.fit_w[task_index])
if update_counters:
self.gpi_counters[task_index][task] += 1
return q, task
def GPI_usage_percent(self, task_index):
"""
Counts the number of times that actions were transferred from other tasks.
Parameters
----------
task_index : integer
the index of the task
Returns
-------
float : the (normalized) number of actions that were transferred from other
tasks in GPi.
"""
counts = self.gpi_counters[task_index]
return 1. - (float(counts[task_index]) / np.sum(counts))
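# Shape-level sketch of the linear GPE computation used by GPE_w above: psi has
# shape [n_batch, n_actions, n_features] and w has shape [n_features, 1], so
# psi @ w yields Q-values of shape [n_batch, n_actions, 1]. The numbers below
# are arbitrary and for illustration only.
if __name__ == "__main__":
    psi = np.random.rand(4, 3, 5)  # 4 states, 3 actions, 5 features
    w = np.random.rand(5, 1)       # reward weights for one task
    q = (psi @ w)[:, :, 0]         # shape (4, 3)
    print(q.shape)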
| 38.024476
| 109
| 0.587402
|
4fc8f391ff73f05a9e559fdf9f32a3eeb842955c
| 22,977
|
py
|
Python
|
google/cloud/recommender_v1beta1/services/recommender/transports/grpc_asyncio.py
|
googleapis/python-recommender
|
fc710260170dd6ec0b0547f5e2827e12b5b624a9
|
[
"Apache-2.0"
] | 10
|
2020-09-19T18:33:54.000Z
|
2022-03-31T04:20:57.000Z
|
google/cloud/recommender_v1beta1/services/recommender/transports/grpc_asyncio.py
|
googleapis/python-recommender
|
fc710260170dd6ec0b0547f5e2827e12b5b624a9
|
[
"Apache-2.0"
] | 50
|
2020-02-05T13:32:44.000Z
|
2022-03-07T16:59:56.000Z
|
google/cloud/recommender_v1beta1/services/recommender/transports/grpc_asyncio.py
|
googleapis/python-recommender
|
fc710260170dd6ec0b0547f5e2827e12b5b624a9
|
[
"Apache-2.0"
] | 9
|
2020-02-08T13:52:09.000Z
|
2022-01-29T08:13:17.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.recommender_v1beta1.types import insight
from google.cloud.recommender_v1beta1.types import recommendation
from google.cloud.recommender_v1beta1.types import recommender_service
from .base import RecommenderTransport, DEFAULT_CLIENT_INFO
from .grpc import RecommenderGrpcTransport
class RecommenderGrpcAsyncIOTransport(RecommenderTransport):
"""gRPC AsyncIO backend transport for Recommender.
Provides insights and recommendations for cloud customers for
various categories like performance optimization, cost savings,
reliability, feature discovery, etc. Insights and
recommendations are generated automatically based on analysis of
user resources, configuration and monitoring metrics.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "recommender.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "recommender.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def list_insights(
self,
) -> Callable[
[recommender_service.ListInsightsRequest],
Awaitable[recommender_service.ListInsightsResponse],
]:
r"""Return a callable for the list insights method over gRPC.
Lists insights for a Cloud project. Requires the
recommender.*.list IAM permission for the specified insight
type.
Returns:
Callable[[~.ListInsightsRequest],
Awaitable[~.ListInsightsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_insights" not in self._stubs:
self._stubs["list_insights"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/ListInsights",
request_serializer=recommender_service.ListInsightsRequest.serialize,
response_deserializer=recommender_service.ListInsightsResponse.deserialize,
)
return self._stubs["list_insights"]
@property
def get_insight(
self,
) -> Callable[[recommender_service.GetInsightRequest], Awaitable[insight.Insight]]:
r"""Return a callable for the get insight method over gRPC.
Gets the requested insight. Requires the recommender.*.get IAM
permission for the specified insight type.
Returns:
Callable[[~.GetInsightRequest],
Awaitable[~.Insight]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_insight" not in self._stubs:
self._stubs["get_insight"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/GetInsight",
request_serializer=recommender_service.GetInsightRequest.serialize,
response_deserializer=insight.Insight.deserialize,
)
return self._stubs["get_insight"]
@property
def mark_insight_accepted(
self,
) -> Callable[
[recommender_service.MarkInsightAcceptedRequest], Awaitable[insight.Insight]
]:
r"""Return a callable for the mark insight accepted method over gRPC.
Marks the Insight State as Accepted. Users can use this method
to indicate to the Recommender API that they have applied some
action based on the insight. This stops the insight content from
being updated.
MarkInsightAccepted can be applied to insights in ACTIVE state.
Requires the recommender.*.update IAM permission for the
specified insight.
Returns:
Callable[[~.MarkInsightAcceptedRequest],
Awaitable[~.Insight]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mark_insight_accepted" not in self._stubs:
self._stubs["mark_insight_accepted"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/MarkInsightAccepted",
request_serializer=recommender_service.MarkInsightAcceptedRequest.serialize,
response_deserializer=insight.Insight.deserialize,
)
return self._stubs["mark_insight_accepted"]
@property
def list_recommendations(
self,
) -> Callable[
[recommender_service.ListRecommendationsRequest],
Awaitable[recommender_service.ListRecommendationsResponse],
]:
r"""Return a callable for the list recommendations method over gRPC.
Lists recommendations for a Cloud project. Requires the
recommender.*.list IAM permission for the specified recommender.
Returns:
Callable[[~.ListRecommendationsRequest],
Awaitable[~.ListRecommendationsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_recommendations" not in self._stubs:
self._stubs["list_recommendations"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/ListRecommendations",
request_serializer=recommender_service.ListRecommendationsRequest.serialize,
response_deserializer=recommender_service.ListRecommendationsResponse.deserialize,
)
return self._stubs["list_recommendations"]
@property
def get_recommendation(
self,
) -> Callable[
[recommender_service.GetRecommendationRequest],
Awaitable[recommendation.Recommendation],
]:
r"""Return a callable for the get recommendation method over gRPC.
Gets the requested recommendation. Requires the
recommender.*.get IAM permission for the specified recommender.
Returns:
Callable[[~.GetRecommendationRequest],
Awaitable[~.Recommendation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_recommendation" not in self._stubs:
self._stubs["get_recommendation"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/GetRecommendation",
request_serializer=recommender_service.GetRecommendationRequest.serialize,
response_deserializer=recommendation.Recommendation.deserialize,
)
return self._stubs["get_recommendation"]
@property
def mark_recommendation_claimed(
self,
) -> Callable[
[recommender_service.MarkRecommendationClaimedRequest],
Awaitable[recommendation.Recommendation],
]:
r"""Return a callable for the mark recommendation claimed method over gRPC.
Marks the Recommendation State as Claimed. Users can use this
method to indicate to the Recommender API that they are starting
to apply the recommendation themselves. This stops the
recommendation content from being updated. Associated insights
are frozen and placed in the ACCEPTED state.
MarkRecommendationClaimed can be applied to recommendations in
CLAIMED or ACTIVE state.
Requires the recommender.*.update IAM permission for the
specified recommender.
Returns:
Callable[[~.MarkRecommendationClaimedRequest],
Awaitable[~.Recommendation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mark_recommendation_claimed" not in self._stubs:
self._stubs["mark_recommendation_claimed"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/MarkRecommendationClaimed",
request_serializer=recommender_service.MarkRecommendationClaimedRequest.serialize,
response_deserializer=recommendation.Recommendation.deserialize,
)
return self._stubs["mark_recommendation_claimed"]
@property
def mark_recommendation_succeeded(
self,
) -> Callable[
[recommender_service.MarkRecommendationSucceededRequest],
Awaitable[recommendation.Recommendation],
]:
r"""Return a callable for the mark recommendation succeeded method over gRPC.
Marks the Recommendation State as Succeeded. Users can use this
method to indicate to the Recommender API that they have applied
the recommendation themselves, and the operation was successful.
This stops the recommendation content from being updated.
Associated insights are frozen and placed in the ACCEPTED state.
MarkRecommendationSucceeded can be applied to recommendations in
ACTIVE, CLAIMED, SUCCEEDED, or FAILED state.
Requires the recommender.*.update IAM permission for the
specified recommender.
Returns:
Callable[[~.MarkRecommendationSucceededRequest],
Awaitable[~.Recommendation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mark_recommendation_succeeded" not in self._stubs:
self._stubs[
"mark_recommendation_succeeded"
] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/MarkRecommendationSucceeded",
request_serializer=recommender_service.MarkRecommendationSucceededRequest.serialize,
response_deserializer=recommendation.Recommendation.deserialize,
)
return self._stubs["mark_recommendation_succeeded"]
@property
def mark_recommendation_failed(
self,
) -> Callable[
[recommender_service.MarkRecommendationFailedRequest],
Awaitable[recommendation.Recommendation],
]:
r"""Return a callable for the mark recommendation failed method over gRPC.
Marks the Recommendation State as Failed. Users can use this
method to indicate to the Recommender API that they have applied
the recommendation themselves, and the operation failed. This
stops the recommendation content from being updated. Associated
insights are frozen and placed in the ACCEPTED state.
MarkRecommendationFailed can be applied to recommendations in
ACTIVE, CLAIMED, SUCCEEDED, or FAILED state.
Requires the recommender.*.update IAM permission for the
specified recommender.
Returns:
Callable[[~.MarkRecommendationFailedRequest],
Awaitable[~.Recommendation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mark_recommendation_failed" not in self._stubs:
self._stubs["mark_recommendation_failed"] = self.grpc_channel.unary_unary(
"/google.cloud.recommender.v1beta1.Recommender/MarkRecommendationFailed",
request_serializer=recommender_service.MarkRecommendationFailedRequest.serialize,
response_deserializer=recommendation.Recommendation.deserialize,
)
return self._stubs["mark_recommendation_failed"]
def close(self):
return self.grpc_channel.close()
__all__ = ("RecommenderGrpcAsyncIOTransport",)
| 44.789474
| 100
| 0.654742
|
2522f3fb226e4b3936f90fd9fb681acc7a6de362
| 1,462
|
py
|
Python
|
cvat/apps/dataset_manager/formats/camvid.py
|
gitibharatibiswal/cvat-adas-team
|
a33c1ae38d79ed6e35a61a97b74cd2978853e4c2
|
[
"Intel",
"MIT"
] | 2
|
2022-03-13T03:45:15.000Z
|
2022-03-13T03:46:19.000Z
|
cvat/apps/dataset_manager/formats/camvid.py
|
gitibharatibiswal/cvat-adas-team
|
a33c1ae38d79ed6e35a61a97b74cd2978853e4c2
|
[
"Intel",
"MIT"
] | 3
|
2021-03-02T17:07:15.000Z
|
2021-04-14T13:33:20.000Z
|
cvat/apps/dataset_manager/formats/camvid.py
|
gitibharatibiswal/cvat-adas-team
|
a33c1ae38d79ed6e35a61a97b74cd2978853e4c2
|
[
"Intel",
"MIT"
] | 41
|
2020-12-16T05:52:41.000Z
|
2021-09-23T08:45:16.000Z
|
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from tempfile import TemporaryDirectory
from datumaro.components.dataset import Dataset
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import (GetCVATDataExtractor,
import_dm_annotations)
from cvat.apps.dataset_manager.util import make_zip_archive
from .registry import dm_env, exporter, importer
from .utils import make_colormap
@exporter(name='CamVid', ext='ZIP', version='1.0')
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
dataset.transform('polygons_to_masks')
dataset.transform('boxes_to_masks')
dataset.transform('merge_instance_segments')
label_map = make_colormap(instance_data)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'camvid',
save_images=save_images, apply_colormap=True,
label_map={label: label_map[label][0] for label in label_map})
make_zip_archive(temp_dir, dst_file)
@importer(name='CamVid', ext='ZIP', version='1.0')
def _import(src_file, instance_data):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'camvid', env=dm_env)
dataset.transform('masks_to_polygons')
import_dm_annotations(dataset, instance_data)
| 35.658537
| 74
| 0.75513
|
d75e9993aa570368d03533d540203ecee156ca81
| 895
|
py
|
Python
|
constant_listener/constant_listener.py
|
MattWis/constant_listener
|
0c0b32101041c4263114cd9e3306a62d20eedecb
|
[
"MIT"
] | 1
|
2015-05-07T08:24:03.000Z
|
2015-05-07T08:24:03.000Z
|
constant_listener/constant_listener.py
|
MattWis/constant_listener
|
0c0b32101041c4263114cd9e3306a62d20eedecb
|
[
"MIT"
] | null | null | null |
constant_listener/constant_listener.py
|
MattWis/constant_listener
|
0c0b32101041c4263114cd9e3306a62d20eedecb
|
[
"MIT"
] | null | null | null |
from thread import start_new_thread
from pyaudio import PyAudio
from pyspeech import best_speech_result, put_audio_data_in_queue
from time import sleep
import Queue
def background_stt(queue, profile, stt_type = 'google'):
start_new_thread(_spawn_listeners, (queue, profile, stt_type,))
def _spawn_listeners(queue, profile, stt_type):
p = PyAudio()
audio_data_queue = Queue.Queue()
start_new_thread(put_audio_data_in_queue, (p, audio_data_queue,))
while True:
_listen(p, queue, audio_data_queue, profile, stt_type)
def _listen(pyaudio, queue_out, queue_in, profile, stt_type):
output = best_speech_result(pyaudio, queue_in.get(), profile, stt_type)
if output != "":
queue_out.put(output)
if __name__ == "__main__":
import yaml
profile = yaml.load(open("profile.yml").read())
q = Queue.Queue()
background_stt(q, profile, 'att')
while True:
print(q.get())
| 28.870968
| 73
| 0.747486
|
9f74533db45138669c45af097b2b25e26d5fa0a0
| 2,689
|
py
|
Python
|
tests/test_class_oelint_vars_pnbpnusage.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_vars_pnbpnusage.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_class_oelint_vars_pnbpnusage.py
|
skycaptain/oelint-adv
|
ff67d3149cf8b1de2b0b2d158a68f4e2cf5e9e46
|
[
"BSD-2-Clause"
] | null | null | null |
import pytest
from .base import TestBaseClass
class TestClassOelintVarsPNBPNUsage(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.vars.pnbpnusage'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'SRC_URI = "file://${PN}.patch"',
},
{
'oelint_adv_test.bb':
'SRC_URI = "git://${PN}.com/${PN}.git"',
},
{
'oelint_adv_test.bb':
'SRC_URI = "https://foo.org/${PN}"',
},
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.vars.pnbpnusage'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'SRC_URI = "file://${BPN}.patch"',
},
{
'oelint_adv_test.bb':
'SRC_URI = "git://${BPN}.com/${BPN}.git"',
},
{
'oelint_adv_test.bb':
'SRC_URI = "https://foo.org/${BPN}"',
},
{
'oelint_adv_test.bb':
'S = "${WORDKIR}/${BPN}"',
},
{
'oelint_adv_test.bb':
'SRC_URI = "git://foo.org/baz.git;name=${PN}-super"',
},
{
'oelint_adv_test.bb':
'SRC_URI = "git://foo.org/${BPN}.git;name=${PN}-ultra"',
},
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
| 44.081967
| 93
| 0.29788
|
364ecfe168695d0b4e872c8185c35200e3e99f54
| 4,988
|
py
|
Python
|
stubs.min/Autodesk/Revit/DB/__init___parts/ReferencePoint.py
|
denfromufa/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | 1
|
2017-07-07T11:15:45.000Z
|
2017-07-07T11:15:45.000Z
|
stubs.min/Autodesk/Revit/DB/__init___parts/ReferencePoint.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
stubs.min/Autodesk/Revit/DB/__init___parts/ReferencePoint.py
|
hdm-dt-fb/ironpython-stubs
|
4d2b405eda3ceed186e8adca55dd97c332c6f49d
|
[
"MIT"
] | null | null | null |
class ReferencePoint(Element,IDisposable):
""" A reference point in an Autodesk Revit family. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetCoordinatePlaneReferenceXY(self):
"""
GetCoordinatePlaneReferenceXY(self: ReferencePoint) -> Reference
A reference for the XY plane of the coordinate
system.
"""
pass
def GetCoordinatePlaneReferenceXZ(self):
"""
GetCoordinatePlaneReferenceXZ(self: ReferencePoint) -> Reference
A reference for the XZ plane of the coordinate
system.
"""
pass
def GetCoordinatePlaneReferenceYZ(self):
"""
GetCoordinatePlaneReferenceYZ(self: ReferencePoint) -> Reference
A reference for the YZ plane of the coordinate
system.
"""
pass
def GetCoordinateSystem(self):
"""
GetCoordinateSystem(self: ReferencePoint) -> Transform
The position and orientation of the ReferencePoint.
"""
pass
def GetHubId(self):
"""
GetHubId(self: ReferencePoint) -> ElementId
Id of associated Hub.
"""
pass
def GetInterpolatingCurves(self):
"""
GetInterpolatingCurves(self: ReferencePoint) -> CurveByPointsArray
The set of CurveByPoints elements that interpolate
a ReferencePoint.
"""
pass
def GetPointElementReference(self):
"""
GetPointElementReference(self: ReferencePoint) -> PointElementReference
Retrieve a copy of the rule that computes the
location of the ReferencePoint
relative to other elements in
the document.
Returns: A PointElementReference object, or null if the
ReferencePoint does not have a
reference.
"""
pass
def GetVisibility(self):
"""
GetVisibility(self: ReferencePoint) -> FamilyElementVisibility
Gets the visibility for the point.
Returns: A copy of visibility settings for the
ReferencePoint.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def SetCoordinateSystem(self,coordinateSystem):
"""
SetCoordinateSystem(self: ReferencePoint,coordinateSystem: Transform)
The position and orientation of the ReferencePoint.
"""
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetPointElementReference(self,pointElementReference):
"""
SetPointElementReference(self: ReferencePoint,pointElementReference: PointElementReference)
Change the rule for computing the
location of the ReferencePoint relative to
other elements in
the document.
pointElementReference: An object specifying
a rule for the location and orientation of a
ReferencePoint.
(Note: The ReferencePoint object does not store the
pointElementReference object after this call.)
"""
pass
def SetVisibility(self,visibility):
"""
SetVisibility(self: ReferencePoint,visibility: FamilyElementVisibility)
Sets the visibility for the point.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
CoordinatePlaneVisibility=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Visibility settings for the coordinate reference planes.
Get: CoordinatePlaneVisibility(self: ReferencePoint) -> CoordinatePlaneVisibility
Set: CoordinatePlaneVisibility(self: ReferencePoint)=value
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Name(self: ReferencePoint) -> str
Set: Name(self: ReferencePoint)=value
"""
Position=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The position of the ReferencePoint.
Get: Position(self: ReferencePoint) -> XYZ
Set: Position(self: ReferencePoint)=value
"""
ShowNormalReferencePlaneOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether all three coordinate planes are shown,or only the
normal (XY) plane.
Get: ShowNormalReferencePlaneOnly(self: ReferencePoint) -> bool
Set: ShowNormalReferencePlaneOnly(self: ReferencePoint)=value
"""
Visible=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Whether the point is visible when the family is loaded
into a project.
Get: Visible(self: ReferencePoint) -> bool
Set: Visible(self: ReferencePoint)=value
"""
| 30.601227
| 215
| 0.708901
|
ea03620c8e0b9b184e04b87fbe38324d87649c06
| 1,360
|
py
|
Python
|
applications/FluidDynamicsApplication/python_scripts/apply_embedded_nodes_initialization_process.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 778
|
2017-01-27T16:29:17.000Z
|
2022-03-30T03:01:51.000Z
|
applications/FluidDynamicsApplication/python_scripts/apply_embedded_nodes_initialization_process.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 6,634
|
2017-01-15T22:56:13.000Z
|
2022-03-31T15:03:36.000Z
|
applications/FluidDynamicsApplication/python_scripts/apply_embedded_nodes_initialization_process.py
|
lkusch/Kratos
|
e8072d8e24ab6f312765185b19d439f01ab7b27b
|
[
"BSD-4-Clause"
] | 224
|
2017-02-07T14:12:49.000Z
|
2022-03-06T23:09:34.000Z
|
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ApplyEmbeddedNodesInitializationProcess(Model, settings["Parameters"])
class ApplyEmbeddedNodesInitializationProcess(KratosMultiphysics.Process):
def __init__(self, Model, settings):
KratosMultiphysics.Process.__init__(self)
default_parameters = KratosMultiphysics.Parameters( """
{
"mesh_id" : 0,
"model_part_name" : "CHOOSE_FLUID_MODELPART_NAME",
"max_iteration" : 10
} """ )
        settings.ValidateAndAssignDefaults(default_parameters)
self.fluid_model_part = Model[settings["model_part_name"].GetString()]
self.max_iteration = settings["max_iteration"].GetInt()
self.EmbeddedNodesInitializationProcess = KratosFluid.EmbeddedNodesInitializationProcess(self.fluid_model_part,
self.max_iteration)
def ExecuteInitializeSolutionStep(self):
self.EmbeddedNodesInitializationProcess.ExecuteInitializeSolutionStep()
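# --- Editor-added usage sketch (hedged): not part of the original process file. ---
# Schematic only: the model part name below is a placeholder, and a real run would
# need a fully initialised fluid model part (nodes, variables, distance field)
# before the wrapped C++ process can do anything useful.
if __name__ == "__main__":
    model = KratosMultiphysics.Model()
    model.CreateModelPart("FluidModelPart")  # placeholder; normally filled by the solver
    settings = KratosMultiphysics.Parameters("""
    {
        "Parameters" : {
            "model_part_name" : "FluidModelPart",
            "max_iteration"   : 10
        }
    }
    """)
    process = Factory(settings, model)
    process.ExecuteInitializeSolutionStep()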
| 41.212121
| 119
| 0.671324
|
41f58e8339ae3026d9471e8bfd3a068315cbadee
| 1,913
|
py
|
Python
|
examples/16_device_queue_event.py
|
cedorman/depthai-python
|
e6783a2b8c880db59fa866facf4448bfc0b5270c
|
[
"MIT"
] | null | null | null |
examples/16_device_queue_event.py
|
cedorman/depthai-python
|
e6783a2b8c880db59fa866facf4448bfc0b5270c
|
[
"MIT"
] | null | null | null |
examples/16_device_queue_event.py
|
cedorman/depthai-python
|
e6783a2b8c880db59fa866facf4448bfc0b5270c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# This example demonstrates the use of queue events to block a thread until a message
# arrives on any of the specified queues
import cv2
import depthai as dai
import numpy as np
# Start defining a pipeline
pipeline = dai.Pipeline()
# Create Color and Mono cameras
cam_rgb = pipeline.createColorCamera()
cam_mono = pipeline.createMonoCamera()
# Create separate streams for them
xout_rgb = pipeline.createXLinkOut()
xout_mono = pipeline.createXLinkOut()
# Set properties
xout_rgb.setStreamName("rgb")
xout_mono.setStreamName("mono")
# Cap color camera to 5 fps
cam_rgb.setFps(5)
cam_rgb.setInterleaved(True)
cam_rgb.setPreviewSize(300, 300)
# Connect
cam_rgb.preview.link(xout_rgb.input)
cam_mono.out.link(xout_mono.input)
# Pipeline defined, now connect to a device
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
# Clear queue events
device.getQueueEvents()
while True:
# Block until a message arrives on any of the specified queues
queueName = device.getQueueEvent(("rgb", "mono"))
# Try getting that message from the queue whose name was specified by the event
# Note: number of events doesn't necessarily match number of messages in queues
# because queues can be set to non-blocking (overwriting) behavior
message = device.getOutputQueue(queueName).tryGet()
# process separately
if queueName == "rgb" and type(message) == dai.ImgFrame :
frame_rgb = message.getData().reshape(message.getHeight(), message.getWidth(), 3)
frame_rgb = np.ascontiguousarray(frame_rgb)
cv2.imshow("rgb", frame_rgb)
elif queueName == "mono" and type(message) == dai.ImgFrame :
cv2.imshow("mono", message.getData().reshape((message.getHeight(), message.getWidth())))
if cv2.waitKey(1) == ord('q'):
break
| 31.883333
| 100
| 0.701516
|
0e2cbac2a840bf09877d6bac12a26daa1baf3610
| 883
|
py
|
Python
|
pytorch_ssd/args.py
|
piotlinski/ssd
|
169e14fb949f476626617364e5a540249addf75a
|
[
"MIT"
] | null | null | null |
pytorch_ssd/args.py
|
piotlinski/ssd
|
169e14fb949f476626617364e5a540249addf75a
|
[
"MIT"
] | null | null | null |
pytorch_ssd/args.py
|
piotlinski/ssd
|
169e14fb949f476626617364e5a540249addf75a
|
[
"MIT"
] | null | null | null |
"""Argument parsing utils."""
from argparse import ArgumentTypeError
from ast import literal_eval
from typing import Any, Tuple, Union
def str2bool(value: Union[str, bool]) -> bool:
"""Convert input string to boolean."""
if isinstance(value, bool):
return value
if value.lower() in ("yes", "true", "t", "y", "1"):
return True
elif value.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError("Boolean value expected.")
def comma_separated(value: str) -> Tuple[int, ...]:
"""Convert comma-separated values to tuple."""
converted = [int(v) for v in value.split(",") if v]
return tuple(converted)
def eq2kwargs(kwargs_str) -> Tuple[str, Any]:
"""Parse a key-value pair separated by '='."""
key, value = kwargs_str.split("=")
return key.strip(), literal_eval(value.strip())
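# --- Editor-added usage sketch (hedged): the parser and argument names below are
# hypothetical, shown only to illustrate how these converters plug into argparse. ---
if __name__ == "__main__":
    from argparse import ArgumentParser

    parser = ArgumentParser(description="demo of the converters above")
    parser.add_argument("--flag", type=str2bool, default=False)
    parser.add_argument("--sizes", type=comma_separated, default=(300, 300))
    parser.add_argument("--kwargs", type=eq2kwargs, action="append", default=[])

    args = parser.parse_args(["--flag", "yes", "--sizes", "1,2,3", "--kwargs", "lr=0.1"])
    print(args.flag, args.sizes, dict(args.kwargs))  # True (1, 2, 3) {'lr': 0.1}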
| 30.448276
| 58
| 0.631937
|
80671298e4ce38ef89be4ded0a458e72828bb239
| 7,964
|
py
|
Python
|
bin/icell8_stats.py
|
fls-bioinformatics-core/auto_process_ngs
|
1f07a08e14f118e6a61d3f37130515efc6049dd7
|
[
"AFL-3.0"
] | 5
|
2017-01-31T21:37:09.000Z
|
2022-03-17T19:26:29.000Z
|
bin/icell8_stats.py
|
fls-bioinformatics-core/auto_process_ngs
|
1f07a08e14f118e6a61d3f37130515efc6049dd7
|
[
"AFL-3.0"
] | 294
|
2015-08-14T09:00:30.000Z
|
2022-03-18T10:17:05.000Z
|
bin/icell8_stats.py
|
fls-bioinformatics-core/auto_process_ngs
|
1f07a08e14f118e6a61d3f37130515efc6049dd7
|
[
"AFL-3.0"
] | 7
|
2017-11-23T07:52:21.000Z
|
2020-07-15T10:12:05.000Z
|
#!/usr/bin/env python
#
# icell8_stats.py: collects stats from Wafergen iCell8 Fastqs
# Copyright (C) University of Manchester 2017 Peter Briggs
#
"""
icell8_stats.py
Utility to collect statistics across one or more FASTQ pairs from
Wafergen iCell8.
"""
######################################################################
# Imports
######################################################################
import sys
import os
import argparse
import logging
import tempfile
import shutil
import time
from bcftbx.TabFile import TabFile
from auto_process_ngs.fastq_utils import pair_fastqs
from auto_process_ngs.icell8.utils import ICell8WellList
from auto_process_ngs.icell8.utils import ICell8Stats
from auto_process_ngs.icell8.utils import get_batch_size
from auto_process_ngs.icell8.utils import batch_fastqs
from auto_process_ngs.icell8.constants import MAXIMUM_BATCH_SIZE
# Module specific logger
logger = logging.getLogger("icell8_stats")
######################################################################
# Main
######################################################################
if __name__ == "__main__":
print("[%s] ICell8 stats started" % time.strftime("%Y/%m/%d-%H:%M:%S"))
# Handle the command line
p = argparse.ArgumentParser()
p.add_argument("fastqs",nargs='*',metavar="FASTQ_R1 FASTQ_R2",
help="FASTQ file pairs")
p.add_argument("-w","--well-list",
dest="well_list_file",default=None,
help="iCell8 'well list' file")
p.add_argument("-u","--unassigned",action='store_true',
help="include 'unassigned' reads")
p.add_argument("-f","--stats-file",
dest="stats_file",
help="output statistics file")
p.add_argument("-a","--append",action='store_true',
help="append to statistics file")
p.add_argument("-s","--suffix",
dest="suffix",
help="suffix to attach to column names")
p.add_argument("-n","--nprocessors",
type=int,default=1,
help="number of processors/cores available for "
"statistics generation (default: 1)")
p.add_argument("-m","--max-batch-size",
type=int,default=MAXIMUM_BATCH_SIZE,
help="maximum number of reads per batch "
"when dividing Fastqs (multicore only; "
"default: %d)" % MAXIMUM_BATCH_SIZE)
p.add_argument("-T","--temporary-directory",
action="store",default=None,metavar="DIR",
help="use DIR for temporaries, not $TMPDIR "
"or /tmp")
args = p.parse_args()
# Input Fastqs
fastqs = args.fastqs
# Well list file
if args.well_list_file is not None:
well_list = ICell8WellList(args.well_list_file)
else:
well_list = None
# Number of cores
nprocs = args.nprocessors
print("%d processor%s will be used" % (nprocs,
('s' if nprocs != 1
else '')))
# Pair up Fastq files
fastqs,unpaired = pair_fastqs(fastqs)
if unpaired:
print("Unpaired Fastqs specified:")
for fq in unpaired:
print("- %s" % fq)
logging.fatal("Unpaired Fastqs specified")
sys.exit(1)
# Only need R1 Fastqs
fastqs = [pair[0] for pair in fastqs]
# Set up a working directory
if args.temporary_directory is not None:
tmpdir = os.path.abspath(args.temporary_directory)
else:
try:
tmpdir = os.path.abspath(os.environ["TMPDIR"])
except KeyError:
tmpdir = None
working_dir = tempfile.mkdtemp(suffix="icell8_stats",
dir=tmpdir)
print("Using working dir %s" % working_dir)
# Split into batches for multiprocessing
if nprocs > 1:
try:
batch_size,nbatches = get_batch_size(
fastqs,
max_batch_size=args.max_batch_size,
min_batches=nprocs)
batched_fastqs = batch_fastqs(
fastqs,batch_size,
basename="icell8_stats",
out_dir=working_dir)
except Exception as ex:
logging.critical("Failed to split Fastqs into batches: "
"%s" % ex)
sys.exit(1)
else:
batched_fastqs = fastqs
# Collect statistics
stats = ICell8Stats(*batched_fastqs,
nprocs=nprocs,
verbose=True)
# Remove the working directory
shutil.rmtree(working_dir)
# Report the stats
if args.stats_file is not None:
# Output column names
stats_file = os.path.abspath(args.stats_file)
nreads_col = "Nreads%s" % (''
if args.suffix is None
else args.suffix)
umis_col = "Distinct_UMIs%s" % (''
if args.suffix
is None else args.suffix)
if not (os.path.isfile(stats_file) and args.append):
# Create new stats file
if well_list is not None:
# Initialise barcode and sample names from well list
stats_data = TabFile(column_names=('Barcode',
'Sample'))
for barcode in well_list.barcodes():
stats_data.append(data=(barcode,
well_list.sample(barcode)))
else:
# Barcodes from collected data
stats_data = TabFile(column_names=('Barcode',))
for barcode in stats.barcodes():
stats_data.append(data=(barcode,))
else:
# Append to an existing file
stats_data = TabFile(filen=stats_file,
first_line_is_header=True)
# Add new columns of data
stats_data.appendColumn(nreads_col)
stats_data.appendColumn(umis_col)
# Populate columns
for data_line in stats_data:
barcode = data_line['Barcode']
try:
data_line[nreads_col] = stats.nreads(barcode)
data_line[umis_col] = len(stats.distinct_umis(barcode))
except KeyError:
data_line[nreads_col] = 0
data_line[umis_col] = 0
# Deal with 'unassigned' reads
if args.unassigned:
# Count reads for barcodes not in list
unassigned_reads = 0
unassigned_umis = set()
if well_list is not None:
expected_barcodes = well_list.barcodes()
else:
expected_barcodes = [l['Barcode'] for l in stats_data]
for barcode in stats.barcodes():
if barcode not in expected_barcodes:
unassigned_reads += stats.nreads(barcode=barcode)
unassigned_umis.update(
stats.distinct_umis(barcode=barcode))
# Check if 'unassigned' is already in stats file
unassigned = stats_data.lookup('Barcode','Unassigned')
try:
data_line = unassigned[0]
except IndexError:
# Append the line
data_line = stats_data.append()
data_line['Barcode'] = 'Unassigned'
data_line[nreads_col] = unassigned_reads
data_line[umis_col] = len(unassigned_umis)
# Write to file
stats_data.write(filen=stats_file,include_header=True)
# Report summary
print("#barcodes : %s" % len(stats.barcodes()))
print("#reads : %s" % stats.nreads())
print("[%s] ICell8 stats completed" % time.strftime("%Y/%m/%d-%H:%M:%S"))
| 38.105263
| 77
| 0.541185
|
c4f7c6cde4a2a5d414e5cd122caf8834480db4ca
| 6,089
|
py
|
Python
|
todo/commands/emojis.py
|
TwinDragon/JojoCogs
|
db94fba5656291250abd914e4f731e4ca284aca5
|
[
"MIT"
] | null | null | null |
todo/commands/emojis.py
|
TwinDragon/JojoCogs
|
db94fba5656291250abd914e4f731e4ca284aca5
|
[
"MIT"
] | null | null | null |
todo/commands/emojis.py
|
TwinDragon/JojoCogs
|
db94fba5656291250abd914e4f731e4ca284aca5
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 - Jojo#7791
# Licensed under MIT
import asyncio
from typing import Optional, Union
import discord
from emoji.unicode_codes import UNICODE_EMOJI_ENGLISH
from redbot.core import commands
from redbot.core.utils.predicates import ReactionPredicate
from ..abc import TodoMixin
__all__ = ["Emojis"]
async def pretty(ctx: commands.Context) -> bool:
return await ctx.cog.cache.get_user_setting(ctx.author, "pretty_todos")
class EmojiConverter(commands.EmojiConverter):
async def convert(self, ctx: commands.Context, arg: str) -> Union[str, discord.Emoji]:
arg = arg.strip()
return arg if arg in UNICODE_EMOJI_ENGLISH.keys() else await super().convert(ctx, arg)
class Emojis(TodoMixin):
"""Emojis for todo... fun"""
@commands.group(name="todoset")
async def todo_settings(self, *args):
...
@todo_settings.group(name="categoryemojis", aliases=["catemojis"])
async def category_emoji(self, ctx: commands.Context):
"""Set your category emojis"""
pass
@category_emoji.command(name="todoemoji", aliases=["temoji"])
async def category_todo_emoji(
self, ctx: commands.Context, reset: Optional[bool], emoji: EmojiConverter = None
):
"""Set the emoji for the todo category.
If you have markdown enabled, only default emojis will work.
By default the emoji will be '🔘'.
**Arguments**
- `reset` If specified this will reset the emoji back to default.
- `emoji` The emoji that will be used for the category. This will skip the check. This argument can't be used if you have markdown enabled.
"""
if reset:
await self.cache.set_user_setting(ctx.author, "todo_category_emoji", None)
return await ctx.send("Your todo category emoji has been reset.")
elif not emoji:
return await ctx.send_help()
use_md = await self.cache.get_user_setting(ctx.author, "use_markdown")
if use_md:
return await ctx.send("You can't use custom emojis while having markdown enabled")
act_emoji = str(emoji)
await self.cache.set_user_setting(ctx.author, "todo_category_emoji", act_emoji)
return await ctx.send(f"Your todo category emoji has been set to '{act_emoji}'.")
@category_emoji.command(name="completedemoji", aliases=["cemoji"])
async def category_completed_emoji(
self, ctx: commands.Context, reset: Optional[bool], emoji: EmojiConverter = None
):
"""Set the emoji for the completed category.
If you have markdown enabled, only default emojis will work.
By default the emoji will be '☑'.
**Arguments**
- `reset` If specified this will reset the emoji back to default.
- `emoji` The emoji that will be used for the category. This will skip the check, and this argument can't be used if you have markdown enabled.
"""
if reset:
await self.cache.set_user_setting(ctx.author, "completed_category_emoji", None)
return await ctx.send("Your completed category emoji has been reset")
elif not emoji:
return await ctx.send_help()
use_md = await self.cache.get_user_setting(ctx.author, "use_markdown")
if use_md:
return await ctx.send("You can't use custom emojis while having markdown enabled")
act_emoji = str(emoji)
await self.cache.set_user_setting(ctx.author, "completed_category_emoji", act_emoji)
return await ctx.send(f"Your completed category emoji has been set to '{act_emoji}'.")
@commands.check(pretty)
@todo_settings.command(name="todoemoji", aliases=("temoji",))
async def todo_emoji(
self, ctx: commands.Context, reset: Optional[bool], emoji: EmojiConverter = None
):
"""Set the emoji used for todos
This will prompt you to react with an emoji. Note that the emoji must be one the bot can use.
If you have markdown enabled, only default emojis will work.
**Arguments**
- `reset` Whether to reset the emoji back to default.
- `emoji` The emoji that will be used for this
"""
if reset:
await self.cache.set_user_setting(ctx.author, "todo_emoji", None)
return await ctx.send("Done. Your emoji has been reset.")
elif not emoji:
return await ctx.send_help()
use_md = await self.cache.get_user_setting(ctx.author, "use_markdown")
if use_md:
return await ctx.send("You can't have custom emojis while markdown is enabled")
act_emoji = str(emoji)
await self.cache.set_user_setting(ctx.author, "todo_emoji", act_emoji)
return await ctx.send(f"I have set your todo emoji to '{act_emoji}'")
@commands.check(pretty)
@todo_settings.command(name="completeemoji", aliases=("cemoji",))
async def todo_complete_emoji(
self, ctx: commands.Context, reset: Optional[bool], emoji: EmojiConverter = None
):
"""Set the completed emoji used for completed todos.
This will prompt you to react with an emoji.
Note that only emojis that [botname] can use will work.
**Arguments**
- `reset` Whether to reset the emoji back to default.
- `emoji` The emoji to use for the complete mark. This has to be custom and can only be an emoji that [botname] can use.
"""
if reset:
await self.cache.set_user_setting(ctx.author, "completed_emoji", None)
return await ctx.send("Done. Your emoji has been reset.")
elif not emoji:
return await ctx.send_help()
use_md = await self.cache.get_user_setting(ctx.author, "use_markdown")
if use_md:
return await ctx.send("You can't have custom emojis while markdown is enabled")
act_emoji = str(emoji)
await self.cache.set_user_setting(ctx.author, "completed_emoji", act_emoji)
return await ctx.send(f"I have set your completed emoji to '{act_emoji}'")
| 42.284722
| 155
| 0.662999
|
49150ab19ead0f1ec196397ce3c831b16f67afbd
| 6,470
|
py
|
Python
|
ibis/backends/postgres/tests/test_udf.py
|
rtpsw/ibis
|
d7318fdf87121cd8fadbcf0369a2b217aab3053a
|
[
"Apache-2.0"
] | 1
|
2022-03-22T10:39:37.000Z
|
2022-03-22T10:39:37.000Z
|
ibis/backends/postgres/tests/test_udf.py
|
marlenezw/ibis
|
14b9baf3e1021e8698e7f0ae3c0ae5747543431c
|
[
"Apache-2.0"
] | null | null | null |
ibis/backends/postgres/tests/test_udf.py
|
marlenezw/ibis
|
14b9baf3e1021e8698e7f0ae3c0ae5747543431c
|
[
"Apache-2.0"
] | null | null | null |
"""Test support for already-defined UDFs in Postgres"""
import functools
import pytest
import ibis
import ibis.expr.datatypes as dt
pytest.importorskip("psycopg2")
pytest.importorskip("sqlalchemy")
from ibis.backends.postgres.udf import ( # noqa: E402
PostgresUDFError,
existing_udf,
udf,
)
@pytest.fixture(scope='session')
def next_serial(con):
# `test_sequence` SEQUENCE is created in database in the
# load-data.sh --> datamgr.py#postgres step
# to avoid parallel attempts to create the same sequence (when tests are
# run in parallel)
serial_proxy = con.con.execute("SELECT nextval('test_sequence') as value;")
return serial_proxy.fetchone()['value']
@pytest.fixture(scope='session')
def test_schema(con, next_serial):
schema_name = f'udf_test_{next_serial}'
con.con.execute(f"CREATE SCHEMA IF NOT EXISTS {schema_name};")
return schema_name
@pytest.fixture(scope='session')
def table_name():
return 'udf_test_users'
@pytest.fixture(scope='session')
def sql_table_setup(test_schema, table_name):
return """DROP TABLE IF EXISTS {schema}.{table_name};
CREATE TABLE {schema}.{table_name} (
user_id integer,
user_name varchar,
name_length integer
);
INSERT INTO {schema}.{table_name} VALUES
(1, 'Raj', 3),
(2, 'Judy', 4),
(3, 'Jonathan', 8)
;
""".format(
schema=test_schema, table_name=table_name
)
@pytest.fixture(scope='session')
def sql_define_py_udf(test_schema):
return """CREATE OR REPLACE FUNCTION {schema}.pylen(x varchar)
RETURNS integer
LANGUAGE plpython3u
AS
$$
return len(x)
$$;""".format(
schema=test_schema
)
@pytest.fixture(scope='session')
def sql_define_udf(test_schema):
return """CREATE OR REPLACE FUNCTION {schema}.custom_len(x varchar)
RETURNS integer
LANGUAGE SQL
AS
$$
SELECT length(x);
$$;""".format(
schema=test_schema
)
@pytest.fixture(scope='session')
def con_for_udf(
con, test_schema, sql_table_setup, sql_define_udf, sql_define_py_udf
):
with con.con.begin() as c:
c.execute(sql_table_setup)
c.execute(sql_define_udf)
c.execute(sql_define_py_udf)
try:
yield con
finally:
# teardown
con.con.execute(f"DROP SCHEMA IF EXISTS {test_schema} CASCADE")
@pytest.fixture
def table(con_for_udf, table_name, test_schema):
return con_for_udf.table(table_name, schema=test_schema)
# Tests
def test_existing_sql_udf(test_schema, table):
"""Test creating ibis UDF object based on existing UDF in the database"""
# Create ibis UDF objects referring to UDFs already created in the database
custom_length_udf = existing_udf(
'custom_len',
input_types=[dt.string],
output_type=dt.int32,
schema=test_schema,
)
result_obj = table[
table, custom_length_udf(table['user_name']).name('custom_len')
]
result = result_obj.execute()
assert result['custom_len'].sum() == result['name_length'].sum()
def test_existing_plpython_udf(test_schema, table):
# Create ibis UDF objects referring to UDFs already created in the database
py_length_udf = existing_udf(
'pylen',
input_types=[dt.string],
output_type=dt.int32,
schema=test_schema,
)
result_obj = table[
table, py_length_udf(table['user_name']).name('custom_len')
]
result = result_obj.execute()
assert result['custom_len'].sum() == result['name_length'].sum()
def mult_a_b(a, b):
"""Test function to be defined in-database as a UDF
and used via ibis UDF"""
return a * b
def test_udf(con_for_udf, test_schema, table):
"""Test creating a UDF in database based on Python function
and then creating an ibis UDF object based on that"""
mult_a_b_udf = udf(
con_for_udf,
mult_a_b,
(dt.int32, dt.int32),
dt.int32,
schema=test_schema,
replace=True,
language="plpython3u",
)
table_filt = table.filter(table['user_id'] == 2)
expr = table_filt[
mult_a_b_udf(table_filt['user_id'], table_filt['name_length']).name(
'mult_result'
)
]
result = expr.execute()
assert result['mult_result'].iloc[0] == 8
def pysplit(text, split):
return text.split(split)
def test_array_type(con_for_udf, test_schema, table):
"""Test that usage of Array types work
Other scalar types can be represented either by the class or an instance,
but Array types work differently. Array types must be an instance,
because the Array class must be instantiated specifying the datatype
of the elements of the array.
"""
pysplit_udf = udf(
con_for_udf,
pysplit,
(dt.string, dt.string),
dt.Array(dt.string),
schema=test_schema,
replace=True,
language="plpython3u",
)
splitter = ibis.literal(' ', dt.string)
result = pysplit_udf(table['user_name'], splitter).name('split_name')
result.execute()
def test_client_udf_api(con_for_udf, test_schema, table):
"""Test creating a UDF in database based on Python function
using an ibis client method."""
def multiply(a, b):
return a * b
multiply_udf = con_for_udf.udf(
multiply,
[dt.int32, dt.int32],
dt.int32,
schema=test_schema,
replace=True,
language="plpython3u",
)
table_filt = table.filter(table['user_id'] == 2)
expr = table_filt[
multiply_udf(table_filt['user_id'], table_filt['name_length']).name(
'mult_result'
)
]
result = expr.execute()
assert result['mult_result'].iloc[0] == 8
def test_client_udf_decorator_fails(con_for_udf, test_schema):
"""Test that UDF creation fails when creating a UDF based on a Python
function that has been defined with decorators. Decorators are not
currently supported, because the decorators end up in the body of the UDF
but are not defined in the body, therefore causing a NameError."""
def decorator(f):
@functools.wraps(f)
def wrapped(*args, **kwds):
return f(*args, **kwds)
return wrapped
@decorator
def multiply(a, b):
return a * b
with pytest.raises(PostgresUDFError):
con_for_udf.udf(
multiply,
[dt.int32, dt.int32],
dt.int32,
schema=test_schema,
replace=True,
language="plpython3u",
)
| 26.408163
| 79
| 0.661669
|
0ad80b1b28c0385451d527ef4826a77675f5f474
| 1,686
|
py
|
Python
|
mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py
|
peisuke/mmdetection
|
342bcf97b7a56a079832e15dbc468289c9cb3f4b
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py
|
peisuke/mmdetection
|
342bcf97b7a56a079832e15dbc468289c9cb3f4b
|
[
"Apache-2.0"
] | null | null | null |
mmdet/ops/sigmoid_focal_loss/sigmoid_focal_loss.py
|
peisuke/mmdetection
|
342bcf97b7a56a079832e15dbc468289c9cb3f4b
|
[
"Apache-2.0"
] | null | null | null |
import torch.nn as nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
import mmdet
if not mmdet.version.CPU_ONLY:
from . import sigmoid_focal_loss_cuda
class SigmoidFocalLossFunction(Function):
@staticmethod
def forward(ctx, input, target, gamma=2.0, alpha=0.25):
ctx.save_for_backward(input, target)
num_classes = input.shape[1]
ctx.num_classes = num_classes
ctx.gamma = gamma
ctx.alpha = alpha
loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes,
gamma, alpha)
return loss
@staticmethod
@once_differentiable
def backward(ctx, d_loss):
input, target = ctx.saved_tensors
num_classes = ctx.num_classes
gamma = ctx.gamma
alpha = ctx.alpha
d_loss = d_loss.contiguous()
d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss,
num_classes, gamma, alpha)
return d_input, None, None, None, None
sigmoid_focal_loss = SigmoidFocalLossFunction.apply
# TODO: remove this module
class SigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super(SigmoidFocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, logits, targets):
assert logits.is_cuda
loss = sigmoid_focal_loss(logits, targets, self.gamma, self.alpha)
return loss.sum()
def __repr__(self):
tmpstr = self.__class__.__name__ + '(gamma={}, alpha={})'.format(
self.gamma, self.alpha)
return tmpstr
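# --- Editor-added usage sketch (hedged): requires a CUDA device and the compiled
# sigmoid_focal_loss_cuda extension; the target convention below (0 = background,
# classes numbered 1..num_classes) follows common mmdet usage and is an assumption. ---
if __name__ == "__main__":
    import torch

    num_classes = 80
    logits = torch.randn(16, num_classes, device="cuda", requires_grad=True)
    targets = torch.randint(0, num_classes + 1, (16,), device="cuda")  # int64 labels

    criterion = SigmoidFocalLoss(gamma=2.0, alpha=0.25)
    loss = criterion(logits, targets)  # forward already reduces with .sum()
    loss.backward()
    print(loss.item())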
| 29.068966
| 77
| 0.636418
|
898fbc92cbd215e476f58d12430de2ce061f8bc3
| 15,599
|
py
|
Python
|
pytorch_lightning/trainer/data_loading.py
|
GimmickNG/pytorch-lightning
|
b36c5e86d014671b0fa922d750b27420bc73b6f9
|
[
"Apache-2.0"
] | 1
|
2021-03-10T20:13:50.000Z
|
2021-03-10T20:13:50.000Z
|
pytorch_lightning/trainer/data_loading.py
|
GimmickNG/pytorch-lightning
|
b36c5e86d014671b0fa922d750b27420bc73b6f9
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/trainer/data_loading.py
|
GimmickNG/pytorch-lightning
|
b36c5e86d014671b0fa922d750b27420bc73b6f9
|
[
"Apache-2.0"
] | 1
|
2020-09-11T22:53:18.000Z
|
2020-09-11T22:53:18.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import platform
from abc import ABC, abstractmethod
from typing import Union, List, Tuple, Callable, Optional
import torch.distributed as torch_distrib
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.core import LightningModule
from pytorch_lightning.utilities import rank_zero_warn
from pytorch_lightning.utilities.data import has_iterable_dataset, has_len
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.debugging import InternalDebugger
from pytorch_lightning.utilities.model_utils import is_overridden
try:
from apex import amp
except ImportError:
amp = None
try:
import torch_xla
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
try:
import horovod.torch as hvd
except (ModuleNotFoundError, ImportError):
HOROVOD_AVAILABLE = False
else:
HOROVOD_AVAILABLE = True
class TrainerDataLoadingMixin(ABC):
    # this is just a summary of the variables used in this abstract class,
# the proper values/initialisation should be done in child class
global_rank: int
use_ddp: bool
use_ddp2: bool
use_horovod: bool
shown_warnings: ...
val_check_interval: float
use_tpu: bool
tpu_local_core_rank: int
train_dataloader: DataLoader
num_training_batches: Union[int, float]
val_check_batch: ...
val_dataloaders: List[DataLoader]
num_val_batches: List[Union[int, float]]
test_dataloaders: List[DataLoader]
num_test_batches: List[Union[int, float]]
limit_train_batches: Union[int, float]
limit_val_batches: Union[int, float]
limit_test_batches: Union[int, float]
replace_sampler_ddp: bool
num_nodes: int
num_processes: int
distributed_backend: Optional[str]
dev_debugger: InternalDebugger
def _worker_check(self, dataloader: DataLoader, name: str) -> None:
on_windows = platform.system() == 'Windows'
# ddp_spawn + num_workers > 0 don't mix! tell the user
is_dataloader = isinstance(dataloader, DataLoader)
using_spawn = self.distributed_backend == 'ddp_spawn'
if is_dataloader and not on_windows:
if dataloader.num_workers > 0 and using_spawn:
rank_zero_warn('Dataloader(num_workers>0) and ddp_spawn do not mix well!'
' Your performance might suffer dramatically.'
' Please consider setting distributed_backend=ddp to use num_workers > 0'
' (this is a bottleneck of Python .spawn() and PyTorch')
elif dataloader.num_workers == 0 and using_spawn:
rank_zero_warn('You are using `distributed_backend=ddp_spawn` with num_workers=0.'
' For much faster performance, switch to `distributed_backend=ddp`'
' and set `num_workers>0`')
elif dataloader.num_workers <= 2 and multiprocessing.cpu_count() > 2 and not using_spawn:
num_cpus = multiprocessing.cpu_count()
rank_zero_warn(f'The dataloader, {name}, does not have many workers which may be a bottleneck.'
' Consider increasing the value of the `num_workers` argument`'
f' (try {num_cpus} which is the number of cpus on this machine)'
' in the `DataLoader` init to improve performance.')
def auto_add_sampler(self, dataloader: DataLoader, train: bool) -> DataLoader:
# don't do anything if it's not a dataloader
is_dataloader = isinstance(dataloader, DataLoader)
# don't manipulate iterable datasets
is_iterable_ds = has_iterable_dataset(dataloader)
if not is_dataloader or is_iterable_ds:
return dataloader
need_dist_sampler = (self.use_ddp or self.use_ddp2 or self.use_horovod or self.use_tpu)
if self.replace_sampler_ddp and need_dist_sampler:
if not isinstance(dataloader.sampler, (SequentialSampler, RandomSampler)):
raise MisconfigurationException(
'You seem to have configured a sampler in your DataLoader. This will be replaced '
' by `DistributedSampler` since `replace_sampler_ddp` is True and you are using'
' distributed training. Either remove the sampler from your DataLoader or set'
' `replace_sampler_ddp`=False if you want to use your custom sampler.')
# replace with distributed sampler
sampler = self._get_distributed_sampler(dataloader, train)
dataloader = self.replace_sampler(dataloader, sampler)
return dataloader
def replace_sampler(self, dataloader, sampler):
skip_keys = ['sampler', 'batch_sampler', 'dataset_kind']
dl_args = {
k: v for k, v in dataloader.__dict__.items() if not k.startswith('_') and k not in skip_keys
}
dl_args['sampler'] = sampler
dataloader = type(dataloader)(**dl_args)
return dataloader
def _get_distributed_sampler(self, dataloader, train):
if self.use_tpu:
kwargs = dict(num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif self.use_horovod:
kwargs = dict(num_replicas=hvd.size(), rank=hvd.rank())
else:
world_size = {
'ddp': self.num_nodes * self.num_processes,
'ddp_spawn': self.num_nodes * self.num_processes,
'ddp2': self.num_nodes,
'ddp_cpu': self.num_processes * self.num_nodes
}
assert self.distributed_backend is not None
kwargs = dict(num_replicas=world_size[self.distributed_backend], rank=self.global_rank)
kwargs['shuffle'] = train
sampler = DistributedSampler(dataloader.dataset, **kwargs)
return sampler
def reset_train_dataloader(self, model: LightningModule) -> None:
"""Resets the train dataloader and initialises required variables
(number of batches, when to validate, etc.).
Args:
model: The current `LightningModule`
"""
self.train_dataloader = self.request_dataloader(model.train_dataloader)
# debugging
self.dev_debugger.track_load_dataloader_call('train_dataloader', dataloaders=[self.train_dataloader])
self.num_training_batches = 0
# automatically add samplers
self.train_dataloader = self.auto_add_sampler(self.train_dataloader, train=True)
self.num_training_batches = len(self.train_dataloader) if has_len(self.train_dataloader) else float('inf')
self._worker_check(self.train_dataloader, 'train dataloader')
if isinstance(self.limit_train_batches, int) or self.limit_train_batches == 0.0:
self.num_training_batches = min(self.num_training_batches, int(self.limit_train_batches))
elif self.num_training_batches != float('inf'):
self.num_training_batches = int(self.num_training_batches * self.limit_train_batches)
elif self.limit_train_batches != 1.0:
raise MisconfigurationException(
'When using an IterableDataset for `limit_train_batches`,'
' `Trainer(limit_train_batches)` must be `0.0`, `1.0` or an int. An int k specifies'
' `num_training_batches` to use.')
# determine when to check validation
# if int passed in, val checks that often
# otherwise, it checks in [0, 1.0] % range of a training epoch
if isinstance(self.val_check_interval, int):
self.val_check_batch = self.val_check_interval
if self.val_check_batch > self.num_training_batches:
raise ValueError(
f'`val_check_interval` ({self.val_check_interval}) must be less than or equal '
f'to the number of the training batches ({self.num_training_batches}). '
'If you want to disable validation set `limit_val_batches` to 0.0 instead.')
else:
if not has_len(self.train_dataloader):
if self.val_check_interval == 1.0:
self.val_check_batch = float('inf')
else:
raise MisconfigurationException(
'When using an IterableDataset for `train_dataloader`,'
' `Trainer(val_check_interval)` must be `1.0` or an int. An int k specifies'
' checking validation every k training batches.')
else:
self.val_check_batch = int(self.num_training_batches * self.val_check_interval)
self.val_check_batch = max(1, self.val_check_batch)
def _reset_eval_dataloader(
self,
model: LightningModule,
mode: str
) -> Tuple[List[Union[int, float]], List[DataLoader]]:
"""Generic method to reset a dataloader for evaluation.
Args:
model: The current `LightningModule`
mode: Either `'val'` or `'test'`
Returns:
Tuple (num_batches, dataloaders)
"""
# use the training loader as val and test when overfitting
loader_name = f'{mode}_dataloader'
if self.overfit_batches > 0:
loader_name = 'train_dataloader'
# load loaders
dataloaders = self.request_dataloader(getattr(model, loader_name))
if not isinstance(dataloaders, list):
dataloaders = [dataloaders]
self.dev_debugger.track_load_dataloader_call(loader_name, dataloaders=dataloaders)
for loader_i in range(len(dataloaders)):
loader = dataloaders[loader_i]
# shuffling in val and test set is bad practice
if mode in ('val', 'test') and hasattr(loader, 'sampler') and isinstance(loader.sampler, RandomSampler):
# when overfitting, the dataloader should not have sampler
if self.overfit_batches > 0:
rank_zero_warn('You requested to overfit but enabled training dataloader shuffling.'
' We are turning it off for you.')
dataloaders[loader_i] = self.replace_sampler(loader, SequentialSampler(loader.dataset))
else:
rank_zero_warn(f'Your {mode}_dataloader has `shuffle=True`, it is best practice to turn'
' this off for validation and test dataloaders.')
if any([dl is None for dl in dataloaders]):
rank_zero_warn("One of given dataloaders is None and it will be skipped.")
# add samplers
dataloaders = [self.auto_add_sampler(dl, train=False) for dl in dataloaders if dl is not None]
loader_num_batches = []
# determine number of batches
# datasets could be none, 1 or 2+
if len(dataloaders) != 0:
for i, dataloader in enumerate(dataloaders):
num_batches = len(dataloader) if has_len(dataloader) else float('inf')
self._worker_check(dataloader, f'{mode} dataloader {i}')
# percent or num_steps
limit_eval_batches = getattr(self, f'limit_{mode}_batches')
# limit num batches either as a percent or num steps
if isinstance(limit_eval_batches, int) or limit_eval_batches == 0.0:
num_batches = min(num_batches, int(limit_eval_batches))
elif num_batches != float('inf'):
num_batches = int(num_batches * limit_eval_batches)
elif limit_eval_batches != 1.0:
raise MisconfigurationException(
'When using an IterableDataset for `limit_{mode}_batches`,'
f' `Trainer(limit_{mode}_batches)` must be `0.0`, `1.0` or an int. An int k specifies'
f' `num_{mode}_batches` to use.')
if num_batches == 0 and limit_eval_batches > 0.0 and isinstance(limit_eval_batches, float):
min_pct = 1.0 / len(dataloader)
raise MisconfigurationException(
f'you requested to check {limit_eval_batches} of the {mode} dataloader but'
f' {limit_eval_batches}*{num_batches} = 0. Please increase the limit_{mode}_batches.'
f' Try at least limit_{mode}_batches={min_pct}'
)
loader_num_batches.append(num_batches)
return loader_num_batches, dataloaders
def reset_val_dataloader(self, model: LightningModule) -> None:
"""Resets the validation dataloader and determines the number of batches.
Args:
model: The current `LightningModule`
"""
has_loader = is_overridden('val_dataloader', model)
has_step = is_overridden('validation_step', model)
if has_loader and has_step:
self.num_val_batches, self.val_dataloaders = self._reset_eval_dataloader(model, 'val')
def reset_test_dataloader(self, model) -> None:
"""Resets the validation dataloader and determines the number of batches.
Args:
model: The current `LightningModule`
"""
has_loader = is_overridden('test_dataloader', model)
has_step = is_overridden('test_step', model)
if has_loader and has_step:
self.num_test_batches, self.test_dataloaders =\
self._reset_eval_dataloader(model, 'test')
def request_dataloader(self, dataloader_fx: Callable) -> DataLoader:
"""Handles downloading data in the GPU or TPU case.
Args:
dataloader_fx: The bound dataloader getter
Returns:
The dataloader
"""
dataloader = dataloader_fx()
# get the function we'll use to get data
if self.use_ddp or self.use_ddp2:
# all processes wait until data download has happened
torch_distrib.barrier()
# data download/load on TPU
elif self.use_tpu and XLA_AVAILABLE:
# all processes wait until data download has happened
torch_xla.core.xla_model.rendezvous('pl.TrainerDataLoadingMixin.get_dataloaders')
elif self.use_horovod:
# all processes wait until data download has happened
hvd.join()
return dataloader
def determine_data_use_amount(self, overfit_batches: float) -> None:
"""Use less data for debugging purposes"""
if overfit_batches > 0:
self.limit_train_batches = overfit_batches
self.limit_val_batches = overfit_batches
self.limit_test_batches = overfit_batches
| 43.940845
| 116
| 0.646516
|
aa4021376d75be1ba1c77b36b6b1a1e0db651219
| 12,187
|
py
|
Python
|
holoviews/tests/plotting/bokeh/testbarplot.py
|
cocoaaa/holoviews
|
bb3dc4975c2604f38d141ccad1c931ed5d9b1322
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/tests/plotting/bokeh/testbarplot.py
|
cocoaaa/holoviews
|
bb3dc4975c2604f38d141ccad1c931ed5d9b1322
|
[
"BSD-3-Clause"
] | null | null | null |
holoviews/tests/plotting/bokeh/testbarplot.py
|
cocoaaa/holoviews
|
bb3dc4975c2604f38d141ccad1c931ed5d9b1322
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.element import Bars
from bokeh.models import CategoricalColorMapper, LinearColorMapper
from ..utils import ParamLogStream
from .testplot import TestBokehPlot, bokeh_renderer
class TestBarPlot(TestBokehPlot):
def test_bars_hover_ensure_kdims_sanitized(self):
obj = Bars(np.random.rand(10,2), kdims=['Dim with spaces'])
obj = obj(plot={'tools': ['hover']})
self._test_hover_info(obj, [('Dim with spaces', '@{Dim_with_spaces}'), ('y', '@{y}')])
def test_bars_hover_ensure_vdims_sanitized(self):
obj = Bars(np.random.rand(10,2), vdims=['Dim with spaces'])
obj = obj(plot={'tools': ['hover']})
self._test_hover_info(obj, [('x', '@{x}'), ('Dim with spaces', '@{Dim_with_spaces}')])
def test_bars_suppress_legend(self):
bars = Bars([('A', 1), ('B', 2)]).opts(plot=dict(show_legend=False))
plot = bokeh_renderer.get_plot(bars)
plot.initialize_plot()
fig = plot.state
self.assertEqual(len(fig.legend), 0)
def test_empty_bars(self):
bars = Bars([], kdims=['x', 'y'], vdims=['z']).opts(plot=dict(group_index=1))
plot = bokeh_renderer.get_plot(bars)
plot.initialize_plot()
source = plot.handles['source']
for v in source.data.values():
self.assertEqual(len(v), 0)
def test_bars_grouped_categories(self):
bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)],
kdims=['Index', 'Category'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars)
source = plot.handles['source']
self.assertEqual([tuple(x) for x in source.data['xoffsets']],
[('A', '0'), ('B', '0'), ('A', '1')])
self.assertEqual(list(source.data['Category']), ['0', '0', '1'])
self.assertEqual(source.data['Value'], np.array([1, 2, -1]))
x_range = plot.handles['x_range']
self.assertEqual(x_range.factors, [('A', '0'), ('A', '1'), ('B', '0'), ('B', '1')])
def test_box_whisker_multi_level_sorted(self):
box= Bars((['A', 'B']*15, [3, 10, 1]*10, np.random.randn(30)),
['Group', 'Category'], 'Value').aggregate(function=np.mean)
plot = bokeh_renderer.get_plot(box)
x_range = plot.handles['x_range']
self.assertEqual(x_range.factors, [
('A', '1'), ('A', '3'), ('A', '10'), ('B', '1'), ('B', '3'), ('B', '10')])
def test_bars_positive_negative_mixed(self):
bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)],
kdims=['Index', 'Category'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(stack_index=1)))
source = plot.handles['source']
self.assertEqual(list(source.data['Category']), ['1', '0', '0'])
self.assertEqual(list(source.data['Index']), ['A', 'A', 'B'])
self.assertEqual(source.data['top'], np.array([0, 1, 2]))
self.assertEqual(source.data['bottom'], np.array([-1, 0, 0]))
def test_bars_logy(self):
bars = Bars([('A', 1), ('B', 2), ('C', 3)],
kdims=['Index'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(logy=True)))
source = plot.handles['source']
glyph = plot.handles['glyph']
y_range = plot.handles['y_range']
self.assertEqual(list(source.data['Index']), ['A', 'B', 'C'])
self.assertEqual(source.data['Value'], np.array([1, 2, 3]))
self.assertEqual(glyph.bottom, 10**(np.log10(3)-2))
self.assertEqual(y_range.start, 10**(np.log10(3)-2))
self.assertEqual(y_range.end, 3.)
def test_bars_logy_explicit_range(self):
bars = Bars([('A', 1), ('B', 2), ('C', 3)],
kdims=['Index'], vdims=['Value']).redim.range(Value=(0.001, 3))
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(logy=True)))
source = plot.handles['source']
glyph = plot.handles['glyph']
y_range = plot.handles['y_range']
self.assertEqual(list(source.data['Index']), ['A', 'B', 'C'])
self.assertEqual(source.data['Value'], np.array([1, 2, 3]))
self.assertEqual(glyph.bottom, 0.001)
self.assertEqual(y_range.start, 0.001)
self.assertEqual(y_range.end, 3.0000000000000013)
def test_bars_ylim(self):
bars = Bars([1, 2, 3]).opts(ylim=(0, 200))
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 200)
def test_bars_padding_square(self):
points = Bars([(1, 2), (2, -1), (3, 3)]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, -1.4)
self.assertEqual(y_range.end, 3.4)
def test_bars_padding_square_positive(self):
points = Bars([(1, 2), (2, 1), (3, 3)]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_bars_padding_square_negative(self):
points = Bars([(1, -2), (2, -1), (3, -3)]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, -3.2)
self.assertEqual(y_range.end, 0)
def test_bars_padding_nonsquare(self):
bars = Bars([(1, 2), (2, 1), (3, 3)]).options(padding=0.1, width=600)
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_bars_padding_logx(self):
bars = Bars([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_bars_padding_logy(self):
bars = Bars([(1, 2), (2, 1), (3, 3)]).options(padding=0.1, logy=True)
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0.033483695221017122)
self.assertEqual(y_range.end, 3.3483695221017129)
###########################
# Styling mapping #
###########################
def test_bars_color_op(self):
bars = Bars([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['color'], np.array(['#000', '#F00', '#0F0']))
self.assertEqual(glyph.fill_color, {'field': 'color'})
self.assertEqual(glyph.line_color, 'black')
def test_bars_linear_color_op(self):
bars = Bars([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, LinearColorMapper)
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 2)
self.assertEqual(cds.data['color'], np.array([0, 1, 2]))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, 'black')
def test_bars_categorical_color_op(self):
bars = Bars([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'C')],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, ['A', 'B', 'C'])
self.assertEqual(cds.data['color'], np.array(['A', 'B', 'C']))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, 'black')
def test_bars_line_color_op(self):
bars = Bars([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims=['y', 'color']).options(line_color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_color'], np.array(['#000', '#F00', '#0F0']))
self.assertNotEqual(glyph.fill_color, {'field': 'line_color'})
self.assertEqual(glyph.line_color, {'field': 'line_color'})
def test_bars_fill_color_op(self):
bars = Bars([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims=['y', 'color']).options(fill_color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['fill_color'], np.array(['#000', '#F00', '#0F0']))
self.assertEqual(glyph.fill_color, {'field': 'fill_color'})
self.assertNotEqual(glyph.line_color, {'field': 'fill_color'})
def test_bars_alpha_op(self):
bars = Bars([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims=['y', 'alpha']).options(alpha='alpha')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['alpha'], np.array([0, 0.2, 0.7]))
self.assertEqual(glyph.fill_alpha, {'field': 'alpha'})
def test_bars_line_alpha_op(self):
bars = Bars([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims=['y', 'alpha']).options(line_alpha='alpha')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_alpha'], np.array([0, 0.2, 0.7]))
self.assertEqual(glyph.line_alpha, {'field': 'line_alpha'})
self.assertNotEqual(glyph.fill_alpha, {'field': 'line_alpha'})
def test_bars_fill_alpha_op(self):
bars = Bars([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims=['y', 'alpha']).options(fill_alpha='alpha')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['fill_alpha'], np.array([0, 0.2, 0.7]))
self.assertNotEqual(glyph.line_alpha, {'field': 'fill_alpha'})
self.assertEqual(glyph.fill_alpha, {'field': 'fill_alpha'})
def test_bars_line_width_op(self):
bars = Bars([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims=['y', 'line_width']).options(line_width='line_width')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_width'], np.array([1, 4, 8]))
self.assertEqual(glyph.line_width, {'field': 'line_width'})
def test_op_ndoverlay_value(self):
colors = ['blue', 'red']
overlay = NdOverlay({color: Bars(np.arange(i+2)) for i, color in enumerate(colors)}, 'Color').options('Bars', fill_color='Color')
plot = bokeh_renderer.get_plot(overlay)
for subplot, color in zip(plot.subplots.values(), colors):
self.assertEqual(subplot.handles['glyph'].fill_color, color)
def test_bars_color_index_color_clash(self):
bars = Bars([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims=['y', 'color']).options(color='color', color_index='color')
with ParamLogStream() as log:
bokeh_renderer.get_plot(bars)
log_msg = log.stream.read()
warning = ("Cannot declare style mapping for 'color' option "
"and declare a color_index; ignoring the color_index.\n")
self.assertEqual(log_msg, warning)
| 47.054054
| 137
| 0.575531
|
08c453399ac8ae12f9ff2867b84b624f818d12a8
| 1,583
|
py
|
Python
|
db.py
|
tarnowski-git/hardware_parts_manager
|
e8dbfec0dfbf643f4ba97db88c53ee61b9b7b53b
|
[
"MIT"
] | 1
|
2022-03-14T21:14:04.000Z
|
2022-03-14T21:14:04.000Z
|
db.py
|
tarnowski-git/hardware_parts_manager
|
e8dbfec0dfbf643f4ba97db88c53ee61b9b7b53b
|
[
"MIT"
] | null | null | null |
db.py
|
tarnowski-git/hardware_parts_manager
|
e8dbfec0dfbf643f4ba97db88c53ee61b9b7b53b
|
[
"MIT"
] | null | null | null |
import sqlite3
class Database:
def __init__(self, db):
self.conn = sqlite3.connect(db)
self.cur = self.conn.cursor()
self.cur.execute(
"CREATE TABLE IF NOT EXISTS parts (id INTEGER PRIMARY KEY, part text, customer text, retailer text, price text)")
self.conn.commit()
def fetch(self):
self.cur.execute("SELECT * FROM parts")
rows = self.cur.fetchall()
return rows
def insert(self, part, customer, retailer, price):
self.cur.execute("INSERT INTO parts VALUES (NULL, ?, ?, ?, ?)",
(part, customer, retailer, price))
self.conn.commit()
def remove(self, id):
self.cur.execute("DELETE FROM parts WHERE id = ?", (id, ))
self.conn.commit()
def update(self, id, part, customer, retailer, price):
self.cur.execute("UPDATE parts SET part = ?, customer = ?, retailer = ?, price = ? WHERE id = ?",
(part, customer, retailer, price, id))
self.conn.commit()
def __del__(self):
self.conn.close()
# To create a database
# db = Database("store.db")
# db.insert("4GB DDR4 Ram", "John Doe", "Microcenter", "160")
# db.insert("Asus Mobo", "Mike Henry", "Microcenter", "360")
# db.insert("500w PSU", "Karen Johnson", "Newegg", "80")
# db.insert("2GB DDR4 Ram", "Karen Johnson", "Newegg", "70")
# db.insert("24 inch Samsung Monitor", "Sam Smith", "Best Buy", "180")
# db.insert("NVIDIA RTX 2080", "Albert Kingston", "Newegg", "679")
# db.insert("600w Corsair PSU", "Karen Johnson", "Newegg", "130")
| 36.813953
| 125
| 0.595073
|
8ed93fdfe17e4094620897285f1f6df4d1e77fa6
| 4,293
|
py
|
Python
|
misc/src/guid_c/system.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2016-10-30T09:51:06.000Z
|
2016-10-30T09:51:06.000Z
|
misc/src/guid_c/system.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2015-12-29T18:51:07.000Z
|
2015-12-29T18:51:07.000Z
|
misc/src/guid_c/system.py
|
hivesolutions/colony_plugins
|
cfd8fb2ac58037e01002966704b8a642feb37895
|
[
"Apache-1.1"
] | 1
|
2018-01-26T12:54:13.000Z
|
2018-01-26T12:54:13.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Colony Framework
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Colony Framework.
#
# Hive Colony Framework is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Colony Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Colony Framework. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import time
import socket
import random
import threading
import colony
MAX_COUNTER = 0xfffffffe
""" The max counter value """
class GUID(colony.System):
"""
The GUID class responsible for the generation and
management of globally unique identifiers.
:see: https://en.wikipedia.org/wiki/Universally_unique_identifier
"""
def __init__(self, plugin):
colony.System.__init__(self, plugin)
self.counter = colony.legacy.LONG(0)
self.first_counter = MAX_COUNTER
self.last_time = 0
self.ip = ""
self.lock = threading.RLock()
try:
self.ip = socket.getaddrinfo(socket.gethostname(), 0)[-1][-1][0]
self.hexadecimal_ip = make_hexadecimal_ip(self.ip)
# in case there is no ip, defaults to something in the 10.x.x.x private range
except Exception:
self.ip = "10"
rand = random.Random()
for _index in colony.legacy.xrange(3):
# might as well use IPv6 range if we're making it up
self.ip += "." + str(rand.randrange(1, 0xffff))
self.hexadecimal_ip = make_hexadecimal_ip(self.ip)
def generate_guid(self):
"""
Generates a GUID (unique in space and time) number.
:rtype: String
:return: The unique GUID.
"""
# acquires the lock, only one GUID at the same time
self.lock.acquire()
try:
# the list that represents the various parts of the GUID
parts = []
# do we need to wait for the next millisecond (are we out of counters?)
now = colony.legacy.LONG(time.time() * 1000)
while self.last_time == now and self.counter == self.first_counter:
time.sleep(.01)
now = colony.legacy.LONG(time.time() * 1000)
# appends time part
parts.append("%016x" % now)
# time to start counter over since we have a different millisecond
if self.last_time != now:
# start at random position
self.first_counter = colony.legacy.LONG(random.uniform(1, MAX_COUNTER))
self.counter = self.first_counter
self.counter += 1
if self.counter > MAX_COUNTER:
self.counter = 0
self.last_time = now
# appends counter part
parts.append("%08x" % (self.counter))
# appends ip part
parts.append(self.hexadecimal_ip)
# put all the parts together
return "".join(parts)
finally:
# releases the lock, more GUID can be generated now
self.lock.release()
make_hexadecimal_ip = lambda ip: "".join(["%04x" % colony.legacy.LONG(index) for index in ip.split(".")])
""" Makes an hexadecimal IP from a decimal dot-separated ip (eg: 127.0.0.1) """
| 33.539063
| 106
| 0.607501
|
a102f247b4dd87832de9e512a65f627b183492a3
| 3,110
|
py
|
Python
|
website/website/settings.py
|
chrazbee/DataMole
|
cae54785c58409f8bc2c9df236bd51239504f0c2
|
[
"BSD-2-Clause"
] | null | null | null |
website/website/settings.py
|
chrazbee/DataMole
|
cae54785c58409f8bc2c9df236bd51239504f0c2
|
[
"BSD-2-Clause"
] | null | null | null |
website/website/settings.py
|
chrazbee/DataMole
|
cae54785c58409f8bc2c9df236bd51239504f0c2
|
[
"BSD-2-Clause"
] | 1
|
2018-03-16T14:46:57.000Z
|
2018-03-16T14:46:57.000Z
|
"""
Django settings for website project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'srw23qt9s4ufp)dki1s#1hay9-k--a+fd@-#*p&^%_&6^d3h0v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'website.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'website.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
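# A minimal hardening sketch (not part of the generated settings): in production
# the secret key would normally come from the environment rather than the file.
# The DJANGO_SECRET_KEY variable name is an assumption, not a project convention.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)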
| 25.491803
| 91
| 0.69582
|
ac69fe90296b56a4571e02cfe9e4a45d22d3f520
| 353
|
py
|
Python
|
X on graph paper.py
|
yoursamlan/FunWithTurtle
|
b4376c45e04bb3af9eaf50fa1bcda5baacd3d00f
|
[
"MIT"
] | 1
|
2019-07-08T11:47:57.000Z
|
2019-07-08T11:47:57.000Z
|
X on graph paper.py
|
yoursamlan/FunWithTurtle
|
b4376c45e04bb3af9eaf50fa1bcda5baacd3d00f
|
[
"MIT"
] | null | null | null |
X on graph paper.py
|
yoursamlan/FunWithTurtle
|
b4376c45e04bb3af9eaf50fa1bcda5baacd3d00f
|
[
"MIT"
] | 1
|
2020-03-31T06:40:42.000Z
|
2020-03-31T06:40:42.000Z
|
# It's the funniest way to draw an X on graph paper
import turtle
from turtle import *
for i in range (20,500,10):
turtle.forward(int(i))
turtle.right(90)
for j in range (20,100,10):
turtle.forward(int(i-j))
turtle.right(90)
for k in range (20,50,50):
turtle.forward(int(i+k))
turtle.right(90)
| 25.214286
| 49
| 0.594901
|
93f07762bf16411b9b54a5f8b9aeb5d06883fde3
| 110
|
py
|
Python
|
test_suite/suite/test09/test.py
|
joncatanio/cannoli
|
410f6bea362bf9e33eecc0e01fb080dadd14ef23
|
[
"MIT"
] | 755
|
2017-12-09T05:34:43.000Z
|
2022-03-26T09:15:56.000Z
|
test_suite/suite/test09/test.py
|
joncatanio/cannoli
|
410f6bea362bf9e33eecc0e01fb080dadd14ef23
|
[
"MIT"
] | 8
|
2017-12-12T01:03:18.000Z
|
2020-06-29T01:41:03.000Z
|
test_suite/suite/test09/test.py
|
joncatanio/cannoli
|
410f6bea362bf9e33eecc0e01fb080dadd14ef23
|
[
"MIT"
] | 23
|
2018-05-17T17:48:23.000Z
|
2022-03-26T09:15:57.000Z
|
import some_mod, other_mod
some_mod.func()
other_mod.functione(5)
a = some_mod.some_class()
a.print_self()
| 12.222222
| 26
| 0.763636
|
56eebe15f4e5e0bca5c782cafc43c500663d8afa
| 16,706
|
py
|
Python
|
mne/utils/_logging.py
|
0reza/mne-python
|
da02a256423404a81929d6de278bc63d3192a280
|
[
"BSD-3-Clause"
] | null | null | null |
mne/utils/_logging.py
|
0reza/mne-python
|
da02a256423404a81929d6de278bc63d3192a280
|
[
"BSD-3-Clause"
] | null | null | null |
mne/utils/_logging.py
|
0reza/mne-python
|
da02a256423404a81929d6de278bc63d3192a280
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import contextlib
from decorator import FunctionMaker
import importlib
import inspect
from io import StringIO
import re
import sys
import logging
import os.path as op
import warnings
from typing import Any, Callable, TypeVar
from .docs import fill_doc
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
# class to provide frame information (should be low overhead, just on logger
# calls)
class _FrameFilter(logging.Filter):
def __init__(self):
self.add_frames = 0
def filter(self, record):
record.frame_info = 'Unknown'
if self.add_frames:
# 5 is the offset necessary to get out of here and the logging
# module, reversal is to put the oldest at the top
frame_info = _frame_info(5 + self.add_frames)[5:][::-1]
if len(frame_info):
frame_info[-1] = (frame_info[-1] + ' :').ljust(30)
if len(frame_info) > 1:
frame_info[0] = '┌' + frame_info[0]
frame_info[-1] = '└' + frame_info[-1]
for ii, info in enumerate(frame_info[1:-1], 1):
frame_info[ii] = '├' + info
record.frame_info = '\n'.join(frame_info)
return True
_filter = _FrameFilter()
logger.addFilter(_filter)
# Provide help for static type checkers:
# https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
_FuncT = TypeVar('_FuncT', bound=Callable[..., Any])
def verbose(function: _FuncT) -> _FuncT:
"""Verbose decorator to allow functions to override log-level.
Parameters
----------
function : callable
Function to be decorated by setting the verbosity level.
Returns
-------
dec : callable
The decorated function.
See Also
--------
set_log_level
set_config
Notes
-----
This decorator is used to set the verbose level during a function or method
call, such as :func:`mne.compute_covariance`. The `verbose` keyword
argument can be 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', True (an
alias for 'INFO'), or False (an alias for 'WARNING'). To set the global
verbosity level for all functions, use :func:`mne.set_log_level`.
This function also serves as a docstring filler.
Examples
--------
You can use the ``verbose`` argument to set the verbose level on the fly::
>>> import mne
>>> cov = mne.compute_raw_covariance(raw, verbose='WARNING') # doctest: +SKIP
>>> cov = mne.compute_raw_covariance(raw, verbose='INFO') # doctest: +SKIP
Using up to 49 segments
Number of samples used : 5880
[done]
""" # noqa: E501
# See https://decorator.readthedocs.io/en/latest/tests.documentation.html
# #dealing-with-third-party-decorators
try:
fill_doc(function)
except TypeError: # nothing to add
pass
# Anything using verbose should have `verbose=None` in the signature.
# This code path will raise an error if this is not the case.
body = """\
def %(name)s(%(signature)s):\n
try:
do_level_change = verbose is not None
except (NameError, UnboundLocalError):
raise RuntimeError('Function/method %%s does not accept verbose '
'parameter' %% (_function_,)) from None
if do_level_change:
with _use_log_level_(verbose):
return _function_(%(shortsignature)s)
else:
return _function_(%(shortsignature)s)"""
evaldict = dict(
_use_log_level_=use_log_level, _function_=function)
fm = FunctionMaker(function, None, None, None, None, function.__module__)
attrs = dict(__wrapped__=function, __qualname__=function.__qualname__,
__globals__=function.__globals__)
return fm.make(body, evaldict, addsource=True, **attrs)
@fill_doc
class use_log_level:
"""Context manager for logging level.
Parameters
----------
%(verbose)s
%(add_frames)s
See Also
--------
mne.verbose
Notes
-----
See the :ref:`logging documentation <tut-logging>` for details.
Examples
--------
>>> from mne import use_log_level
>>> from mne.utils import logger
>>> with use_log_level(False):
... # Most MNE logger messages are "info" level, False makes them not
... # print:
... logger.info('This message will not be printed')
>>> with use_log_level(True):
... # Using verbose=True in functions, methods, or this context manager
... # will ensure they are printed
... logger.info('This message will be printed!')
This message will be printed!
"""
def __init__(self, verbose, *, add_frames=None): # noqa: D102
self._level = verbose
self._add_frames = add_frames
self._old_frames = _filter.add_frames
def __enter__(self): # noqa: D105
self._old_level = set_log_level(
self._level, return_old_level=True, add_frames=self._add_frames)
def __exit__(self, *args): # noqa: D105
add_frames = self._old_frames if self._add_frames is not None else None
set_log_level(self._old_level, add_frames=add_frames)
@fill_doc
def set_log_level(verbose=None, return_old_level=False, add_frames=None):
"""Set the logging level.
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
If None, the environment variable MNE_LOGGING_LEVEL is read, and if
it doesn't exist, defaults to INFO.
return_old_level : bool
If True, return the old verbosity level.
%(add_frames)s
Returns
-------
old_level : int
The old level. Only returned if ``return_old_level`` is True.
"""
from .config import get_config
from .check import _check_option, _validate_type
_validate_type(verbose, (bool, str, int, None), 'verbose')
if verbose is None:
verbose = get_config('MNE_LOGGING_LEVEL', 'INFO')
elif isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, str):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
_check_option('verbose', verbose, logging_types, '(when a string)')
verbose = logging_types[verbose]
old_verbose = logger.level
if verbose != old_verbose:
logger.setLevel(verbose)
if add_frames is not None:
_filter.add_frames = int(add_frames)
fmt = '%(frame_info)s ' if add_frames else ''
fmt += '%(message)s'
fmt = logging.Formatter(fmt)
for handler in logger.handlers:
handler.setFormatter(fmt)
return (old_verbose if return_old_level else None)
def set_log_file(fname=None, output_format='%(message)s', overwrite=None):
"""Set the log to print to a file.
Parameters
----------
fname : str, or None
Filename of the log to print to. If None, stdout is used.
To suppress log outputs, use set_log_level('WARN').
output_format : str
Format of the output messages. See the following for examples:
https://docs.python.org/dev/howto/logging.html
e.g., "%(asctime)s - %(levelname)s - %(message)s".
overwrite : bool | None
Overwrite the log file (if it exists). Otherwise, statements
will be appended to the log (default). None is the same as False,
but additionally raises a warning to notify the user that log
entries will be appended.
"""
_remove_close_handlers(logger)
if fname is not None:
if op.isfile(fname) and overwrite is None:
# Don't use warn() here because we just want to
# emit a warnings.warn here (not logger.warn)
warnings.warn('Log entries will be appended to the file. Use '
'overwrite=False to avoid this message in the '
'future.', RuntimeWarning, stacklevel=2)
overwrite = False
mode = 'w' if overwrite else 'a'
lh = logging.FileHandler(fname, mode=mode)
else:
""" we should just be able to do:
lh = logging.StreamHandler(sys.stdout)
        but because doctest uses some magic on stdout, we have to do this:
"""
lh = logging.StreamHandler(WrapStdOut())
lh.setFormatter(logging.Formatter(output_format))
# actually add the stream handler
logger.addHandler(lh)
def _remove_close_handlers(logger):
for h in list(logger.handlers):
# only remove our handlers (get along nicely with nose)
if isinstance(h, (logging.FileHandler, logging.StreamHandler)):
if isinstance(h, logging.FileHandler):
h.close()
logger.removeHandler(h)
class ClosingStringIO(StringIO):
"""StringIO that closes after getvalue()."""
def getvalue(self, close=True):
"""Get the value."""
out = super().getvalue()
if close:
self.close()
return out
class catch_logging(object):
"""Store logging.
This will remove all other logging handlers, and return the handler to
stdout when complete.
"""
def __init__(self, verbose=None):
self.verbose = verbose
def __enter__(self): # noqa: D105
if self.verbose is not None:
self._ctx = use_log_level(self.verbose)
else:
self._ctx = contextlib.nullcontext()
self._data = ClosingStringIO()
self._lh = logging.StreamHandler(self._data)
self._lh.setFormatter(logging.Formatter('%(message)s'))
self._lh._mne_file_like = True # monkey patch for warn() use
_remove_close_handlers(logger)
logger.addHandler(self._lh)
self._ctx.__enter__()
return self._data
def __exit__(self, *args): # noqa: D105
self._ctx.__exit__(*args)
logger.removeHandler(self._lh)
set_log_file(None)
@contextlib.contextmanager
def _record_warnings():
# this is a helper that mostly acts like pytest.warns(None) did before
# pytest 7
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
yield w
class WrapStdOut(object):
"""Dynamically wrap to sys.stdout.
    This makes packages that monkey-patch sys.stdout (e.g. doctest,
sphinx-gallery) work properly.
"""
def __getattr__(self, name): # noqa: D105
# Even more ridiculous than this class, this must be sys.stdout (not
# just stdout) in order for this to work (tested on OSX and Linux)
if hasattr(sys.stdout, name):
return getattr(sys.stdout, name)
else:
raise AttributeError("'file' object has not attribute '%s'" % name)
_verbose_dec_re = re.compile('^<decorator-gen-[0-9]+>$')
def warn(message, category=RuntimeWarning, module='mne',
ignore_namespaces=('mne',)):
"""Emit a warning with trace outside the mne namespace.
This function takes arguments like warnings.warn, and sends messages
using both ``warnings.warn`` and ``logger.warn``. Warnings can be
generated deep within nested function calls. In order to provide a
more helpful warning, this function traverses the stack until it
reaches a frame outside the ``mne`` namespace that caused the error.
Parameters
----------
message : str
Warning message.
category : instance of Warning
The warning class. Defaults to ``RuntimeWarning``.
module : str
The name of the module emitting the warning.
ignore_namespaces : list of str
Namespaces to ignore when traversing the stack.
.. versionadded:: 0.24
"""
root_dirs = [importlib.import_module(ns) for ns in ignore_namespaces]
root_dirs = [op.dirname(ns.__file__) for ns in root_dirs]
frame = None
if logger.level <= logging.WARN:
frame = inspect.currentframe()
while frame:
fname = frame.f_code.co_filename
lineno = frame.f_lineno
# in verbose dec
if not _verbose_dec_re.search(fname):
# treat tests as scripts
# and don't capture unittest/case.py (assert_raises)
if not (any(fname.startswith(rd) for rd in root_dirs) or
('unittest' in fname and 'case' in fname)) or \
op.basename(op.dirname(fname)) == 'tests':
break
frame = frame.f_back
del frame
# We need to use this instead of warn(message, category, stacklevel)
# because we move out of the MNE stack, so warnings won't properly
# recognize the module name (and our warnings.simplefilter will fail)
warnings.warn_explicit(
message, category, fname, lineno, module,
globals().get('__warningregistry__', {}))
# To avoid a duplicate warning print, we only emit the logger.warning if
# one of the handlers is a FileHandler. See gh-5592
# But it's also nice to be able to do:
# with mne.utils.use_log_level('warning', add_frames=3):
# so also check our add_frames attribute.
if any(isinstance(h, logging.FileHandler) or getattr(h, '_mne_file_like',
False)
for h in logger.handlers) or _filter.add_frames:
logger.warning(message)
def _get_call_line():
"""Get the call line from within a function."""
frame = inspect.currentframe().f_back.f_back
if _verbose_dec_re.search(frame.f_code.co_filename):
frame = frame.f_back
context = inspect.getframeinfo(frame).code_context
context = 'unknown' if context is None else context[0].strip()
return context
def filter_out_warnings(warn_record, category=None, match=None):
r"""Remove particular records from ``warn_record``.
This helper takes a list of :class:`warnings.WarningMessage` objects,
    and removes those matching category and/or text.
Parameters
----------
category: WarningMessage type | None
class of the message to filter out
match : str | None
text or regex that matches the error message to filter out
"""
regexp = re.compile('.*' if match is None else match)
is_category = [w.category == category if category is not None else True
for w in warn_record._list]
is_match = [regexp.match(w.message.args[0]) is not None
for w in warn_record._list]
ind = [ind for ind, (c, m) in enumerate(zip(is_category, is_match))
if c and m]
for i in reversed(ind):
warn_record._list.pop(i)
@contextlib.contextmanager
def wrapped_stdout(indent='', cull_newlines=False):
"""Wrap stdout writes to logger.info, with an optional indent prefix.
Parameters
----------
indent : str
The indentation to add.
cull_newlines : bool
If True, cull any new/blank lines at the end.
"""
orig_stdout = sys.stdout
my_out = ClosingStringIO()
sys.stdout = my_out
try:
yield
finally:
sys.stdout = orig_stdout
pending_newlines = 0
for line in my_out.getvalue().split('\n'):
if not line.strip() and cull_newlines:
pending_newlines += 1
continue
for _ in range(pending_newlines):
logger.info('\n')
logger.info(indent + line)
def _frame_info(n):
frame = inspect.currentframe()
try:
frame = frame.f_back
infos = list()
for _ in range(n):
try:
name = frame.f_globals['__name__']
except KeyError: # in our verbose dec
pass
else:
infos.append(f'{name.lstrip("mne.")}:{frame.f_lineno}')
frame = frame.f_back
if frame is None:
break
return infos
except Exception:
return ['unknown']
finally:
del frame
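# A short usage sketch for ``catch_logging`` (not part of the original module);
# it captures everything the mne logger emits inside the block::
#
#     with catch_logging(verbose=True) as log:
#         logger.info('captured line')
#     print(log.getvalue())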
| 34.233607
| 86
| 0.626302
|
33e9d81423e8b3488419c35984c414eb5317c46a
| 5,042
|
py
|
Python
|
beans/beans_bill/apps/expense/views.py
|
tjhlp/beans_code
|
d9b5eceddfd2c0028875001d7d53fd3fd222edf9
|
[
"MIT"
] | null | null | null |
beans/beans_bill/apps/expense/views.py
|
tjhlp/beans_code
|
d9b5eceddfd2c0028875001d7d53fd3fd222edf9
|
[
"MIT"
] | 4
|
2021-06-08T22:00:26.000Z
|
2022-03-12T00:40:05.000Z
|
beans/beans_bill/apps/expense/views.py
|
tjhlp/beans_code
|
d9b5eceddfd2c0028875001d7d53fd3fd222edf9
|
[
"MIT"
] | null | null | null |
from django.views import View
import datetime
from expense.models import *
from bill.models import BillInfo
from beans_bill.utils.response_code import *
from beans_bill.utils.comm_utils import *
class ExpenseListView(View):
""" 显示账户下所有消费记录"""
def post(self, request):
params = {'user_id': (1, str), 'bill_name': (0, str), 'expense_type': (0, str), 'expense_time': (0, str)}
js, code = valid_body_js(request, params)
if code != CODE_SUCCESS:
logger.error("invalid param")
return json_response(code)
req_params = {''}
try:
info_models = ExpenseInfo.objects.filter(user_id=js['user_id'])
except Exception as error:
logger.error("BillInfo got exception:%s, params:%s" % (str(error), js))
return json_response(CODE_NODE_SOURCE_MISSING)
res = []
for info_model in info_models:
            # look up the bill information
bill_info = BillInfo.objects.get(bill_id=info_model.bill_id)
rsp = {
'bill_id': info_model.bill_id,
'bill_name': bill_info.bill_name,
'expense_id': info_model.expense_id,
'expense_name': info_model.expense_name,
'expense_type': info_model.expense_type,
'expense_time': json_type(info_model.expense_time),
'expense_cost': info_model.expense_cost,
'expense_content': info_model.expense_content
}
res.append(rsp)
return json_response(CODE_SUCCESS, res)
class ExpenseAddView(View):
""" 添加消费记录"""
def post(self, request):
params = {'user_id': (1, str), 'bill_id_list': (1, list), 'expense_name': (1, str), 'expense_type': (1, str),
'expense_time': (1, str), 'expense_cost': (1, str), 'expense_content': (1, str)}
js, code = valid_body_js(request, params)
if code != CODE_SUCCESS:
logger.error("invalid param")
return json_response(code)
bill_id_list = js.pop('bill_id_list')
ex_list = []
for bill_id in bill_id_list:
js['bill_id'] = bill_id
ex_ = ExpenseInfo.objects.create(**js)
ex_list.append(ex_.expense_id)
rsp = {
'expense_id': ex_list
}
return json_response(CODE_SUCCESS, rsp)
class ExpenseRemoveView(View):
""" 删除消费记录"""
def post(self, request):
params = {'expense_id': (1, str)}
js, code = valid_body_js(request, params)
if code != CODE_SUCCESS:
logger.error("invalid param")
return json_response(code)
ExpenseInfo.objects.get(expense_id=js['expense_id']).delete()
return json_response(CODE_SUCCESS)
class ExpenseUpdateView(View):
""" 更改消费记录"""
def post(self, request):
params = {'expense_id': (1, str), 'expense_name': (0, str), 'expense_type': (0, str),
'expense_time': (0, str), 'expense_cost': (0, str), 'expense_content': (0, str)}
js, code = valid_body_js(request, params)
if code != CODE_SUCCESS:
logger.error("invalid param")
return json_response(code)
expense_id = js.pop('expense_id')
ex_ = ExpenseInfo.objects.get(expense_id=expense_id)
ex_.expense_name = js['expense_name'] if js['expense_name'] else ex_.expense_name
ex_.expense_type = js['expense_type'] if js['expense_type'] else ex_.expense_type
ex_.expense_time = js['expense_time'] if js['expense_time'] else ex_.expense_time
ex_.expense_cost = js['expense_cost'] if js['expense_cost'] else ex_.expense_cost
ex_.expense_content = js['expense_content'] if js['expense_content'] else ex_.expense_content
ex_.save()
return json_response(CODE_SUCCESS)
class ExpenseCostView(View):
""" 个人消费总额"""
def post(self, request):
params = {'user_id': (1, str)}
js, code = valid_body_js(request, params)
if code != CODE_SUCCESS:
logger.error("invalid param")
return json_response(code)
today = datetime.date.today()
last_week = today - datetime.timedelta(days=7)
last_month = today - datetime.timedelta(days=30)
last_year = today - datetime.timedelta(days=365)
week_model = ExpenseInfo.objects.filter(expense_time__gt=last_week)
week_cost = 0
for week in week_model:
week_cost += week.expense_cost
month_model = ExpenseInfo.objects.filter(expense_time__gt=last_month)
month_cost = 0
for month in month_model:
month_cost += month.expense_cost
year_model = ExpenseInfo.objects.filter(expense_time__gt=last_year)
year_cost = 0
for year in year_model:
year_cost += year.expense_cost
rsp = {
'week_cost': week_cost,
'month_cost': month_cost,
'year_cost': year_cost,
}
return json_response(CODE_SUCCESS, rsp)
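# A hedged wiring sketch (not part of the original file): these views would
# typically be exposed in the app's urls.py; the route paths are illustrative only.
#
#     from django.urls import path
#     from expense import views
#
#     urlpatterns = [
#         path('expense/list/', views.ExpenseListView.as_view()),
#         path('expense/add/', views.ExpenseAddView.as_view()),
#         path('expense/remove/', views.ExpenseRemoveView.as_view()),
#         path('expense/update/', views.ExpenseUpdateView.as_view()),
#         path('expense/cost/', views.ExpenseCostView.as_view()),
#     ]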
| 34.772414
| 117
| 0.608092
|
e50dda574001aa520706ebc14135a8092b12f5b2
| 903
|
py
|
Python
|
src/deploy/osp_deployer/dateloghandler.py
|
kamlesh6808/JetPack
|
54871b307cc7385939dd89f9bb5e9ed9bb3036fe
|
[
"Apache-2.0"
] | 31
|
2017-11-14T02:21:18.000Z
|
2022-02-16T07:28:54.000Z
|
src/deploy/osp_deployer/dateloghandler.py
|
kamlesh6808/JetPack
|
54871b307cc7385939dd89f9bb5e9ed9bb3036fe
|
[
"Apache-2.0"
] | 324
|
2018-01-10T16:52:25.000Z
|
2021-09-23T19:02:50.000Z
|
src/deploy/osp_deployer/dateloghandler.py
|
kamlesh6808/JetPack
|
54871b307cc7385939dd89f9bb5e9ed9bb3036fe
|
[
"Apache-2.0"
] | 215
|
2017-11-01T15:50:16.000Z
|
2022-02-16T07:28:56.000Z
|
# Copyright (c) 2015-2021 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime
class Dateloghandler(logging.FileHandler):
def __init__(self, filename, mode):
path = '/auto_results/'
fname = datetime.now().strftime(".%Y.%m.%d-%H.%M")
super(Dateloghandler, self).__init__(path + filename + fname, mode)
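# A minimal usage sketch (not part of the original module); it assumes the
# hard-coded /auto_results/ directory exists and is writable.
if __name__ == "__main__":
    log = logging.getLogger("deploy")
    log.addHandler(Dateloghandler("deployment.log", "a"))
    log.warning("starting deployment")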
| 34.730769
| 75
| 0.728682
|
63d412a3830715cd308ecb531bfdd9066e79132d
| 544
|
py
|
Python
|
heltour/tournament/migrations/0149_auto_20170307_1700.py
|
zbidwell/heltour
|
3895142695096a81cc65c3fefb7d4501ed796f46
|
[
"MIT"
] | 41
|
2016-08-17T19:58:42.000Z
|
2021-11-08T10:52:07.000Z
|
heltour/tournament/migrations/0149_auto_20170307_1700.py
|
zbidwell/heltour
|
3895142695096a81cc65c3fefb7d4501ed796f46
|
[
"MIT"
] | 257
|
2016-08-17T22:29:05.000Z
|
2022-01-13T00:42:05.000Z
|
heltour/tournament/migrations/0149_auto_20170307_1700.py
|
zbidwell/heltour
|
3895142695096a81cc65c3fefb7d4501ed796f46
|
[
"MIT"
] | 31
|
2016-09-23T23:36:14.000Z
|
2022-01-14T17:05:08.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-03-07 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tournament', '0148_player_timezone_offset'),
]
operations = [
migrations.AlterField(
model_name='player',
name='account_status',
field=models.CharField(choices=[('normal', 'Normal'), ('engine', 'Engine'), ('booster', 'Booster'), ('closed', 'Closed')], default='normal', max_length=31),
),
]
| 25.904762
| 168
| 0.599265
|
16572a15be349542b56030cf6704034960edb6c1
| 927
|
py
|
Python
|
models.py
|
realityone/ApnManger
|
696d1eb945a29c5f8cd70004fc55e5ca37e1165a
|
[
"MIT"
] | 3
|
2015-07-29T12:05:49.000Z
|
2015-10-09T07:35:23.000Z
|
models.py
|
realityone/ApnManger
|
696d1eb945a29c5f8cd70004fc55e5ca37e1165a
|
[
"MIT"
] | null | null | null |
models.py
|
realityone/ApnManger
|
696d1eb945a29c5f8cd70004fc55e5ca37e1165a
|
[
"MIT"
] | null | null | null |
from uuid import uuid4
from base64 import b64encode
class ApnProxyConfig(object):
description = 'Custom Apn Proxy Profile'
organization = 'Realityone'
def __init__(self, proxy_ip, port, name='Apn Proxy Config', username=None, password=None):
self.name = name
self.proxy_ip = proxy_ip
self.port = port
self.username = username or ''
self.password = password or ''
@property
def password(self):
return self._password
@password.setter
def password(self, value):
self._password = b64encode(value).strip()
@classmethod
def _uuid(cls):
return str(uuid4()).upper()
@property
def config_uid(self):
return self._uuid()
@property
def file_uuid(self):
return self._uuid()
if __name__ == '__main__':
apn = ApnProxyConfig('127.0.0.1', 1234)
print apn.password, apn.file_uuid, apn.config_uid
| 23.769231
| 94
| 0.640777
|
b1853774f8a6d9f149843126199dd2b1612e53b0
| 1,154
|
py
|
Python
|
src/CryptoPlus/Hash/python_SHA512.py
|
voytecPL/pycryptoplus
|
86905bbb8661e00cfb2afdc4461d4a79b6429d8a
|
[
"MIT"
] | 1
|
2022-02-27T17:46:18.000Z
|
2022-02-27T17:46:18.000Z
|
src/CryptoPlus/Hash/python_SHA512.py
|
voytecPL/pycryptoplus
|
86905bbb8661e00cfb2afdc4461d4a79b6429d8a
|
[
"MIT"
] | null | null | null |
src/CryptoPlus/Hash/python_SHA512.py
|
voytecPL/pycryptoplus
|
86905bbb8661e00cfb2afdc4461d4a79b6429d8a
|
[
"MIT"
] | null | null | null |
from .pysha512 import sha512
__all__ = ['new','digest_size']
def new(data=None):
"""Create a new pure python SHA-512 hash object
data = initial input (raw string) to the hashing object
if present, the method call update(arg) is made
EXAMPLE: FIPS 180-2
=========
>>> from CryptoPlus.Hash import python_SHA512
>>> message = b"abc"
>>> hasher = python_SHA512.new()
>>> hasher.update(message)
>>> hasher.hexdigest()
'ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f'
>>> message = b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"
>>> hasher = python_SHA512.new()
>>> hasher.update(message)
>>> hasher.hexdigest()
'8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909'
"""
return sha512(data)
digest_size = sha512.digest_size
| 37.225806
| 138
| 0.667244
|
9365c2f3f881a98000b9f5ce774491ebb8be1336
| 4,953
|
py
|
Python
|
huaweicloud-sdk-moderation/huaweicloudsdkmoderation/v1/model/image_detection_result_body.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-moderation/huaweicloudsdkmoderation/v1/model/image_detection_result_body.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-moderation/huaweicloudsdkmoderation/v1/model/image_detection_result_body.py
|
Adek06/huaweicloud-sdk-python-v3
|
3d13b27d089e04a1ae567cd649b3c5509e0391d2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
class ImageDetectionResultBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'detail': 'ImageDetectionResultDetail',
'suggestion': 'str',
'category_suggestion': 'object'
}
attribute_map = {
'detail': 'detail',
'suggestion': 'suggestion',
'category_suggestion': 'category_suggestion'
}
def __init__(self, detail=None, suggestion=None, category_suggestion=None):
"""ImageDetectionResultBody - a model defined in huaweicloud sdk"""
self._detail = None
self._suggestion = None
self._category_suggestion = None
self.discriminator = None
if detail is not None:
self.detail = detail
if suggestion is not None:
self.suggestion = suggestion
if category_suggestion is not None:
self.category_suggestion = category_suggestion
@property
def detail(self):
"""Gets the detail of this ImageDetectionResultBody.
:return: The detail of this ImageDetectionResultBody.
:rtype: ImageDetectionResultDetail
"""
return self._detail
@detail.setter
def detail(self, detail):
"""Sets the detail of this ImageDetectionResultBody.
:param detail: The detail of this ImageDetectionResultBody.
:type: ImageDetectionResultDetail
"""
self._detail = detail
@property
def suggestion(self):
"""Gets the suggestion of this ImageDetectionResultBody.
        Whether the detection result passes. block: contains sensitive information, does not pass. pass: contains no sensitive information, passes. review: needs manual re-inspection. > Note: when several scenes are checked at once, the value of suggestion follows the scene most likely to contain sensitive information, i.e. if any scene returns block the overall suggestion is block; if every scene passes, suggestion is pass; otherwise at least one scene needs review and suggestion is review.
:return: The suggestion of this ImageDetectionResultBody.
:rtype: str
"""
return self._suggestion
@suggestion.setter
def suggestion(self, suggestion):
"""Sets the suggestion of this ImageDetectionResultBody.
        Whether the detection result passes. block: contains sensitive information, does not pass. pass: contains no sensitive information, passes. review: needs manual re-inspection. > Note: when several scenes are checked at once, the value of suggestion follows the scene most likely to contain sensitive information, i.e. if any scene returns block the overall suggestion is block; if every scene passes, suggestion is pass; otherwise at least one scene needs review and suggestion is review.
:param suggestion: The suggestion of this ImageDetectionResultBody.
:type: str
"""
self._suggestion = suggestion
@property
def category_suggestion(self):
"""Gets the category_suggestion of this ImageDetectionResultBody.
        The detection result for each individual scene. block: contains sensitive information, does not pass. pass: contains no sensitive information, passes. review: needs manual re-inspection.
:return: The category_suggestion of this ImageDetectionResultBody.
:rtype: object
"""
return self._category_suggestion
@category_suggestion.setter
def category_suggestion(self, category_suggestion):
"""Sets the category_suggestion of this ImageDetectionResultBody.
        The detection result for each individual scene. block: contains sensitive information, does not pass. pass: contains no sensitive information, passes. review: needs manual re-inspection.
:param category_suggestion: The category_suggestion of this ImageDetectionResultBody.
:type: object
"""
self._category_suggestion = category_suggestion
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ImageDetectionResultBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
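# Illustrative sketch only: the category key 'porn' and the values are made up,
# while the allowed 'pass'/'block'/'review' strings come from the docstrings above.
#
#     body = ImageDetectionResultBody(suggestion='pass',
#                                     category_suggestion={'porn': 'pass'})
#     print(body.to_str())   # pretty-printed dict built by to_dict()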
| 30.386503
| 212
| 0.617605
|
5a659059a6176edea06696098e694f4b071736eb
| 520
|
py
|
Python
|
dataloader_test.py
|
DocyNoah/Visualizing-CNNs-for-monocular-depth-estimation
|
38f10bee89c31379c399834e89905b5d39c31764
|
[
"MIT"
] | null | null | null |
dataloader_test.py
|
DocyNoah/Visualizing-CNNs-for-monocular-depth-estimation
|
38f10bee89c31379c399834e89905b5d39c31764
|
[
"MIT"
] | null | null | null |
dataloader_test.py
|
DocyNoah/Visualizing-CNNs-for-monocular-depth-estimation
|
38f10bee89c31379c399834e89905b5d39c31764
|
[
"MIT"
] | null | null | null |
import loaddata
import torch
from PIL import Image
import numpy as np
def main():
batch_size = 8
train_loader = loaddata.getTrainingData(batch_size)
for i, sample_batched in enumerate(train_loader):
image, depth_ = sample_batched['image'], sample_batched['depth']
for j in range(batch_size):
print(image[j].shape)
np_image = np.array(image[j])
pil_image = Image.fromarray(np_image)
pil_image.show(1)
if __name__ == "__main__":
main()
| 22.608696
| 72
| 0.646154
|
76e9d0e82dabebbba90d8afb2b24b7fb2d8e4a06
| 668
|
py
|
Python
|
exercicios-turtle/.history/firstinterface_20210627212745.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | 1
|
2021-11-28T18:49:21.000Z
|
2021-11-28T18:49:21.000Z
|
exercicios-turtle/.history/firstinterface_20210627212745.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
exercicios-turtle/.history/firstinterface_20210627212745.py
|
Aleff13/poo-ufsc
|
bc1574df26f840a3c0fd5b1e0c72e5d69f61493d
|
[
"MIT"
] | null | null | null |
import tkinter
#main frame
root = tkinter.Tk()
root.geometry('400x400+3200+100')
root.wm_title('Anotações')
#list box #frame
commentsFrame = tkinter.LabelFrame(root, text='Anotações')
commentsFrame.place(relwidth=1, relheight=0.5)
commentsList = tkinter.Listbox(commentsFrame)
commentsList.place(relwidth=1, relheight=0.5)
# options LabelFrame
optionsFrame = tkinter.LabelFrame(root, text='Opções')
optionsFrame.place(relwidth=1, relheight=0.5, rely=0.5)
inputCommentFrame = tkinter.LabelFrame(optionsFrame, text='Nova anotação')
inputCommentFrame.place(relwidth=1, relheight=0.4)
inputComment = tkinter.Entry(inputCommentFrame)
input
#start app
root.mainloop()
| 23.857143
| 74
| 0.788922
|
e0ca91e280cd0e65f3c64296acbc0d32bc2389be
| 3,675
|
py
|
Python
|
kunquat/tracker/ui/model/column.py
|
cyberixae/kunquat
|
06ae72b2c1519686cc510ce887d9d45a5c3fa3a3
|
[
"CC0-1.0"
] | null | null | null |
kunquat/tracker/ui/model/column.py
|
cyberixae/kunquat
|
06ae72b2c1519686cc510ce887d9d45a5c3fa3a3
|
[
"CC0-1.0"
] | null | null | null |
kunquat/tracker/ui/model/column.py
|
cyberixae/kunquat
|
06ae72b2c1519686cc510ce887d9d45a5c3fa3a3
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Author: Tomi Jylhä-Ollila, Finland 2014
#
# This file is part of Kunquat.
#
# CC0 1.0 Universal, http://creativecommons.org/publicdomain/zero/1.0/
#
# To the extent possible under law, Kunquat Affirmers have waived all
# copyright and related or neighboring rights to Kunquat.
#
from trigger import Trigger
import tstamp
COLUMNS_MAX = 64 # TODO: define in libkunquat interface
class Column():
def __init__(self, pattern_id, col_num):
assert pattern_id
assert 0 <= col_num < COLUMNS_MAX
self._pattern_id = pattern_id
self._col_num = col_num
self._trigger_rows = None
self._store = None
self._controller = None
def __eq__(self, other):
assert isinstance(other, Column)
return (self._col_num == other._col_num) and \
(self._pattern_id == other._pattern_id)
def __ne__(self, other):
return not (self == other)
def set_controller(self, controller):
self._controller = controller
self._store = controller.get_store()
def get_trigger_row_positions(self):
self._build_trigger_rows()
return self._trigger_rows.keys()
def get_trigger_row_positions_in_range(self, start, stop):
self._build_trigger_rows()
return [ts for ts in self._trigger_rows.keys() if start <= ts < stop]
def get_trigger_count_at_row(self, row_ts):
self._build_trigger_rows()
return len(self._trigger_rows[row_ts])
def get_trigger(self, row_ts, trigger_index):
self._build_trigger_rows()
return self._trigger_rows[row_ts][trigger_index]
def has_trigger(self, row_ts, trigger_index):
try:
self.get_trigger(row_ts, trigger_index)
return True
except (KeyError, IndexError):
return False
def insert_trigger(self, row_ts, trigger_index, trigger):
self._build_trigger_rows()
if row_ts not in self._trigger_rows:
self._trigger_rows[row_ts] = []
self._trigger_rows[row_ts].insert(trigger_index, trigger)
raw_data = self._make_raw_data(self._trigger_rows)
self._store[self._get_key()] = raw_data
def remove_trigger(self, row_ts, trigger_index):
self._build_trigger_rows()
assert self.has_trigger(row_ts, trigger_index)
del self._trigger_rows[row_ts][trigger_index]
raw_data = self._make_raw_data(self._trigger_rows)
self._store[self._get_key()] = raw_data
def _build_trigger_rows(self):
if self._trigger_rows == None:
self._trigger_rows = {}
trigger_list = self._get_raw_data()
for ts_data, evspec in trigger_list:
ts = tstamp.Tstamp(ts_data)
if ts not in self._trigger_rows:
self._trigger_rows[ts] = []
trigger_type, argument = evspec
trigger = Trigger(trigger_type, argument)
self._trigger_rows[ts].append(trigger)
def _get_key(self):
key = '{}/col_{:02x}/p_triggers.json'.format(
self._pattern_id, self._col_num)
return key
def _get_raw_data(self):
key = self._get_key()
try:
triggers = self._store[key]
return triggers
except KeyError:
return []
def _make_raw_data(self, trigger_rows):
raw_data = []
for (ts, triggers) in trigger_rows.iteritems():
for trigger in triggers:
evspec = [trigger.get_type(), trigger.get_argument()]
raw_data.append([tuple(ts), evspec])
return raw_data
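# Rough usage sketch; the pattern id, column number, trigger type and argument are
# invented, and `controller` stands for the tracker's real controller whose
# get_store() returns the dict-like key/value store written to above.
#
#     col = Column('pat_000', 0)
#     col.set_controller(controller)
#     col.insert_trigger(tstamp.Tstamp(0), 0, Trigger('n+', None))
#     col.get_trigger_row_positions()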
| 30.122951
| 77
| 0.633197
|
5048a0b75136e08b8f643b8f9017972c139a9b08
| 28,491
|
py
|
Python
|
owslib/util.py
|
mtauban/OWSLib
|
0b64e7a8f7eb9e1fca369716f9803821066bf0f3
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/util.py
|
mtauban/OWSLib
|
0b64e7a8f7eb9e1fca369716f9803821066bf0f3
|
[
"BSD-3-Clause"
] | null | null | null |
owslib/util.py
|
mtauban/OWSLib
|
0b64e7a8f7eb9e1fca369716f9803821066bf0f3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2008 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
import os
import sys
from collections import OrderedDict
from dateutil import parser
from datetime import datetime, timedelta
import pytz
from owslib.etree import etree, ParseError
from owslib.namespaces import Namespaces
from urllib.parse import urlsplit, urlencode, urlparse, parse_qs, urlunparse, parse_qsl
import copy
from io import StringIO, BytesIO
import re
from copy import deepcopy
import warnings
import requests
import codecs
"""
Utility functions and classes
"""
class ServiceException(Exception):
# TODO: this should go in ows common module when refactored.
pass
# http://stackoverflow.com/questions/6256183/combine-two-dictionaries-of-dictionaries-python
def dict_union(d1, d2):
return dict((x, (dict_union(d1.get(x, {}), d2[x]) if isinstance(d2.get(x), dict) else d2.get(x, d1.get(x))))
for x in set(list(d1.keys()) + list(d2.keys())))
# Infinite DateTimes for Python. Used in SWE 2.0 and other OGC specs as "INF" and "-INF"
class InfiniteDateTime(object):
def __lt__(self, other):
return False
def __gt__(self, other):
return True
def timetuple(self):
return tuple()
class NegativeInfiniteDateTime(object):
def __lt__(self, other):
return True
def __gt__(self, other):
return False
def timetuple(self):
return tuple()
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def format_string(prop_string):
"""
Formats a property string to remove spaces and go from CamelCase to pep8
from: http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-camel-case
"""
if prop_string is None:
return ''
st_r = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', prop_string)
st_r = st_r.replace(' ', '')
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', st_r).lower()
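# For example, following the two substitutions above:
#
#     >>> format_string('CamelCase Name')
#     'camel_case_name'
#     >>> format_string(None)
#     ''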
def xml_to_dict(root, prefix=None, depth=1, diction=None):
"""
Recursively iterates through an xml element to convert each element in the tree to a (key,val).
Where key is the element tag and val is the inner-text of the element.
    Note that this recursively goes through the tree until the specified depth.
Parameters
===========
:root - root xml element, starting point of iteration
:prefix - a string to prepend to the resulting key (optional)
:depth - the number of depths to process in the tree (optional)
:diction - the dictionary to insert the (tag,text) pairs into (optional)
Return
=======
Dictionary of (key,value); where key is the element tag stripped of namespace and cleaned up to be pep8 and
value is the inner-text of the element. Note that duplicate elements will be replaced by the last element of the
same tag in the tree.
"""
ret = diction if diction is not None else dict()
for child in root:
val = testXMLValue(child)
# skip values that are empty or None
if val is None or val == '':
if depth > 1:
ret = xml_to_dict(child, prefix=prefix, depth=(depth - 1), diction=ret)
continue
key = format_string(child.tag.split('}')[-1])
if prefix is not None:
key = prefix + key
ret[key] = val
if depth > 1:
ret = xml_to_dict(child, prefix=prefix, depth=(depth - 1), diction=ret)
return ret
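# Sketch of the behaviour described in the docstring; the XML snippet and tag
# names are invented for illustration.
#
#     >>> elem = etree.fromstring('<obs><name>station1</name><value>42</value></obs>')
#     >>> xml_to_dict(elem)
#     {'name': 'station1', 'value': '42'}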
class ResponseWrapper(object):
"""
Return object type from openURL.
Provides a thin shim around requests response object to maintain code compatibility.
"""
def __init__(self, response):
self._response = response
def info(self):
return self._response.headers
def read(self):
return self._response.content
def geturl(self):
return self._response.url.replace('&&', '&')
# @TODO: __getattribute__ for poking at response
def openURL(url_base, data=None, method='Get', cookies=None, username=None, password=None, timeout=30, headers=None,
verify=True, cert=None, auth=None):
"""
Function to open URLs.
Uses requests library but with additional checks for OGC service exceptions and url formatting.
Also handles cookies and simple user password authentication.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided.
Defaults to ``True``.
:param cert: (optional) A file with a client side certificate for SSL authentication
to send with the :class:`Request`.
:param auth: Instance of owslib.util.Authentication
"""
headers = headers if headers is not None else {}
rkwargs = {}
rkwargs['timeout'] = timeout
if auth:
if username:
auth.username = username
if password:
auth.password = password
if cert:
auth.cert = cert
verify = verify and auth.verify
else:
auth = Authentication(username, password, cert, verify)
if auth.username and auth.password:
rkwargs['auth'] = (auth.username, auth.password)
rkwargs['cert'] = auth.cert
rkwargs['verify'] = verify
# FIXUP for WFS in particular, remove xml style namespace
# @TODO does this belong here?
method = method.split("}")[-1]
if method.lower() == 'post':
try:
etree.fromstring(data)
headers['Content-Type'] = 'text/xml'
except (ParseError, UnicodeEncodeError):
pass
rkwargs['data'] = data
elif method.lower() == 'get':
rkwargs['params'] = data
else:
raise ValueError("Unknown method ('%s'), expected 'get' or 'post'" % method)
if cookies is not None:
rkwargs['cookies'] = cookies
req = requests.request(method.upper(), url_base, headers=headers, **rkwargs)
if req.status_code in [400, 401]:
raise ServiceException(req.text)
if req.status_code in [404, 500, 502, 503, 504]: # add more if needed
req.raise_for_status()
# check for service exceptions without the http header set
if 'Content-Type' in req.headers and \
req.headers['Content-Type'] in ['text/xml', 'application/xml', 'application/vnd.ogc.se_xml']:
# just in case 400 headers were not set, going to have to read the xml to see if it's an exception report.
se_tree = etree.fromstring(req.content)
# to handle the variety of namespaces and terms across services
# and versions, especially for "legacy" responses like WMS 1.3.0
possible_errors = [
'{http://www.opengis.net/ows}Exception',
'{http://www.opengis.net/ows/1.1}Exception',
'{http://www.opengis.net/ogc}ServiceException',
'ServiceException'
]
for possible_error in possible_errors:
serviceException = se_tree.find(possible_error)
if serviceException is not None:
# and we need to deal with some message nesting
raise ServiceException('\n'.join([t.strip() for t in serviceException.itertext() if t.strip()]))
return ResponseWrapper(req)
# default namespace for nspath is OWS common
OWS_NAMESPACE = 'http://www.opengis.net/ows/1.1'
def nspath(path, ns=OWS_NAMESPACE):
"""
Prefix the given path with the given namespace identifier.
Parameters
----------
- path: ElementTree API Compatible path expression
- ns: the XML namespace URI.
"""
if ns is None or path is None:
return -1
components = []
for component in path.split('/'):
if component != '*':
component = '{%s}%s' % (ns, component)
components.append(component)
return '/'.join(components)
def nspath_eval(xpath, namespaces):
''' Return an etree friendly xpath '''
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (namespaces[namespace], element))
return '/'.join(out)
def cleanup_namespaces(element):
""" Remove unused namespaces from an element """
if etree.__name__ == 'lxml.etree':
etree.cleanup_namespaces(element)
return element
else:
return etree.fromstring(etree.tostring(element))
def add_namespaces(root, ns_keys):
if isinstance(ns_keys, str):
ns_keys = [ns_keys]
namespaces = Namespaces()
ns_keys = [(x, namespaces.get_namespace(x)) for x in ns_keys]
if etree.__name__ != 'lxml.etree':
# We can just add more namespaces when not using lxml.
        # We can't re-add existing namespaces. Get a list of current
# namespaces in use
existing_namespaces = set()
for elem in root.iter():
if elem.tag[0] == "{":
uri, tag = elem.tag[1:].split("}")
existing_namespaces.add(namespaces.get_namespace_from_url(uri))
for key, link in ns_keys:
if link is not None and key not in existing_namespaces:
root.set("xmlns:%s" % key, link)
return root
else:
# lxml does not support setting xmlns attributes
# Update the elements nsmap with new namespaces
new_map = root.nsmap
for key, link in ns_keys:
if link is not None:
new_map[key] = link
# Recreate the root element with updated nsmap
new_root = etree.Element(root.tag, nsmap=new_map)
# Carry over attributes
for a, v in list(root.items()):
new_root.set(a, v)
# Carry over children
for child in root:
new_root.append(deepcopy(child))
return new_root
def getXMLInteger(elem, tag):
"""
Return the text within the named tag as an integer.
Raises an exception if the tag cannot be found or if its textual
value cannot be converted to an integer.
Parameters
----------
- elem: the element to search within
- tag: the name of the tag to look for
"""
e = elem.find(tag)
if e is None:
raise ValueError('Missing %s in %s' % (tag, elem))
return int(e.text.strip())
def testXMLValue(val, attrib=False):
"""
Test that the XML value exists, return val.text, else return None
Parameters
----------
- val: the value to be tested
"""
if val is not None:
if attrib:
return val.strip()
elif val.text:
return val.text.strip()
else:
return None
else:
return None
def testXMLAttribute(element, attribute):
"""
Test that the XML element and attribute exist, return attribute's value, else return None
Parameters
----------
- element: the element containing the attribute
- attribute: the attribute name
"""
if element is not None:
return element.get(attribute)
return None
def http_post(url=None, request=None, lang='en-US', timeout=10, username=None, password=None, auth=None):
"""
Invoke an HTTP POST request
Parameters
----------
- url: the URL of the server
- request: the request message
- lang: the language
- timeout: timeout in seconds
"""
if url is None:
raise ValueError("URL required")
u = urlsplit(url)
headers = {
'User-Agent': 'parcelle-recs',
'Content-type': 'text/xml',
'Accept': 'text/xml',
'Accept-Language': lang,
'Accept-Encoding': 'gzip,deflate',
'Host': u.netloc,
}
rkwargs = {}
if auth:
if username:
auth.username = username
if password:
auth.password = password
else:
auth = Authentication(username, password)
if auth.username is not None and auth.password is not None:
rkwargs['auth'] = (auth.username, auth.password)
rkwargs['verify'] = auth.verify
rkwargs['cert'] = auth.cert
up = requests.post(url, request, headers=headers, **rkwargs)
return up.content
def http_get(*args, **kwargs):
# Copy input kwargs so the dict can be modified
rkwargs = copy.deepcopy(kwargs)
# Use Authentication instance if provided, else create one
auth = rkwargs.pop('auth', None)
if auth is not None:
if isinstance(auth, (tuple, list)):
auth = Authentication(*auth)
else:
auth = Authentication()
# Populate values with other arguments supplied
if 'username' in rkwargs:
auth.username = rkwargs.pop('username')
if 'password' in rkwargs:
auth.password = rkwargs.pop('password')
if 'cert' in rkwargs:
auth.cert = rkwargs.pop('cert')
if 'verify' in rkwargs:
auth.verify = rkwargs.pop('verify')
# Build keyword args for call to requests.get()
if auth.username and auth.password:
rkwargs.setdefault('auth', (auth.username, auth.password))
else:
rkwargs.setdefault('auth', None)
rkwargs.setdefault('cert', rkwargs.get('cert'))
rkwargs.setdefault('verify', rkwargs.get('verify', True))
return requests.get(*args, **rkwargs)
def element_to_string(element, encoding=None, xml_declaration=False):
"""
Returns a string from a XML object
Parameters
----------
- element: etree Element
- encoding (optional): encoding in string form. 'utf-8', 'ISO-8859-1', etc.
- xml_declaration (optional): whether to include xml declaration
"""
output = None
if encoding is None:
encoding = "ISO-8859-1"
if etree.__name__ == 'lxml.etree':
if xml_declaration:
if encoding in ['unicode', 'utf-8']:
output = '<?xml version="1.0" encoding="utf-8" standalone="no"?>\n{}'.format(
etree.tostring(element, encoding='unicode'))
else:
output = etree.tostring(element, encoding=encoding, xml_declaration=True)
else:
output = etree.tostring(element)
else:
if xml_declaration:
output = '<?xml version="1.0" encoding="{}" standalone="no"?>\n{}'.format(
encoding, etree.tostring(element, encoding=encoding))
else:
output = etree.tostring(element)
return output
def xml2string(xml):
"""
Return a string of XML object
Parameters
----------
- xml: xml string
"""
warnings.warn("DEPRECIATION WARNING! You should now use the 'element_to_string' method \
The 'xml2string' method will be removed in a future version of OWSLib.")
return '<?xml version="1.0" encoding="ISO-8859-1" standalone="no"?>\n' + xml
def xmlvalid(xml, xsd):
"""
Test whether an XML document is valid
Parameters
----------
- xml: XML content
- xsd: pointer to XML Schema (local file path or URL)
"""
xsd1 = etree.parse(xsd)
xsd2 = etree.XMLSchema(xsd1)
doc = etree.parse(StringIO(xml))
return xsd2.validate(doc)
def xmltag_split(tag):
''' Return XML element bare tag name (without prefix) '''
try:
return tag.split('}')[1]
except Exception:
return tag
def getNamespace(element):
''' Utility method to extract the namespace from an XML element tag encoded as {namespace}localname. '''
if element.tag[0] == '{':
return element.tag[1:].split("}")[0]
else:
return ""
def build_get_url(base_url, params, overwrite=False):
''' Utility function to build a full HTTP GET URL from the service base URL and a dictionary of HTTP parameters.
TODO: handle parameters case-insensitive?
@param overwrite: boolean flag to allow overwrite of parameters of the base_url (default: False)
'''
qs_base = []
if base_url.find('?') != -1:
qs_base = parse_qsl(base_url.split('?')[1])
qs_params = []
for key, value in list(params.items()):
qs_params.append((key, value))
qs = qs_add = []
if overwrite is True:
# all params and additional base
qs = qs_params
qs_add = qs_base
else:
# all base and additional params
qs = qs_base
qs_add = qs_params
pars = [x[0] for x in qs]
for key, value in qs_add:
if key not in pars:
qs.append((key, value))
urlqs = urlencode(tuple(qs))
return base_url.split('?')[0] + '?' + urlqs
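# Example of the default (overwrite=False) behaviour; host and parameters are
# placeholders.
#
#     >>> build_get_url('http://example.com/wms?service=WMS', {'request': 'GetCapabilities'})
#     'http://example.com/wms?service=WMS&request=GetCapabilities'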
def dump(obj, prefix=''):
'''Utility function to print to standard output a generic object with all its attributes.'''
print(("{} {}.{} : {}".format(prefix, obj.__module__, obj.__class__.__name__, obj.__dict__)))
def getTypedValue(data_type, value):
'''Utility function to cast a string value to the appropriate XSD type. '''
# If the default value is empty
if value is None:
return
if data_type == 'boolean':
return True if value.lower() == 'true' else False
elif data_type == 'integer':
return int(value)
elif data_type == 'float':
return float(value)
elif data_type == 'string':
return str(value)
else:
return value # no type casting
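# For instance:
#
#     >>> getTypedValue('integer', '42')
#     42
#     >>> getTypedValue('boolean', 'True')
#     True
#     >>> getTypedValue('float', '1.5')
#     1.5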
def extract_time(element):
''' return a datetime object based on a gml text string
ex:
<gml:beginPosition>2006-07-27T21:10:00Z</gml:beginPosition>
<gml:endPosition indeterminatePosition="now"/>
If there happens to be a strange element with both attributes and text,
use the text.
ex: <gml:beginPosition indeterminatePosition="now">2006-07-27T21:10:00Z</gml:beginPosition>
Would be 2006-07-27T21:10:00Z, not 'now'
'''
if element is None:
return None
try:
dt = parser.parse(element.text)
except Exception:
att = testXMLValue(element.attrib.get('indeterminatePosition'), True)
if att and att == 'now':
dt = datetime.utcnow()
dt.replace(tzinfo=pytz.utc)
else:
dt = None
return dt
def extract_xml_list(elements):
"""
    Some people don't have separate tags for their keywords and instead separate
    them with a newline. This will extract all of the keywords correctly.
"""
keywords = (re.split(r'[\n\r]+', f.text) for f in elements if f.text)
flattened = (item.strip() for sublist in keywords for item in sublist)
remove_blank = [_f for _f in flattened if _f]
return remove_blank
def strip_bom(raw_text):
""" return the raw (assumed) xml response without the BOM
"""
boms = [
# utf-8
codecs.BOM_UTF8,
# utf-16
codecs.BOM,
codecs.BOM_BE,
codecs.BOM_LE,
codecs.BOM_UTF16,
codecs.BOM_UTF16_LE,
codecs.BOM_UTF16_BE,
# utf-32
codecs.BOM_UTF32,
codecs.BOM_UTF32_LE,
codecs.BOM_UTF32_BE
]
if isinstance(raw_text, bytes):
for bom in boms:
if raw_text.startswith(bom):
return raw_text[len(bom):]
return raw_text
def clean_ows_url(url):
"""
clean an OWS URL of basic service elements
source: https://stackoverflow.com/a/11640565
"""
if url is None or not url.startswith('http'):
return url
filtered_kvp = {}
basic_service_elements = ('service', 'version', 'request')
parsed = urlparse(url)
qd = parse_qs(parsed.query, keep_blank_values=True)
for key, value in list(qd.items()):
if key.lower() not in basic_service_elements:
filtered_kvp[key] = value
newurl = urlunparse([
parsed.scheme,
parsed.netloc,
parsed.path,
parsed.params,
urlencode(filtered_kvp, doseq=True),
parsed.fragment
])
return newurl
def bind_url(url):
"""binds an HTTP GET query string endpiont"""
if url.find('?') == -1: # like http://host/wms
binder = '?'
# if like http://host/wms?foo=bar& or http://host/wms?foo=bar
if url.find('=') != -1:
if url.find('&', -1) != -1: # like http://host/wms?foo=bar&
binder = ''
else: # like http://host/wms?foo=bar
binder = '&'
# if like http://host/wms?foo
if url.find('?') != -1:
if url.find('?', -1) != -1: # like http://host/wms?
binder = ''
elif url.find('&', -1) == -1: # like http://host/wms?foo=bar
binder = '&'
return '%s%s' % (url, binder)
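# Illustration with placeholder URLs:
#
#     >>> bind_url('http://host/wms')
#     'http://host/wms?'
#     >>> bind_url('http://host/wms?foo=bar')
#     'http://host/wms?foo=bar&'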
import logging
# Null logging handler
NullHandler = logging.NullHandler
log = logging.getLogger('owslib')
log.addHandler(NullHandler())
def which_etree():
"""decipher which etree library is being used by OWSLib"""
which_etree = None
if 'lxml' in etree.__file__:
which_etree = 'lxml.etree'
elif 'xml/etree' in etree.__file__:
which_etree = 'xml.etree'
elif 'elementree' in etree.__file__:
which_etree = 'elementtree.ElementTree'
return which_etree
def findall(root, xpath, attribute_name=None, attribute_value=None):
"""Find elements recursively from given root element based on
xpath and possibly given attribute
:param root: Element root element where to start search
    :param xpath: xpath definition, like {http://foo/bar/namespace}ElementName
:param attribute_name: name of possible attribute of given element
:param attribute_value: value of the attribute
:return: list of elements or None
"""
found_elements = []
if attribute_name is not None and attribute_value is not None:
xpath = '%s[@%s="%s"]' % (xpath, attribute_name, attribute_value)
found_elements = root.findall('.//' + xpath)
if found_elements == []:
found_elements = None
return found_elements
def datetime_from_iso(iso):
"""returns a datetime object from dates in the format 2001-07-01T00:00:00Z or 2001-07-01T00:00:00.000Z """
try:
iso_datetime = datetime.strptime(iso, "%Y-%m-%dT%H:%M:%SZ")
except Exception:
iso_datetime = datetime.strptime(iso, "%Y-%m-%dT%H:%M:%S.%fZ")
return iso_datetime
def datetime_from_ansi(ansi):
"""Converts an ansiDate (expressed as a number = the nuber of days since the datum origin of ansi)
to a python datetime object.
"""
datumOrigin = datetime(1600, 12, 31, 0, 0, 0)
return datumOrigin + timedelta(ansi)
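# Quick checks for the two parsers above:
#
#     >>> datetime_from_iso('2001-07-01T00:00:00Z')
#     datetime.datetime(2001, 7, 1, 0, 0)
#     >>> datetime_from_ansi(1)
#     datetime.datetime(1601, 1, 1, 0, 0)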
def is_number(s):
"""simple helper to test if value is number as requests with numbers don't
need quote marks
"""
try:
float(s)
return True
except ValueError:
return False
def makeString(value):
# using repr unconditionally breaks things in some circumstances if a
# value is already a string
if type(value) is not str:
sval = repr(value)
else:
sval = value
return sval
def param_list_to_url_string(param_list, param_name):
"""Converts list of tuples for certain WCS GetCoverage keyword arguments
(subsets, resolutions, sizes) to a url-encoded string
"""
string = ''
for param in param_list:
if len(param) > 2:
if not is_number(param[1]):
string += "&" + urlencode({param_name: param[0] + '("' + makeString(param[1]) + '","' + makeString(param[2]) + '")'}) # noqa
else:
string += "&" + urlencode({param_name: param[0] + "(" + makeString(param[1]) + "," + makeString(param[2]) + ")"}) # noqa
else:
if not is_number(param[1]):
string += "&" + urlencode({param_name: param[0] + '("' + makeString(param[1]) + '")'}) # noqa
else:
string += "&" + urlencode({param_name: param[0] + "(" + makeString(param[1]) + ")"}) # noqa
return string
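# Example with a made-up axis label; numeric bounds are emitted without quotes and
# string bounds with quotes, matching the branches above.
#
#     >>> param_list_to_url_string([('x', 10, 20)], 'subset')
#     '&subset=x%2810%2C20%29'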
def is_vector_grid(grid_elem):
pass
class Authentication(object):
_USERNAME = None
_PASSWORD = None
_CERT = None
_VERIFY = None
def __init__(self, username=None, password=None,
cert=None, verify=True, shared=False):
'''
:param str username=None: Username for basic authentication, None for
unauthenticated access (or if using cert/verify)
:param str password=None: Password for basic authentication, None for
unauthenticated access (or if using cert/verify)
:param cert=None: Either a str (path to a combined certificate/key) or
tuple/list of paths (certificate, key). If supplied, the target
files must exist.
:param verify=True: Either a bool (verify SSL certificates, use system
CA bundle) or str (path to a specific CA bundle). If a str, the
target file must exist.
:param bool shared=False: Set to True to make the values be class-level
attributes (shared among instances where shared=True) instead of
instance-level (shared=False, default)
'''
self.shared = shared
self.username = username
self.password = password
self.cert = cert
self.verify = verify
@property
def username(self):
if self.shared:
return self._USERNAME
return self._username
@username.setter
def username(self, value):
if value is None:
pass
elif not isinstance(value, str):
raise TypeError('Value for "username" must be a str')
if self.shared:
self.__class__._USERNAME = value
else:
self._username = value
@property
def password(self):
if self.shared:
return self._PASSWORD
return self._password
@password.setter
def password(self, value):
if value is None:
pass
elif not isinstance(value, str):
raise TypeError('Value for "password" must be a str')
if self.shared:
self.__class__._PASSWORD = value
else:
self._password = value
@property
def cert(self):
if self.shared:
return self._CERT
return self._cert
@cert.setter
def cert(self, certificate, key=None):
error = 'Value for "cert" must be a str path to a file or list/tuple of str paths'
value = None
if certificate is None:
value = certificate
elif isinstance(certificate, (list, tuple)):
for _ in certificate:
if not isinstance(_, str):
raise TypeError(error)
os.stat(_) # Raises OSError/FileNotFoundError if missing
# Both paths supplied as same argument
value = tuple(certificate)
elif isinstance(certificate, str):
os.stat(certificate) # Raises OSError/FileNotFoundError if missing
if isinstance(key, str):
# Separate files for certificate and key
value = (certificate, key)
else:
# Assume combined file of both certificate and key
value = certificate
else:
raise TypeError(error)
if self.shared:
self.__class__._CERT = value
else:
self._cert = value
@property
def verify(self):
if self.shared:
return self._VERIFY
return self._verify
@verify.setter
def verify(self, value):
if value is None:
pass # Passthrough when clearing the value
elif not isinstance(value, (bool, str)):
raise TypeError(
'Value for "verify" must a bool or str path to a file')
elif isinstance(value, str):
os.stat(value) # Raises OSError/FileNotFoundError if missing
if self.shared:
self.__class__._VERIFY = value
else:
self._verify = value
@property
def urlopen_kwargs(self):
return {
'username': self.username,
'password': self.password,
'cert': self.cert,
'verify': self.verify
}
def __repr__(self, *args, **kwargs):
return '<{} shared={} username={} password={} cert={} verify={}>'.format(
self.__class__.__name__, self.shared, self.username, self.password, self.cert, self.verify)
| 29.493789
| 141
| 0.609034
|
4e0a39dc1ec7bc3a6ba8d908a09df47af61c9fe8
| 19,925
|
py
|
Python
|
tests/test_completion_engine.py
|
zzl0/litecli
|
cd5d4e0cf9164a147c0c59f6f559347c851db5d6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_completion_engine.py
|
zzl0/litecli
|
cd5d4e0cf9164a147c0c59f6f559347c851db5d6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_completion_engine.py
|
zzl0/litecli
|
cd5d4e0cf9164a147c0c59f6f559347c851db5d6
|
[
"BSD-3-Clause"
] | null | null | null |
from litecli.packages.completion_engine import suggest_type
import pytest
def sorted_dicts(dicts):
"""input is a list of dicts."""
return sorted(tuple(x.items()) for x in dicts)
def test_select_suggests_cols_with_visible_table_scope():
suggestions = suggest_type("SELECT FROM tabl", "SELECT ")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tabl"]},
{"type": "column", "tables": [(None, "tabl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_select_suggests_cols_with_qualified_table_scope():
suggestions = suggest_type("SELECT FROM sch.tabl", "SELECT ")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tabl"]},
{"type": "column", "tables": [("sch", "tabl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
@pytest.mark.parametrize(
"expression",
[
"SELECT * FROM tabl WHERE ",
"SELECT * FROM tabl WHERE (",
"SELECT * FROM tabl WHERE foo = ",
"SELECT * FROM tabl WHERE bar OR ",
"SELECT * FROM tabl WHERE foo = 1 AND ",
"SELECT * FROM tabl WHERE (bar > 10 AND ",
"SELECT * FROM tabl WHERE (bar AND (baz OR (qux AND (",
"SELECT * FROM tabl WHERE 10 < ",
"SELECT * FROM tabl WHERE foo BETWEEN ",
"SELECT * FROM tabl WHERE foo BETWEEN foo AND ",
],
)
def test_where_suggests_columns_functions(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tabl"]},
{"type": "column", "tables": [(None, "tabl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
@pytest.mark.parametrize(
"expression",
["SELECT * FROM tabl WHERE foo IN (", "SELECT * FROM tabl WHERE foo IN (bar, "],
)
def test_where_in_suggests_columns(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tabl"]},
{"type": "column", "tables": [(None, "tabl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_where_equals_any_suggests_columns_or_keywords():
text = "SELECT * FROM tabl WHERE foo = ANY("
suggestions = suggest_type(text, text)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tabl"]},
{"type": "column", "tables": [(None, "tabl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_lparen_suggests_cols():
suggestion = suggest_type("SELECT MAX( FROM tbl", "SELECT MAX(")
assert suggestion == [{"type": "column", "tables": [(None, "tbl", None)]}]
def test_operand_inside_function_suggests_cols1():
suggestion = suggest_type("SELECT MAX(col1 + FROM tbl", "SELECT MAX(col1 + ")
assert suggestion == [{"type": "column", "tables": [(None, "tbl", None)]}]
def test_operand_inside_function_suggests_cols2():
suggestion = suggest_type(
"SELECT MAX(col1 + col2 + FROM tbl", "SELECT MAX(col1 + col2 + "
)
assert suggestion == [{"type": "column", "tables": [(None, "tbl", None)]}]
def test_select_suggests_cols_and_funcs():
suggestions = suggest_type("SELECT ", "SELECT ")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": []},
{"type": "column", "tables": []},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
@pytest.mark.parametrize(
"expression",
[
"SELECT * FROM ",
"INSERT INTO ",
"COPY ",
"UPDATE ",
"DESCRIBE ",
"DESC ",
"EXPLAIN ",
"SELECT * FROM foo JOIN ",
],
)
def test_expression_suggests_tables_views_and_schemas(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
@pytest.mark.parametrize(
"expression",
[
"SELECT * FROM sch.",
"INSERT INTO sch.",
"COPY sch.",
"UPDATE sch.",
"DESCRIBE sch.",
"DESC sch.",
"EXPLAIN sch.",
"SELECT * FROM foo JOIN sch.",
],
)
def test_expression_suggests_qualified_tables_views_and_schemas(expression):
suggestions = suggest_type(expression, expression)
assert sorted_dicts(suggestions) == sorted_dicts(
[{"type": "table", "schema": "sch"}, {"type": "view", "schema": "sch"}]
)
def test_truncate_suggests_tables_and_schemas():
suggestions = suggest_type("TRUNCATE ", "TRUNCATE ")
assert sorted_dicts(suggestions) == sorted_dicts(
[{"type": "table", "schema": []}, {"type": "schema"}]
)
def test_truncate_suggests_qualified_tables():
suggestions = suggest_type("TRUNCATE sch.", "TRUNCATE sch.")
assert sorted_dicts(suggestions) == sorted_dicts(
[{"type": "table", "schema": "sch"}]
)
def test_distinct_suggests_cols():
suggestions = suggest_type("SELECT DISTINCT ", "SELECT DISTINCT ")
assert suggestions == [{"type": "column", "tables": []}]
def test_col_comma_suggests_cols():
suggestions = suggest_type("SELECT a, b, FROM tbl", "SELECT a, b,")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tbl"]},
{"type": "column", "tables": [(None, "tbl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_table_comma_suggests_tables_and_schemas():
suggestions = suggest_type("SELECT a, b FROM tbl1, ", "SELECT a, b FROM tbl1, ")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
def test_into_suggests_tables_and_schemas():
suggestion = suggest_type("INSERT INTO ", "INSERT INTO ")
assert sorted_dicts(suggestion) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
def test_insert_into_lparen_suggests_cols():
suggestions = suggest_type("INSERT INTO abc (", "INSERT INTO abc (")
assert suggestions == [{"type": "column", "tables": [(None, "abc", None)]}]
def test_insert_into_lparen_partial_text_suggests_cols():
suggestions = suggest_type("INSERT INTO abc (i", "INSERT INTO abc (i")
assert suggestions == [{"type": "column", "tables": [(None, "abc", None)]}]
def test_insert_into_lparen_comma_suggests_cols():
suggestions = suggest_type("INSERT INTO abc (id,", "INSERT INTO abc (id,")
assert suggestions == [{"type": "column", "tables": [(None, "abc", None)]}]
def test_partially_typed_col_name_suggests_col_names():
suggestions = suggest_type(
"SELECT * FROM tabl WHERE col_n", "SELECT * FROM tabl WHERE col_n"
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["tabl"]},
{"type": "column", "tables": [(None, "tabl", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_dot_suggests_cols_of_a_table_or_schema_qualified_table():
suggestions = suggest_type("SELECT tabl. FROM tabl", "SELECT tabl.")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "column", "tables": [(None, "tabl", None)]},
{"type": "table", "schema": "tabl"},
{"type": "view", "schema": "tabl"},
{"type": "function", "schema": "tabl"},
]
)
def test_dot_suggests_cols_of_an_alias():
suggestions = suggest_type("SELECT t1. FROM tabl1 t1, tabl2 t2", "SELECT t1.")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": "t1"},
{"type": "view", "schema": "t1"},
{"type": "column", "tables": [(None, "tabl1", "t1")]},
{"type": "function", "schema": "t1"},
]
)
def test_dot_col_comma_suggests_cols_or_schema_qualified_table():
suggestions = suggest_type(
"SELECT t1.a, t2. FROM tabl1 t1, tabl2 t2", "SELECT t1.a, t2."
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "column", "tables": [(None, "tabl2", "t2")]},
{"type": "table", "schema": "t2"},
{"type": "view", "schema": "t2"},
{"type": "function", "schema": "t2"},
]
)
@pytest.mark.parametrize(
"expression",
[
"SELECT * FROM (",
"SELECT * FROM foo WHERE EXISTS (",
"SELECT * FROM foo WHERE bar AND NOT EXISTS (",
"SELECT 1 AS",
],
)
def test_sub_select_suggests_keyword(expression):
suggestion = suggest_type(expression, expression)
assert suggestion == [{"type": "keyword"}]
@pytest.mark.parametrize(
"expression",
[
"SELECT * FROM (S",
"SELECT * FROM foo WHERE EXISTS (S",
"SELECT * FROM foo WHERE bar AND NOT EXISTS (S",
],
)
def test_sub_select_partial_text_suggests_keyword(expression):
suggestion = suggest_type(expression, expression)
assert suggestion == [{"type": "keyword"}]
def test_outer_table_reference_in_exists_subquery_suggests_columns():
q = "SELECT * FROM foo f WHERE EXISTS (SELECT 1 FROM bar WHERE f."
suggestions = suggest_type(q, q)
assert suggestions == [
{"type": "column", "tables": [(None, "foo", "f")]},
{"type": "table", "schema": "f"},
{"type": "view", "schema": "f"},
{"type": "function", "schema": "f"},
]
@pytest.mark.parametrize(
"expression",
[
"SELECT * FROM (SELECT * FROM ",
"SELECT * FROM foo WHERE EXISTS (SELECT * FROM ",
"SELECT * FROM foo WHERE bar AND NOT EXISTS (SELECT * FROM ",
],
)
def test_sub_select_table_name_completion(expression):
suggestion = suggest_type(expression, expression)
assert sorted_dicts(suggestion) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
def test_sub_select_col_name_completion():
suggestions = suggest_type(
"SELECT * FROM (SELECT FROM abc", "SELECT * FROM (SELECT "
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["abc"]},
{"type": "column", "tables": [(None, "abc", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
@pytest.mark.xfail
def test_sub_select_multiple_col_name_completion():
suggestions = suggest_type(
"SELECT * FROM (SELECT a, FROM abc", "SELECT * FROM (SELECT a, "
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "column", "tables": [(None, "abc", None)]},
{"type": "function", "schema": []},
]
)
def test_sub_select_dot_col_name_completion():
suggestions = suggest_type(
"SELECT * FROM (SELECT t. FROM tabl t", "SELECT * FROM (SELECT t."
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "column", "tables": [(None, "tabl", "t")]},
{"type": "table", "schema": "t"},
{"type": "view", "schema": "t"},
{"type": "function", "schema": "t"},
]
)
@pytest.mark.parametrize("join_type", ["", "INNER", "LEFT", "RIGHT OUTER"])
@pytest.mark.parametrize("tbl_alias", ["", "foo"])
def test_join_suggests_tables_and_schemas(tbl_alias, join_type):
text = "SELECT * FROM abc {0} {1} JOIN ".format(tbl_alias, join_type)
suggestion = suggest_type(text, text)
assert sorted_dicts(suggestion) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
@pytest.mark.parametrize(
"sql",
[
"SELECT * FROM abc a JOIN def d ON a.",
"SELECT * FROM abc a JOIN def d ON a.id = d.id AND a.",
],
)
def test_join_alias_dot_suggests_cols1(sql):
suggestions = suggest_type(sql, sql)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "column", "tables": [(None, "abc", "a")]},
{"type": "table", "schema": "a"},
{"type": "view", "schema": "a"},
{"type": "function", "schema": "a"},
]
)
@pytest.mark.parametrize(
"sql",
[
"SELECT * FROM abc a JOIN def d ON a.id = d.",
"SELECT * FROM abc a JOIN def d ON a.id = d.id AND a.id2 = d.",
],
)
def test_join_alias_dot_suggests_cols2(sql):
suggestions = suggest_type(sql, sql)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "column", "tables": [(None, "def", "d")]},
{"type": "table", "schema": "d"},
{"type": "view", "schema": "d"},
{"type": "function", "schema": "d"},
]
)
@pytest.mark.parametrize(
"sql",
[
"select a.x, b.y from abc a join bcd b on ",
"select a.x, b.y from abc a join bcd b on a.id = b.id OR ",
],
)
def test_on_suggests_aliases(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{"type": "alias", "aliases": ["a", "b"]}]
@pytest.mark.parametrize(
"sql",
[
"select abc.x, bcd.y from abc join bcd on ",
"select abc.x, bcd.y from abc join bcd on abc.id = bcd.id AND ",
],
)
def test_on_suggests_tables(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{"type": "alias", "aliases": ["abc", "bcd"]}]
@pytest.mark.parametrize(
"sql",
[
"select a.x, b.y from abc a join bcd b on a.id = ",
"select a.x, b.y from abc a join bcd b on a.id = b.id AND a.id2 = ",
],
)
def test_on_suggests_aliases_right_side(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{"type": "alias", "aliases": ["a", "b"]}]
@pytest.mark.parametrize(
"sql",
[
"select abc.x, bcd.y from abc join bcd on ",
"select abc.x, bcd.y from abc join bcd on abc.id = bcd.id and ",
],
)
def test_on_suggests_tables_right_side(sql):
suggestions = suggest_type(sql, sql)
assert suggestions == [{"type": "alias", "aliases": ["abc", "bcd"]}]
@pytest.mark.parametrize("col_list", ["", "col1, "])
def test_join_using_suggests_common_columns(col_list):
text = "select * from abc inner join def using (" + col_list
assert suggest_type(text, text) == [
{
"type": "column",
"tables": [(None, "abc", None), (None, "def", None)],
"drop_unique": True,
}
]
def test_2_statements_2nd_current():
suggestions = suggest_type(
"select * from a; select * from ", "select * from a; select * from "
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
suggestions = suggest_type(
"select * from a; select from b", "select * from a; select "
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["b"]},
{"type": "column", "tables": [(None, "b", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
# Should work even if first statement is invalid
suggestions = suggest_type(
"select * from; select * from ", "select * from; select * from "
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
def test_2_statements_1st_current():
suggestions = suggest_type("select * from ; select * from b", "select * from ")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
suggestions = suggest_type("select from a; select * from b", "select ")
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["a"]},
{"type": "column", "tables": [(None, "a", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_3_statements_2nd_current():
suggestions = suggest_type(
"select * from a; select * from ; select * from c",
"select * from a; select * from ",
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
suggestions = suggest_type(
"select * from a; select from b; select * from c", "select * from a; select "
)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "alias", "aliases": ["b"]},
{"type": "column", "tables": [(None, "b", None)]},
{"type": "function", "schema": []},
{"type": "keyword"},
]
)
def test_create_db_with_template():
suggestions = suggest_type(
"create database foo with template ", "create database foo with template "
)
assert sorted_dicts(suggestions) == sorted_dicts([{"type": "database"}])
@pytest.mark.parametrize("initial_text", ["", " ", "\t \t"])
def test_specials_included_for_initial_completion(initial_text):
suggestions = suggest_type(initial_text, initial_text)
assert sorted_dicts(suggestions) == sorted_dicts(
[{"type": "keyword"}, {"type": "special"}]
)
def test_specials_not_included_after_initial_token():
suggestions = suggest_type("create table foo (dt d", "create table foo (dt d")
assert sorted_dicts(suggestions) == sorted_dicts([{"type": "keyword"}])
def test_drop_schema_qualified_table_suggests_only_tables():
text = "DROP TABLE schema_name.table_name"
suggestions = suggest_type(text, text)
assert suggestions == [{"type": "table", "schema": "schema_name"}]
@pytest.mark.parametrize("text", [",", " ,", "sel ,"])
def test_handle_pre_completion_comma_gracefully(text):
suggestions = suggest_type(text, text)
assert iter(suggestions)
def test_cross_join():
text = "select * from v1 cross join v2 JOIN v1.id, "
suggestions = suggest_type(text, text)
assert sorted_dicts(suggestions) == sorted_dicts(
[
{"type": "table", "schema": []},
{"type": "view", "schema": []},
{"type": "schema"},
]
)
@pytest.mark.parametrize("expression", ["SELECT 1 AS ", "SELECT 1 FROM tabl AS "])
def test_after_as(expression):
suggestions = suggest_type(expression, expression)
assert set(suggestions) == set()
@pytest.mark.parametrize(
"expression",
[
"\\. ",
"select 1; \\. ",
"select 1;\\. ",
"select 1 ; \\. ",
"source ",
"truncate table test; source ",
"truncate table test ; source ",
"truncate table test;source ",
],
)
def test_source_is_file(expression):
suggestions = suggest_type(expression, expression)
assert suggestions == [{"type": "file_name"}]
| 30.795981
| 86
| 0.558745
|
777f29dc80ea5426553dbbbc35d4f566e2034938
| 666
|
py
|
Python
|
FamilyFood/manage.py
|
ZST-Devs/HomeFood
|
337edef7d6d567b1c0c6e4699d7b8c8ee4d91397
|
[
"MIT"
] | 2
|
2022-01-15T16:58:40.000Z
|
2022-03-11T09:42:59.000Z
|
FamilyFood/manage.py
|
ZST-Devs/HomeFood
|
337edef7d6d567b1c0c6e4699d7b8c8ee4d91397
|
[
"MIT"
] | null | null | null |
FamilyFood/manage.py
|
ZST-Devs/HomeFood
|
337edef7d6d567b1c0c6e4699d7b8c8ee4d91397
|
[
"MIT"
] | 1
|
2022-01-08T17:06:37.000Z
|
2022-01-08T17:06:37.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FamilyFood.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.956522
| 74
| 0.68018
|
2b2b73abde818c180d4d00fdce84088a3a251c19
| 1,626
|
py
|
Python
|
book_figures/chapter3/fig_prob_sum.py
|
StKyr/astroML_figures
|
45e9748335e0cd854d09319dff0e43ecd70e7b61
|
[
"BSD-2-Clause"
] | 6
|
2019-08-31T16:43:43.000Z
|
2021-07-10T06:06:20.000Z
|
book_figures/chapter3/fig_prob_sum.py
|
StKyr/astroML_figures
|
45e9748335e0cd854d09319dff0e43ecd70e7b61
|
[
"BSD-2-Clause"
] | 34
|
2018-09-10T22:35:07.000Z
|
2022-02-08T21:17:39.000Z
|
book_figures/chapter3/fig_prob_sum.py
|
StKyr/astroML_figures
|
45e9748335e0cd854d09319dff0e43ecd70e7b61
|
[
"BSD-2-Clause"
] | 10
|
2017-06-22T09:21:19.000Z
|
2020-01-26T03:54:26.000Z
|
"""
Sum of Probabilities
--------------------
Figure 3.1.
A representation of the sum of probabilities shown in eq.3.1.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
if "setup_text_plots" not in globals():
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
# create plot
fig = plt.figure(figsize=(5, 3.75), facecolor='w')
ax = plt.axes([0, 0, 1, 1], xticks=[], yticks=[], frameon=False)
# draw intersecting circles
ax.add_patch(plt.Circle((1.5, 0.2), 2.2, fc='gray', ec='black', alpha=0.5))
ax.add_patch(plt.Circle((-1.5, 0.2), 2.2, fc='gray', ec='black', alpha=0.5))
# add text
text_kwargs = dict(ha='center', va='center', fontsize=12)
ax.text(-1.6, 0.2, "$p(A)$", **text_kwargs)
ax.text(1.6, 0.2, "$p(B)$", **text_kwargs)
ax.text(0.0, 0.2, "$p(A \cap B)$", **text_kwargs)
ax.text(0, -2.3, "$p(A \cup B) = p(A) + p(B) - p(A \cap B)$", **text_kwargs)
ax.set_xlim(-4, 4)
ax.set_ylim(-3, 3)
plt.show()
| 36.133333
| 79
| 0.646986
|
c269af34f1630bf1627c9328617254269bbda3ae
| 2,804
|
py
|
Python
|
day21/day21.py
|
r0mi/aoc2018
|
6a0db3b4fc6f5417a9e86fd7fe0d6aa5348a1dd5
|
[
"MIT"
] | null | null | null |
day21/day21.py
|
r0mi/aoc2018
|
6a0db3b4fc6f5417a9e86fd7fe0d6aa5348a1dd5
|
[
"MIT"
] | null | null | null |
day21/day21.py
|
r0mi/aoc2018
|
6a0db3b4fc6f5417a9e86fd7fe0d6aa5348a1dd5
|
[
"MIT"
] | null | null | null |
# Let the 6 register be called A B ip C D E
# 0 seti 123 0 3 # C = 123
# 1 bani 3 456 3 # C &= 456 | C = 72
# 2 eqri 3 72 3 # C = (C == 72) | C = 1
# 3 addr 3 2 2 # ip += C | ip = 4 | GOTO 5
# 4 seti 0 0 2 # ip = 0 | GOTO 1
# 5 seti 0 6 3 # C = 0
# 6 bori 3 65536 4 # D = C | 65536 | D = 65536
# 7 seti 2176960 8 3 # C = 2176960
# 8 bani 4 255 1 # B = D & 255 | B = 0
# 9 addr 3 1 3 # C += B | C = 2176960
# 10 bani 3 16777215 3 # C &= 16777215 | C = 2176960
# 11 muli 3 65899 3 # C *= 65899 | C = 143459487040
# 12 bani 3 16777215 3 # C &= 16777215 | C = 14290240
# if (256 > D)
# GOTO 28
# else
# GOTO 17
# 13 gtir 256 4 1 # B = (256 > D) | B = 0
# 14 addr 1 2 2 # ip += B | ip = 14/15 | GOTO 15/16
# 15 addi 2 1 2 # ip += 1 | ip = 16 | GOTO 17
# 16 seti 27 7 2 # ip = 27 | GOTO 28
# 17 seti 0 9 1 # B = 0
# 18 addi 1 1 5 # E = B + 1
# 19 muli 5 256 5 # E *= 256
# if (E > D)
# GOTO 26
# else
# B = 1
# GOTO 18
# 20 gtrr 5 4 5 # E = (E > D) | E = 0
# 21 addr 5 2 2 # ip += E | ip = 21/22 | GOTO 22/23
# 22 addi 2 1 2 # ip += 1 | ip = 23 | GOTO 24
# 23 seti 25 7 2 # ip = 25 | GOTO 26
# 24 addi 1 1 1 # B += 1 | B = 1
# 25 seti 17 2 2 # ip = 17 | GOTO 18
# 26 setr 1 7 4 # D = B | D = 2
# 27 seti 7 9 2 # ip = 7 | GOTO 8
# if (C == A)
# RETURN
# else
# GOTO 6
# 28 eqrr 3 0 1 # B = (C == A) | B = 0
# 29 addr 1 2 2 # ip += B | ip = 29/30 | GOTO 30 / RETURN
# 30 seti 5 9 2 # ip = 5 | GOTO 6
# The main loop in short
A = B = C = D = E = 0
unique_c_values = set()
previous_unique_c = 0
while True:
# 6:
D = C | 65536
C = 2176960
while True:
# 8:
B = D & 255
C += B
C &= 16777215
C *= 65899
C &= 16777215
if 256 > D:
if len(unique_c_values) == 0:
print("Lowest register 0 value that causes the program to execute fewest instructions is", C)
if C not in unique_c_values:
previous_unique_c = C
unique_c_values.add(C)
else: # C values start to repeat, exit with previous unique value
print("Lowest register 0 value that causes the program to execute most instructions is", previous_unique_c)
exit(1)
if C == A:
exit(1)
else:
# GOTO 6
break
else:
# GOTO 17
# Optimised the following loop to
            D >>= 8  # equivalent to integer division by 256; replaces the slow loop sketched below
# 17:
# B = 0
# while True:
# # 18:
# E = (B + 1) * 256
# if E > D:
# D = B
# # GOTO 8
# break
# else:
# B += 1
# # GOTO 18
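            # Illustrative note (added, not from the original solution): the loop above
            # finds the smallest B with (B + 1) * 256 > D, i.e. B == D // 256, so the
            # shift is equivalent, e.g. 65536 >> 8 == 65536 // 256 == 256.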
| 26.205607
| 111
| 0.455777
|
94949c429c34c6c519881a3fb8f8800aa2845337
| 1,405
|
py
|
Python
|
handbrakecloud/tests/base.py
|
mtreinish/handbrakecloud
|
3a3319db57ab78db81051cc75eb59ec65c6bb130
|
[
"Apache-2.0"
] | null | null | null |
handbrakecloud/tests/base.py
|
mtreinish/handbrakecloud
|
3a3319db57ab78db81051cc75eb59ec65c6bb130
|
[
"Apache-2.0"
] | null | null | null |
handbrakecloud/tests/base.py
|
mtreinish/handbrakecloud
|
3a3319db57ab78db81051cc75eb59ec65c6bb130
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fixtures
import testtools
class TestCase(testtools.TestCase):
true = ('True', 'true', '1', 'yes')
def setUp(self):
super(TestCase, self).setUp()
if os.environ.get('OS_STDOUT_CAPTURE') in self.true:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in self.true:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
if os.environ.get('OS_LOG_CAPTURE') != 'False':
if os.environ.get('OS_LOG_CAPTURE') != '0':
self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
level=None))
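# Hypothetical usage sketch (added, not part of this module): the capture fixtures
# are driven purely by environment variables, so a run along the lines of
#   OS_STDOUT_CAPTURE=1 OS_LOG_CAPTURE=1 python -m unittest <test modules>
# would capture stdout and logging for every test derived from this TestCase.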
| 39.027778
| 76
| 0.668327
|
9fa697eb048cee032422896ac5fe2fce245a2699
| 31,721
|
py
|
Python
|
src/C_WGAN_ResBlock/result_collect.py
|
chychen/nba_scrip_generation
|
942df59cc0426aa30b54a0e09c0f646aa8fd4f18
|
[
"MIT"
] | 1
|
2020-07-09T09:00:09.000Z
|
2020-07-09T09:00:09.000Z
|
src/C_WGAN_ResBlock/result_collect.py
|
chychen/bball_defensive_strategies_generation
|
942df59cc0426aa30b54a0e09c0f646aa8fd4f18
|
[
"MIT"
] | null | null | null |
src/C_WGAN_ResBlock/result_collect.py
|
chychen/bball_defensive_strategies_generation
|
942df59cc0426aa30b54a0e09c0f646aa8fd4f18
|
[
"MIT"
] | null | null | null |
"""
data url: http://140.113.210.14:6006/NBA/data/F2.npy
data description:
    event by event, with 300 sequences each (about 75 seconds)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import shutil
import json
import numpy as np
import tensorflow as tf
import game_visualizer
from utils import DataFactory
from Generator import G_MODEL
from Critic import C_MODEL
import plotly
plotly.__version__
plotly.tools.set_credentials_file(
username='ChenChiehYu', api_key='xh9rsxFXY6DNF1qAfUyQ')
import plotly.plotly as py
import plotly.graph_objs as go
FLAGS = tf.app.flags.FLAGS
# path parameters
tf.app.flags.DEFINE_string('folder_path', None,
"summary directory")
tf.app.flags.DEFINE_string('data_path', '../../data/FixedFPS5-Train.npy',
"summary directory")
tf.app.flags.DEFINE_string('restore_path', None,
"path of saving model eg: checkpoints/model.ckpt-5")
# input parameters
tf.app.flags.DEFINE_integer('seq_length', 100,
"the maximum length of one training data")
# model parameters
tf.app.flags.DEFINE_string('gpus', '1',
"define visible gpus")
tf.app.flags.DEFINE_integer('batch_size', 128,
"batch size")
tf.app.flags.DEFINE_integer('latent_dims', 100,
"latent_dims")
# collect mode
tf.app.flags.DEFINE_integer('n_latents', 100,
"n_latents")
tf.app.flags.DEFINE_integer('n_conditions', 128 * 9, # because there are 9 batch_size data amount in validation set
"n_conditions")
tf.app.flags.DEFINE_bool('is_valid', True,
"is_valid")
tf.app.flags.DEFINE_integer('mode', None,
"mode to collect, \
1 -> to collect results \
2 -> to show diversity \
                            3 -> to visualize weights \
                            4 -> to analyze code, only change first dimension for comparison \
                            5 -> to calculate heuristic score on selected result\
6 -> to draw different length result \
7 -> to draw feature map \
8 -> to find high-openshot-penalty data in real dataset \
9 -> to collect results vary in length")
# VISIBLE GPUS
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpus
def z_samples(batch_size):
return np.random.normal(
0., 1., size=[batch_size, FLAGS.latent_dims])
def mode_1(sess, graph, save_path, is_valid=FLAGS.is_valid):
""" to collect results
Saved Result
------------
results_A_fake_B : float, numpy ndarray, shape=[n_latents=100, n_conditions=128*9, length=100, features=23]
Real A + Fake B
results_A_real_B : float, numpy ndarray, shape=[n_conditions=128*9, length=100, features=23]
Real A + Real B
results_critic_scores : float, numpy ndarray, shape=[n_latents=100, n_conditions=128*9]
critic scores for each input data
"""
# placeholder tensor
latent_input_t = graph.get_tensor_by_name('latent_input:0')
team_a_t = graph.get_tensor_by_name('team_a:0')
G_samples_t = graph.get_tensor_by_name('G_samples:0')
matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
# result tensor
result_t = graph.get_tensor_by_name(
'Generator/G_inference/conv_result/conv1d/Maximum:0')
# critic_scores_t = graph.get_tensor_by_name(
# 'Critic/C_inference_1/conv_output/Reshape:0')
critic_scores_t = graph.get_tensor_by_name(
'Critic/C_inference_1/linear_result/BiasAdd:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
# normalize
data_factory = DataFactory(real_data)
# result collector
results_A_fake_B = []
results_A_real_B = []
results_critic_scores = []
# shuffle the data
train_data, valid_data = data_factory.fetch_data()
if is_valid:
target_data = valid_data
else:
target_data = train_data
target_data = np.load('ADD-100.npy')
team_AB = np.concatenate(
[
# ball
target_data[:, :, 0, :3].reshape(
[target_data.shape[0], target_data.shape[1], 1 * 3]),
# team A players
target_data[:, :, 1:6, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2]),
# team B players
target_data[:, :, 6:11, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2])
], axis=-1
)
team_AB = data_factory.normalize(team_AB)
print(team_AB.shape)
dummy_AB = np.zeros(shape=[98, 100, 23])
team_AB = np.concatenate([team_AB, dummy_AB], axis=0)
team_A = team_AB[:, :, :13]
team_B = team_AB[:, :, 13:]
# for idx in range(0, FLAGS.batch_size, FLAGS.batch_size):
real_samples = team_B
real_conds = team_A
# generate result
temp_critic_scores = []
temp_A_fake_B = []
for i in range(FLAGS.n_latents):
latents = z_samples(FLAGS.batch_size)
feed_dict = {
latent_input_t: latents,
team_a_t: real_conds
}
result = sess.run(
result_t, feed_dict=feed_dict)
feed_dict = {
G_samples_t: result,
matched_cond_t: real_conds
}
critic_scores = sess.run(
critic_scores_t, feed_dict=feed_dict)
temp_A_fake_B.append(data_factory.recover_data(
np.concatenate([real_conds, result], axis=-1)))
temp_critic_scores.append(critic_scores)
results_A_fake_B.append(temp_A_fake_B)
results_critic_scores.append(temp_critic_scores)
# concat along with conditions dimension (axis=1)
results_A_fake_B = np.concatenate(results_A_fake_B, axis=1)
results_critic_scores = np.concatenate(results_critic_scores, axis=1)
results_A = data_factory.recover_BALL_and_A(
real_conds)
results_real_B = data_factory.recover_B(
real_samples)
results_A_real_B = np.concatenate([results_A, results_real_B], axis=-1)
# saved as numpy
print(np.array(results_A_fake_B).shape)
print(np.array(results_A_real_B).shape)
print(np.array(results_critic_scores).shape)
np.save(save_path + 'results_A_fake_B.npy',
np.array(results_A_fake_B)[:, :30].astype(np.float32).reshape([FLAGS.n_latents, 30, FLAGS.seq_length, 23]))
np.save(save_path + 'results_A_real_B.npy',
np.array(results_A_real_B)[:30].astype(np.float32).reshape([30, FLAGS.seq_length, 23]))
np.save(save_path + 'results_critic_scores.npy',
np.array(results_critic_scores)[:, :30].astype(np.float32).reshape([FLAGS.n_latents, 30]))
print('!!Completely Saved!!')
def mode_2(sess, graph, save_path, is_valid=FLAGS.is_valid):
""" to show diversity
Saved Result
------------
results_A_fake_B : float, numpy ndarray, shape=[latent_dims=10, n_latents=11, n_conditions=128, length=100, features=23]
Real A + Fake B
results_A_real_B : float, numpy ndarray, shape=[n_conditions=128, length=100, features=23]
Real A + Real B
results_critic_scores : float, numpy ndarray, shape=[latent_dims=10, n_latents=11, n_conditions=128]
critic scores for each input data
"""
n_latents = 100
latent_dims = 1
# placeholder tensor
latent_input_t = graph.get_tensor_by_name('latent_input:0')
team_a_t = graph.get_tensor_by_name('team_a:0')
G_samples_t = graph.get_tensor_by_name('G_samples:0')
matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
# result tensor
result_t = graph.get_tensor_by_name(
'Generator/G_inference/conv_result/conv1d/Maximum:0')
critic_scores_t = graph.get_tensor_by_name(
'Critic/C_inference_1/linear_result/BiasAdd:0')
# 'Generator/G_loss/C_inference/linear_result/Reshape:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
# normalize
data_factory = DataFactory(real_data)
# result collector
results_A_fake_B = []
results_A_real_B = []
results_critic_scores = []
# shuffle the data
train_data, valid_data = data_factory.fetch_data()
if is_valid:
target_data = valid_data
else:
target_data = train_data
real_samples = target_data['B'][:512:4]
real_conds = target_data['A'][:512:4]
# generate result
for target_dim in range(latent_dims):
temp_critic_scores = []
temp_A_fake_B = []
for i in range(n_latents):
latents = z_samples(FLAGS.batch_size)
feed_dict = {
latent_input_t: latents,
team_a_t: real_conds
}
result = sess.run(
result_t, feed_dict=feed_dict)
feed_dict = {
G_samples_t: result,
matched_cond_t: real_conds
}
critic_scores = sess.run(
critic_scores_t, feed_dict=feed_dict)
temp_A_fake_B.append(data_factory.recover_data(
np.concatenate([real_conds, result], axis=-1)))
temp_critic_scores.append(critic_scores)
results_A_fake_B.append(temp_A_fake_B)
results_critic_scores.append(temp_critic_scores)
# concat along with conditions dimension
results_A_fake_B = np.concatenate(results_A_fake_B, axis=0)
results_critic_scores = np.concatenate(results_critic_scores, axis=0)
results_A = data_factory.recover_BALL_and_A(
target_data['A'][:512:4])
results_real_B = data_factory.recover_B(
target_data['B'][:512:4])
results_A_real_B = np.concatenate([results_A, results_real_B], axis=-1)
# saved as numpy
print(np.array(results_A_fake_B).shape)
print(np.array(results_A_real_B).shape)
print(np.array(results_critic_scores).shape)
np.save(save_path + 'results_A_fake_B.npy',
np.array(results_A_fake_B).astype(np.float32).reshape([latent_dims, n_latents, 128, FLAGS.seq_length, 23]))
np.save(save_path + 'results_A_real_B.npy',
np.array(results_A_real_B).astype(np.float32).reshape([128, FLAGS.seq_length, 23]))
np.save(save_path + 'results_critic_scores.npy',
np.array(results_critic_scores).astype(np.float32).reshape([latent_dims, n_latents, 128]))
print('!!Completely Saved!!')
def weight_vis(graph, save_path):
""" to visulize weight
"""
if not os.path.exists(save_path):
os.makedirs(save_path)
def __get_var_list(tag):
""" to get both Generator's and Discriminator's trainable variables
and add trainable variables into histogram
"""
trainable_V = tf.trainable_variables()
for _, v in enumerate(trainable_V):
if tag in v.name:
return v
    # session config
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
saver = tf.train.import_meta_graph(FLAGS.restore_path + '.meta')
with tf.Session(config=config) as sess:
# restored
saver.restore(sess, FLAGS.restore_path)
# target tensor
conds_linear = __get_var_list('G_inference/conds_linear')
latents_linear = __get_var_list('G_inference/latents_linear')
print(conds_linear.shape)
print(latents_linear.shape)
conds_linear_result, latents_linear_result = sess.run(
[conds_linear, latents_linear])
trace = go.Heatmap(z=np.concatenate(
[conds_linear_result, latents_linear_result], axis=0))
data = [trace]
plotly.offline.plot(data, filename=os.path.join(
save_path, 'G_inference_input.html'))
print('!!Completely Saved!!')
def mode_4(sess, graph, save_path, is_valid=FLAGS.is_valid):
""" to analize code, only change first dimension for comparison
Saved Result
------------
results_A_fake_B : float, numpy ndarray, shape=[n_latents=11, n_conditions=128*9, length=100, features=23]
Real A + Fake B
results_A_real_B : float, numpy ndarray, shape=[n_latents=11, n_conditions=128*9, length=100, features=23]
Real A + Real B
results_critic_scores : float, numpy ndarray, shape=[n_latents=11, n_conditions=128*9]
critic scores for each input data
"""
target_dims = 0
n_latents = 11
# placeholder tensor
latent_input_t = graph.get_tensor_by_name('latent_input:0')
team_a_t = graph.get_tensor_by_name('team_a:0')
G_samples_t = graph.get_tensor_by_name('G_samples:0')
matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
# result tensor
result_t = graph.get_tensor_by_name(
'Generator/G_inference/conv_result/conv1d/Maximum:0')
critic_scores_t = graph.get_tensor_by_name(
'Critic/C_inference_1/linear_result/BiasAdd:0')
# 'Generator/G_loss/C_inference/linear_result/Reshape:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
# normalize
data_factory = DataFactory(real_data)
# result collector
results_A_fake_B = []
results_A_real_B = []
results_critic_scores = []
# shuffle the data
train_data, valid_data = data_factory.fetch_data()
if is_valid:
target_data = valid_data
else:
target_data = train_data
latents = z_samples(FLAGS.batch_size)
for idx in range(0, FLAGS.n_conditions, FLAGS.batch_size):
real_samples = target_data['B'][idx:idx + FLAGS.batch_size]
real_conds = target_data['A'][idx:idx + FLAGS.batch_size]
# generate result
temp_critic_scores = []
temp_A_fake_B = []
for i in range(n_latents):
latents[:, target_dims] = -2.5 + 0.5 * i
feed_dict = {
latent_input_t: latents,
team_a_t: real_conds
}
result = sess.run(
result_t, feed_dict=feed_dict)
feed_dict = {
G_samples_t: result,
matched_cond_t: real_conds
}
critic_scores = sess.run(
critic_scores_t, feed_dict=feed_dict)
temp_A_fake_B.append(data_factory.recover_data(
np.concatenate([real_conds, result], axis=-1)))
temp_critic_scores.append(critic_scores)
results_A_fake_B.append(temp_A_fake_B)
results_critic_scores.append(temp_critic_scores)
# concat along with conditions dimension (axis=1)
results_A_fake_B = np.concatenate(results_A_fake_B, axis=1)
results_critic_scores = np.concatenate(results_critic_scores, axis=1)
results_A = data_factory.recover_BALL_and_A(
target_data['A'][:FLAGS.n_conditions])
results_real_B = data_factory.recover_B(
target_data['B'][:FLAGS.n_conditions])
results_A_real_B = np.concatenate([results_A, results_real_B], axis=-1)
# saved as numpy
print(np.array(results_A_fake_B).shape)
print(np.array(results_A_real_B).shape)
print(np.array(results_critic_scores).shape)
np.save(save_path + 'results_A_fake_B.npy',
np.array(results_A_fake_B).astype(np.float32).reshape([n_latents, FLAGS.n_conditions, FLAGS.seq_length, 23]))
np.save(save_path + 'results_A_real_B.npy',
np.array(results_A_real_B).astype(np.float32).reshape([FLAGS.n_conditions, FLAGS.seq_length, 23]))
np.save(save_path + 'results_critic_scores.npy',
np.array(results_critic_scores).astype(np.float32).reshape([n_latents, FLAGS.n_conditions]))
print('!!Completely Saved!!')
def mode_5(sess, graph, save_path):
""" to calculate hueristic score on selected result
"""
NORMAL_C_ID = [154, 108, 32, 498, 2, 513, 263, 29, 439, 249, 504, 529, 24, 964, 641, 739, 214, 139, 819, 1078, 772, 349, 676, 1016, 582, 678, 39, 279,
918, 477, 809, 505, 896, 600, 564, 50, 810, 1132, 683, 578, 1131, 887, 621, 1097, 665, 528, 310, 631, 1102, 6, 945, 1020, 853, 490, 64, 1002, 656]
NORMAL_N_ID = [58, 5, 47, 66, 79, 21, 70, 54, 3, 59, 67, 59, 84, 38, 71, 62, 55, 86, 14, 83, 94, 97, 83, 27, 38, 68, 95,
26, 60, 2, 54, 46, 34, 75, 38, 4, 59, 87, 52, 44, 92, 28, 86, 71, 24, 28, 13, 70, 87, 44, 52, 25, 59, 61, 86, 16, 98]
GOOD_C_ID = [976, 879, 293, 750, 908, 878, 831, 1038, 486, 268,
265, 252, 1143, 383, 956, 974, 199, 777, 585, 34, 932]
GOOD_N_ID = [52, 16, 87, 43, 45, 66, 22, 77, 36,
50, 47, 9, 34, 9, 82, 42, 65, 43, 7, 29, 62]
BEST_C_ID = [570, 517, 962, 1088, 35, 623, 1081, 33, 255, 571,
333, 990, 632, 431, 453, 196, 991, 267, 591, 902, 597, 646]
BEST_N_ID = [22, 42, 76, 92, 12, 74, 92, 58, 69, 69,
23, 63, 89, 7, 74, 27, 12, 20, 35, 77, 62, 63]
DUMMY_ID = np.zeros(shape=[28])
ALL_C_ID = np.concatenate(
[NORMAL_C_ID, GOOD_C_ID, BEST_C_ID, DUMMY_ID]).astype(np.int32)
ALL_N_ID = np.concatenate(
[NORMAL_N_ID, GOOD_N_ID, BEST_N_ID, DUMMY_ID]).astype(np.int32)
print(ALL_C_ID.shape)
print(ALL_N_ID.shape)
fake_result_AB = np.load(
'v3/2/collect/mode_1/results_A_fake_B.npy')[ALL_N_ID, ALL_C_ID]
real_result_AB = np.load(
'v3/2/collect/mode_1/results_A_real_B.npy')[ALL_C_ID]
print(fake_result_AB.shape)
print(real_result_AB.shape)
# normalize
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
data_factory = DataFactory(real_data)
fake_result_AB = data_factory.normalize(fake_result_AB)
real_result_AB = data_factory.normalize(real_result_AB)
# placeholder tensor
real_data_t = graph.get_tensor_by_name('real_data:0')
matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
# result tensor
heuristic_penalty_pframe = graph.get_tensor_by_name(
'Critic/C_inference/heuristic_penalty/Min:0')
# 'Generator/G_loss/C_inference/linear_result/Reshape:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
# real
feed_dict = {
real_data_t: real_result_AB[:, :, 13:23],
matched_cond_t: real_result_AB[:, :, :13]
}
real_hp_pframe = sess.run(heuristic_penalty_pframe, feed_dict=feed_dict)
# fake
feed_dict = {
real_data_t: fake_result_AB[:, :, 13:23],
matched_cond_t: fake_result_AB[:, :, :13]
}
fake_hp_pframe = sess.run(heuristic_penalty_pframe, feed_dict=feed_dict)
print(np.mean(real_hp_pframe[:100]))
print(np.mean(fake_hp_pframe[:100]))
print('!!Completely Saved!!')
def mode_6(sess, graph, save_path):
""" to draw different length result
"""
# normalize
real_data = np.load(FLAGS.data_path)
print('real_data.shape', real_data.shape)
data_factory = DataFactory(real_data)
target_data = np.load('FEATURES-7.npy')[:, :]
team_AB = np.concatenate(
[
# ball
target_data[:, :, 0, :3].reshape(
[target_data.shape[0], target_data.shape[1], 1 * 3]),
# team A players
target_data[:, :, 1:6, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2]),
# team B players
target_data[:, :, 6:11, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2])
], axis=-1
)
team_AB = data_factory.normalize(team_AB)
team_A = team_AB[:, :, :13]
team_B = team_AB[:, :, 13:]
# placeholder tensor
latent_input_t = graph.get_tensor_by_name('latent_input:0')
team_a_t = graph.get_tensor_by_name('team_a:0')
# result tensor
result_t = graph.get_tensor_by_name(
'Generator/G_inference/conv_result/conv1d/Maximum:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
# result collector
latents = z_samples(team_AB.shape[0])
feed_dict = {
latent_input_t: latents,
team_a_t: team_A
}
result_fake_B = sess.run(result_t, feed_dict=feed_dict)
results_A_fake_B = np.concatenate([team_A, result_fake_B], axis=-1)
results_A_fake_B = data_factory.recover_data(results_A_fake_B)
for i in range(results_A_fake_B.shape[0]):
game_visualizer.plot_data(
results_A_fake_B[i], target_data.shape[1], file_path=save_path + str(i) + '.mp4', if_save=True)
print('!!Completely Saved!!')
def mode_7(sess, graph, save_path):
""" to draw feature map
"""
# normalize
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
data_factory = DataFactory(real_data)
target_data = np.load('FEATURES-6.npy')[:6]
team_AB = np.concatenate(
[
# ball
target_data[:, :, 0, :3].reshape(
[target_data.shape[0], target_data.shape[1], 1 * 3]),
# team A players
target_data[:, :, 1:6, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2]),
# team B players
target_data[:, :, 6:11, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2])
], axis=-1
)
dummy_AB = np.zeros(shape=[128 - 6, 100, 23])
team_AB = np.concatenate([team_AB, dummy_AB], axis=0)
team_AB = data_factory.normalize(team_AB)
team_A = team_AB[:, :, :13]
team_B = team_AB[:, :, 13:]
# placeholder tensor
latent_input_t = graph.get_tensor_by_name('latent_input:0')
team_a_t = graph.get_tensor_by_name('team_a:0')
# result tensor
conds_linear_t = graph.get_tensor_by_name(
'Generator/G_inference/conds_linear/BiasAdd:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
# result collector
latents = np.concatenate([z_samples(1)
for i in range(FLAGS.batch_size)], axis=0)
feed_dict = {
latent_input_t: latents,
team_a_t: team_A
}
conds_linear = sess.run(conds_linear_t, feed_dict=feed_dict)
for i in range(6):
trace = go.Heatmap(z=conds_linear[i])
data = [trace]
plotly.offline.plot(data, filename=os.path.join(
save_path, 'G_conds_linear' + str(i) + '.html'))
print('!!Completely Saved!!')
def mode_8(sess, graph, save_path):
""" to find high-openshot-penalty data in 1000 real data
"""
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
data_factory = DataFactory(real_data)
train_data, valid_data = data_factory.fetch_data()
# placeholder tensor
real_data_t = graph.get_tensor_by_name('real_data:0')
matched_cond_t = graph.get_tensor_by_name('matched_cond:0')
# result tensor
heuristic_penalty_pframe = graph.get_tensor_by_name(
'Critic/C_inference/heuristic_penalty/Min:0')
# 'Generator/G_loss/C_inference/linear_result/Reshape:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
real_hp_pframe_all = []
for batch_id in range(train_data['A'].shape[0] // FLAGS.batch_size):
index_id = batch_id * FLAGS.batch_size
real_data = train_data['B'][index_id:index_id + FLAGS.batch_size]
cond_data = train_data['A'][index_id:index_id + FLAGS.batch_size]
# real
feed_dict = {
real_data_t: real_data,
matched_cond_t: cond_data
}
real_hp_pframe = sess.run(
heuristic_penalty_pframe, feed_dict=feed_dict)
real_hp_pframe_all.append(real_hp_pframe)
real_hp_pframe_all = np.concatenate(real_hp_pframe_all, axis=0)
print(real_hp_pframe_all.shape)
real_hp_pdata = np.mean(real_hp_pframe_all, axis=1)
mean_ = np.mean(real_hp_pdata)
std_ = np.std(real_hp_pdata)
print(mean_)
print(std_)
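    # Illustrative note (added): the loop below uses a 2-sigma outlier rule; with,
    # say, mean_ = 0.002 and std_ = 0.001, any sequence whose mean heuristic penalty
    # exceeds 0.004 is saved as a "bad" example, while anything under the fixed
    # 0.0025 threshold is saved as a "good" one.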
concat_AB = np.concatenate(
[train_data['A'], train_data['B']], axis=-1)
recoverd = data_factory.recover_data(concat_AB)
for i, v in enumerate(real_hp_pdata):
if v > (mean_ + 2 * std_):
print('bad', i, v)
game_visualizer.plot_data(
recoverd[i], recoverd.shape[1], file_path=save_path + 'bad_' + str(i) + '_' + str(v) + '.mp4', if_save=True)
if v < 0.0025:
print('good', i, v)
game_visualizer.plot_data(
recoverd[i], recoverd.shape[1], file_path=save_path + 'good_' + str(i) + '_' + str(v) + '.mp4', if_save=True)
print('!!Completely Saved!!')
def mode_9(sess, graph, save_path, is_valid=FLAGS.is_valid):
""" to collect results vary in length
Saved Result
------------
results_A_fake_B : float, numpy ndarray, shape=[n_latents=100, n_conditions=100, length=100, features=23]
Real A + Fake B
results_A_real_B : float, numpy ndarray, shape=[n_conditions=100, length=100, features=23]
Real A + Real B
results_critic_scores : float, numpy ndarray, shape=[n_latents=100, n_conditions=100]
critic scores for each input data
"""
# placeholder tensor
latent_input_t = graph.get_tensor_by_name('Generator/latent_input:0')
team_a_t = graph.get_tensor_by_name('Generator/team_a:0')
G_samples_t = graph.get_tensor_by_name('Critic/G_samples:0')
matched_cond_t = graph.get_tensor_by_name('Critic/matched_cond:0')
# result tensor
result_t = graph.get_tensor_by_name(
'Generator/G_inference/conv_result/conv1d/Maximum:0')
critic_scores_t = graph.get_tensor_by_name(
'Critic/C_inference_1/conv_output/Reshape:0')
if not os.path.exists(save_path):
os.makedirs(save_path)
real_data = np.load(FLAGS.data_path)[:, :FLAGS.seq_length, :, :]
print('real_data.shape', real_data.shape)
# DataFactory
data_factory = DataFactory(real_data)
# target data
target_data = np.load('../../data/FixedFPS5.npy')[-100:]
target_length = np.load('../../data/FixedFPS5Length.npy')[-100:]
print('target_data.shape', target_data.shape)
team_AB = np.concatenate(
[
# ball
target_data[:, :, 0, :3].reshape(
[target_data.shape[0], target_data.shape[1], 1 * 3]),
# team A players
target_data[:, :, 1:6, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2]),
# team B players
target_data[:, :, 6:11, :2].reshape(
[target_data.shape[0], target_data.shape[1], 5 * 2])
], axis=-1
)
team_AB = data_factory.normalize(team_AB)
team_A = team_AB[:, :, :13]
team_B = team_AB[:, :, 13:]
# result collector
results_A_fake_B = []
results_A_real_B = []
results_critic_scores = []
for idx in range(team_AB.shape[0]):
        # given FLAGS.n_latents (=100) latents, generate that many results on the same condition at once
real_samples = team_B[idx:idx + 1, :target_length[idx]]
real_samples = np.concatenate(
[real_samples for _ in range(FLAGS.n_latents)], axis=0)
real_conds = team_A[idx:idx + 1, :target_length[idx]]
real_conds = np.concatenate(
[real_conds for _ in range(FLAGS.n_latents)], axis=0)
# generate result
latents = z_samples(FLAGS.n_latents)
feed_dict = {
latent_input_t: latents,
team_a_t: real_conds
}
result = sess.run(
result_t, feed_dict=feed_dict)
# calculate em distance
feed_dict = {
G_samples_t: result,
matched_cond_t: real_conds
}
em_dist = sess.run(
critic_scores_t, feed_dict=feed_dict)
recoverd_A_fake_B = data_factory.recover_data(
np.concatenate([real_conds, result], axis=-1))
# padding to length=200
dummy = np.zeros(
shape=[FLAGS.n_latents, team_AB.shape[1] - target_length[idx], team_AB.shape[2]])
temp_A_fake_B_concat = np.concatenate(
[recoverd_A_fake_B, dummy], axis=1)
results_A_fake_B.append(temp_A_fake_B_concat)
results_critic_scores.append(em_dist)
print(np.array(results_A_fake_B).shape)
print(np.array(results_critic_scores).shape)
# concat along with conditions dimension (axis=1)
results_A_fake_B = np.stack(results_A_fake_B, axis=1)
results_critic_scores = np.stack(results_critic_scores, axis=1)
# real data
results_A = data_factory.recover_BALL_and_A(team_A)
results_real_B = data_factory.recover_B(team_B)
results_A_real_B = data_factory.recover_data(team_AB)
# saved as numpy
print(np.array(results_A_fake_B).shape)
print(np.array(results_A_real_B).shape)
print(np.array(results_critic_scores).shape)
np.save(save_path + 'results_A_fake_B.npy',
np.array(results_A_fake_B).astype(np.float32).reshape([FLAGS.n_latents, team_AB.shape[0], team_AB.shape[1], 23]))
np.save(save_path + 'results_A_real_B.npy',
np.array(results_A_real_B).astype(np.float32).reshape([team_AB.shape[0], team_AB.shape[1], 23]))
np.save(save_path + 'results_critic_scores.npy',
np.array(results_critic_scores).astype(np.float32).reshape([FLAGS.n_latents, team_AB.shape[0]]))
print('!!Completely Saved!!')
def main(_):
with tf.get_default_graph().as_default() as graph:
        # session config
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
saver = tf.train.import_meta_graph(FLAGS.restore_path + '.meta')
with tf.Session(config=config) as sess:
# restored
saver.restore(sess, FLAGS.restore_path)
# collect
if FLAGS.mode == 1:
mode_1(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_1/'))
elif FLAGS.mode == 2:
mode_2(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_2/'))
elif FLAGS.mode == 3:
weight_vis(graph, save_path=os.path.join(
COLLECT_PATH, 'mode_3/'))
elif FLAGS.mode == 4:
mode_4(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_4/'))
elif FLAGS.mode == 5:
mode_5(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_5/'))
elif FLAGS.mode == 6:
mode_6(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_6/'))
elif FLAGS.mode == 7:
mode_7(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_7/'))
elif FLAGS.mode == 8:
mode_8(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_8/'))
elif FLAGS.mode == 9:
_, iterations = str(FLAGS.restore_path).split('-')
mode_9(sess, graph, save_path=os.path.join(
COLLECT_PATH, 'mode_9/', iterations))
if __name__ == '__main__':
assert FLAGS.restore_path is not None
assert FLAGS.mode is not None
assert FLAGS.folder_path is not None
global COLLECT_PATH
COLLECT_PATH = os.path.join(FLAGS.folder_path, 'collect/')
if not os.path.exists(COLLECT_PATH):
os.makedirs(COLLECT_PATH)
tf.app.run()
| 40.667949
| 165
| 0.635699
|
afe878b466efbc34e82fa3d3e7cf5198e2100044
| 582
|
py
|
Python
|
webarm/widgets/migrations/0001_initial.py
|
AlexGolovaschenko/WebArm
|
1019b73b1ad5560a626261bbce05137c01edcee5
|
[
"MIT"
] | null | null | null |
webarm/widgets/migrations/0001_initial.py
|
AlexGolovaschenko/WebArm
|
1019b73b1ad5560a626261bbce05137c01edcee5
|
[
"MIT"
] | null | null | null |
webarm/widgets/migrations/0001_initial.py
|
AlexGolovaschenko/WebArm
|
1019b73b1ad5560a626261bbce05137c01edcee5
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.1 on 2021-01-15 15:13
from django.db import migrations, models
import widgets.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='WidgetsTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('template', models.JSONField(default=widgets.models.get_default_template, verbose_name='Шаблон виджетов')),
],
),
]
| 25.304348
| 124
| 0.62543
|
d162a9563fa66f037abe34273116821a38b834e6
| 8,725
|
py
|
Python
|
test/python/pulse/test_commands.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 3
|
2019-05-19T17:39:38.000Z
|
2020-01-28T19:59:18.000Z
|
test/python/pulse/test_commands.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 4
|
2019-05-13T15:28:46.000Z
|
2019-12-19T20:47:02.000Z
|
test/python/pulse/test_commands.py
|
tareqdandachi/qiskit-terra
|
5221fe330adba5529bfa22dc25262ac8e6291aaf
|
[
"Apache-2.0"
] | 1
|
2021-07-07T16:55:41.000Z
|
2021-07-07T16:55:41.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,unexpected-keyword-arg
"""Test cases for the pulse command group."""
import unittest
import numpy as np
from qiskit.pulse import (SamplePulse, Acquire, FrameChange, PersistentValue,
Snapshot, Kernel, Discriminator, functional_pulse,
Delay, PulseError)
from qiskit.test import QiskitTestCase
class TestSamplePulse(QiskitTestCase):
"""SamplePulse tests."""
def test_sample_pulse(self):
"""Test pulse initialization."""
n_samples = 100
samples = np.linspace(0, 1., n_samples, dtype=np.complex128)
name = 'test'
sample_pulse = SamplePulse(samples, name=name)
self.assertEqual(sample_pulse.samples.dtype, np.complex128)
np.testing.assert_almost_equal(sample_pulse.samples, samples)
self.assertEqual(sample_pulse.duration, n_samples)
self.assertEqual(sample_pulse.name, name)
def test_type_casting(self):
"""Test casting of input samples to numpy array."""
n_samples = 100
samples_f64 = np.linspace(0, 1., n_samples, dtype=np.float64)
sample_pulse_f64 = SamplePulse(samples_f64)
self.assertEqual(sample_pulse_f64.samples.dtype, np.complex128)
samples_c64 = np.linspace(0, 1., n_samples, dtype=np.complex64)
sample_pulse_c64 = SamplePulse(samples_c64)
self.assertEqual(sample_pulse_c64.samples.dtype, np.complex128)
samples_list = np.linspace(0, 1., n_samples).tolist()
sample_pulse_list = SamplePulse(samples_list)
self.assertEqual(sample_pulse_list.samples.dtype, np.complex128)
def test_pulse_limits(self):
"""Test that limits of pulse norm of one are enforced properly."""
# test norm is correct for complex128 numpy data
unit_pulse_c128 = np.exp(1j*2*np.pi*np.linspace(0, 1, 1000), dtype=np.complex128)
# test does not raise error
try:
SamplePulse(unit_pulse_c128)
except PulseError:
self.fail('SamplePulse incorrectly failed on approximately unit norm samples.')
invalid_const = 1.1
with self.assertRaises(PulseError):
SamplePulse(invalid_const*np.exp(1j*2*np.pi*np.linspace(0, 1, 1000)))
# Test case where data is converted to python types with complex as a list
# with form [re, im] and back to a numpy array.
# This is how the transport layer handles samples in the qobj so it is important
# to test.
unit_pulse_c64 = np.exp(1j*2*np.pi*np.linspace(0, 1, 1000), dtype=np.complex64)
sample_components = np.stack(np.transpose([np.real(unit_pulse_c64),
np.imag(unit_pulse_c64)]))
pulse_list = sample_components.tolist()
recombined_pulse = [sample[0]+sample[1]*1j for sample in pulse_list]
# test does not raise error
try:
SamplePulse(recombined_pulse)
except PulseError:
            self.fail('SamplePulse incorrectly failed on approximately unit norm samples.')
class TestAcquire(QiskitTestCase):
"""Acquisition tests."""
def test_can_construct_valid_acquire_command(self):
"""Test if valid acquire command can be constructed.
"""
kernel_opts = {
'start_window': 0,
'stop_window': 10
}
kernel = Kernel(name='boxcar', **kernel_opts)
discriminator_opts = {
'neighborhoods': [{'qubits': 1, 'channels': 1}],
'cal': 'coloring',
'resample': False
}
discriminator = Discriminator(name='linear_discriminator', **discriminator_opts)
acq_command = Acquire(duration=10, kernel=kernel, discriminator=discriminator)
self.assertEqual(acq_command.duration, 10)
self.assertEqual(acq_command.discriminator.name, 'linear_discriminator')
self.assertEqual(acq_command.discriminator.params, discriminator_opts)
self.assertEqual(acq_command.kernel.name, 'boxcar')
self.assertEqual(acq_command.kernel.params, kernel_opts)
self.assertTrue(acq_command.name.startswith('acq'))
def test_can_construct_acquire_command_with_default_values(self):
"""Test if an acquire command can be constructed with default discriminator and kernel.
"""
acq_command_a = Acquire(duration=10)
acq_command_b = Acquire(duration=10)
self.assertEqual(acq_command_a.duration, 10)
self.assertEqual(acq_command_a.discriminator, None)
self.assertEqual(acq_command_a.kernel, None)
self.assertTrue(acq_command_a.name.startswith('acq'))
self.assertNotEqual(acq_command_a.name, acq_command_b.name)
self.assertEqual(acq_command_b.name, 'acq' + str(int(acq_command_a.name[3:]) + 1))
class TestFrameChangeCommand(QiskitTestCase):
"""FrameChange tests."""
def test_default(self):
"""Test default frame change.
"""
fc_command = FrameChange(phase=1.57)
self.assertEqual(fc_command.phase, 1.57)
self.assertEqual(fc_command.duration, 0)
self.assertTrue(fc_command.name.startswith('fc'))
class TestPersistentValueCommand(QiskitTestCase):
"""PersistentValue tests."""
def test_default(self):
"""Test default persistent value.
"""
pv_command = PersistentValue(value=0.5 - 0.5j)
self.assertEqual(pv_command.value, 0.5-0.5j)
self.assertEqual(pv_command.duration, 0)
self.assertTrue(pv_command.name.startswith('pv'))
class TestSnapshotCommand(QiskitTestCase):
"""Snapshot tests."""
def test_default(self):
"""Test default snapshot.
"""
snap_command = Snapshot(label='test_name', snapshot_type='state')
self.assertEqual(snap_command.name, "test_name")
self.assertEqual(snap_command.type, "state")
self.assertEqual(snap_command.duration, 0)
class TestDelayCommand(QiskitTestCase):
"""Delay tests."""
def test_delay(self):
"""Test delay."""
delay_command = Delay(10, name='test_name')
self.assertEqual(delay_command.name, "test_name")
self.assertEqual(delay_command.duration, 10)
class TestFunctionalPulse(QiskitTestCase):
"""SamplePulse tests."""
def test_gaussian(self):
"""Test gaussian pulse.
"""
@functional_pulse
def gaussian(duration, amp, t0, sig):
x = np.linspace(0, duration - 1, duration)
return amp * np.exp(-(x - t0) ** 2 / sig ** 2)
pulse_command = gaussian(duration=10, name='test_pulse', amp=1, t0=5, sig=1)
_y = 1 * np.exp(-(np.linspace(0, 9, 10) - 5)**2 / 1**2)
self.assertListEqual(list(pulse_command.samples), list(_y))
# check name
self.assertEqual(pulse_command.name, 'test_pulse')
# check duration
self.assertEqual(pulse_command.duration, 10)
def test_variable_duration(self):
"""Test generation of sample pulse with variable duration.
"""
@functional_pulse
def gaussian(duration, amp, t0, sig):
x = np.linspace(0, duration - 1, duration)
return amp * np.exp(-(x - t0) ** 2 / sig ** 2)
_durations = np.arange(10, 15, 1)
for _duration in _durations:
pulse_command = gaussian(duration=_duration, amp=1, t0=5, sig=1)
self.assertEqual(len(pulse_command.samples), _duration)
class TestKernel(QiskitTestCase):
"""Kernel tests."""
def test_can_construct_kernel_with_default_values(self):
"""Test if Kernel can be constructed with default name and params."""
kernel = Kernel()
self.assertEqual(kernel.name, None)
self.assertEqual(kernel.params, {})
class TestDiscriminator(QiskitTestCase):
"""Discriminator tests."""
def test_can_construct_discriminator_with_default_values(self):
"""Test if Discriminator can be constructed with default name and params."""
discriminator = Discriminator()
self.assertEqual(discriminator.name, None)
self.assertEqual(discriminator.params, {})
if __name__ == '__main__':
unittest.main()
| 35.040161
| 95
| 0.660401
|
f27edec3d9c03ba9762557f1c859baf59238f330
| 883
|
py
|
Python
|
idem_lxd/exec/lxd/snapshots.py
|
UtahDave/idem-lxd
|
8a0bbe3b3800b8fd1b616be47eb421676af366ec
|
[
"Apache-2.0"
] | null | null | null |
idem_lxd/exec/lxd/snapshots.py
|
UtahDave/idem-lxd
|
8a0bbe3b3800b8fd1b616be47eb421676af366ec
|
[
"Apache-2.0"
] | null | null | null |
idem_lxd/exec/lxd/snapshots.py
|
UtahDave/idem-lxd
|
8a0bbe3b3800b8fd1b616be47eb421676af366ec
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Manage LXD container snapshots
"""
__func_alias__ = {"list_": "list"}
async def list_(hub, ctx, name: str):
"""
List all snapshots for a container
CLI Example:
.. code-block:: bash
idem exec lxd.snapshots.list container01
"""
ret = []
container = await hub.tool.lxd.api.request(ctx, "containers", "get", name=name)
if "error" in container:
return container
snapshots = container.snapshots.all()
for snap in snapshots:
item = await _get_snapshot_info(snap)
ret.append(item)
return {"snapshots": ret}
async def _get_snapshot_info(snap):
"""
Collect snapshot info
"""
item = {}
item[snap.name] = {}
item[snap.name]["name"] = snap.name
item[snap.name]["created_at"] = snap.created_at
item[snap.name]["stateful"] = snap.stateful
return item
| 22.641026
| 83
| 0.616082
|
4f2ba8c7909893332c88d54e37f48148d37bd04f
| 5,156
|
py
|
Python
|
pyclue/features/cohort.py
|
linewalks/py-clue
|
ecea2a37d88387d909fc283fdbc145bc5a1187a8
|
[
"MIT"
] | null | null | null |
pyclue/features/cohort.py
|
linewalks/py-clue
|
ecea2a37d88387d909fc283fdbc145bc5a1187a8
|
[
"MIT"
] | 11
|
2022-01-13T09:30:03.000Z
|
2022-03-14T01:10:08.000Z
|
pyclue/features/cohort.py
|
linewalks/py-clue
|
ecea2a37d88387d909fc283fdbc145bc5a1187a8
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from clue_pb2 import (
RequestCohortList,
RequestCohortStream
)
from pyclue.converter import convert
from pyclue.stream import Stream
class CohortFeatures:
@convert()
def get_cohort_list(
self,
page: int = 1,
length: int = 0,
term: str = ""
) -> List[Dict]:
"""
Get the list of cohorts.
:param int page:
Page number.
:param int length:
Number of cohorts in a page. If 0, all cohorts will be returned.
:param str term:
Search term.
:return: List of cohorts.
:rtype: List of dictionaries.
"""
cohort_list = self.stub.GetCohortList(RequestCohortList(
term=term,
page=page,
length=length,
)).cohort_list
return cohort_list
def get_cohort_person_table(self, cohort_id: int) -> Stream:
"""
Get person table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortPersonTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_condition_occurrence_table(self, cohort_id: int) -> Stream:
"""
Get condition_occurrence table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortConditionOccurrenceTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_death_table(self, cohort_id: int) -> Stream:
"""
Get death table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortDeathTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_device_exposure_table(self, cohort_id: int) -> Stream:
"""
Get device_exposure table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortDeviceExposureTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_drug_exposure_table(self, cohort_id: int) -> Stream:
"""
Get drug_exposure table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortDrugExposureTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_measurement_table(self, cohort_id: int) -> Stream:
"""
Get measurement table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortMeasurementTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_observation_period_table(self, cohort_id: int) -> Stream:
"""
Get observation_period table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortObservationPeriodTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_observation_table(self, cohort_id: int) -> Stream:
"""
Get observation table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortObservationTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_procedure_occurrence_table(self, cohort_id: int) -> Stream:
"""
Get procedure_occurrence table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortProcedureOccurrenceTable,
RequestCohortStream,
cohort_id=cohort_id
)
def get_cohort_visit_occurrence_table(self, cohort_id: int) -> Stream:
"""
Get visit_occurrence table of a cohort.
Data stream connection will be opened.
:param int cohort_id:
ID of the cohort.
:return: Stream object.
:rtype: Stream
"""
return Stream(
self.stub.GetCohortVisitOccurrenceTable,
RequestCohortStream,
cohort_id=cohort_id
)
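# Hypothetical usage sketch (added, not part of this module): assuming a client
# class that mixes in CohortFeatures and owns a connected gRPC `stub`, the methods
# above could be exercised roughly like this:
#
#   cohorts = client.get_cohort_list(page=1, length=10, term="diabetes")
#   stream = client.get_cohort_person_table(cohort_id=1)
#
# The exact fields of each cohort dictionary and the interface of the returned
# Stream depend on the rest of the pyclue package and are not specified here.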
| 22.814159
| 76
| 0.647789
|
27e617adde694585cdf9e2a4f6939a26e034c716
| 6,990
|
py
|
Python
|
experiments/ashvin/corl2019/pcvae/pointmass/dynamics_cvae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/ashvin/corl2019/pcvae/pointmass/dynamics_cvae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/ashvin/corl2019/pcvae/pointmass/dynamics_cvae.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
import railrl.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import *
import railrl.torch.vae.vae_schedules as vae_schedules
from railrl.torch.vae.conv_vae import imsize48_default_architecture, imsize48_default_architecture_with_more_hidden_layers
from railrl.launchers.arglauncher import run_variants
from railrl.torch.grill.launcher import grill_her_twin_sac_online_vae_full_experiment, grill_her_twin_sac_full_experiment
from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DEnv
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv
from railrl.torch.vae.conditional_conv_vae import CDVAE
from railrl.torch.vae.vae_trainer import CDVAETrainer
def experiment(variant):
full_experiment_variant_preprocess(variant)
train_vae_and_update_variant(variant)
if __name__ == "__main__":
variant = dict(
double_algo=False,
online_vae_exploration=False,
imsize=48,
init_camera=sawyer_init_camera_zoomed_in,
# env_id='SawyerPushNIPSEasy-v0',
# env_class=SawyerMultiobjectEnv,
# env_kwargs=dict(
# # finger_sensors=False,
# num_objects=10,
# fixed_start=False,
# object_meshes=None,
# num_scene_objects=[1]
# ),
env_class=Multiobj2DEnv,
env_kwargs=dict(
render_onscreen=False,
ball_radius=1.5,
images_are_rgb=True,
show_goal=False,
fixed_colors=True,
change_background=False,
),
grill_variant=dict(
save_video=True,
custom_goal_sampler='replay_buffer',
online_vae_trainer_kwargs=dict(
beta=20,
lr=0,
),
save_video_period=100,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
max_path_length=50,
algo_kwargs=dict(
batch_size=128,
num_epochs=1000,
num_eval_steps_per_epoch=500,
num_expl_steps_per_train_loop=500,
num_trains_per_train_loop=5,
min_num_steps_before_training=1000,
vae_training_schedule=vae_schedules.never_train,
oracle_data=False,
vae_save_period=25,
parallel_vae_train=False,
),
twin_sac_trainer_kwargs=dict(
discount=0.98,
reward_scale=1,
soft_target_tau=1e-3,
target_update_period=1, # 1
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
start_skew_epoch=10,
max_size=int(100000),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
exploration_rewards_type='None',
vae_priority_type='vae_prob',
priority_function_kwargs=dict(
sampling_method='importance_sampling',
decoder_distribution='gaussian_identity_variance',
# decoder_distribution='bernoulli',
num_latents_to_sample=10,
),
power=-1,
relabeling_goal_sampling_mode='vae_prior',
),
exploration_goal_sampling_mode='vae_prior',
evaluation_goal_sampling_mode='reset_of_env',
normalize=False,
render=False,
exploration_noise=0.2,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
),
algorithm='ONLINE-VAE-SAC-BERNOULLI',
),
train_vae_variant=dict(
representation_size=4,
beta=10,
num_epochs=1500,
dump_skew_debug_plots=False,
# decoder_activation='gaussian',
decoder_activation='sigmoid',
use_linear_dynamics=True,
generate_vae_dataset_kwargs=dict(
N=10000,
n_random_steps=5000,
test_p=.9,
use_cached=False,
show=False,
oracle_dataset=False,
oracle_dataset_using_set_to_goal=False,
non_presampled_goal_img_is_garbage=False,
random_rollout_data=True,
conditional_vae_dataset=True,
save_trajectories=False,
enviorment_dataset=False,
),
vae_trainer_class=CDVAETrainer,
vae_class=CDVAE,
vae_kwargs=dict(
dynamics_type='nonlinear',
input_channels=3,
architecture=imsize48_default_architecture_with_more_hidden_layers,
decoder_distribution='gaussian_identity_variance',
),
# TODO: why the redundancy?
algo_kwargs=dict(
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=32,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
skew_dataset=False,
linearity_weight=100,
distance_weight=100,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
# sampling_method='true_prior_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=25,
),
)
search_space = {
'seedid': range(1),
'train_vae_variant.representation_size': [(4, 4),], #(3 * objects, 3 * colors)
'train_vae_variant.beta': [20],
'train_vae_variant.generate_vae_dataset_kwargs.n_random_steps': [100]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=4)
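    # Illustrative note (added): DeterministicHyperparameterSweeper iterates the
    # cross product of the lists in search_space over the default variant, so the
    # configuration above yields 1 x 1 x 1 x 1 = 1 variant; listing two betas,
    # e.g. [20, 30], would double the number of variants passed to run_variants.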
| 37.379679
| 122
| 0.587268
|
a191ad89da95b1d5bbd52e22f90c33686df631ef
| 20,444
|
py
|
Python
|
airflow/providers/google/cloud/example_dags/example_vision.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79
|
2021-10-15T07:32:27.000Z
|
2022-03-28T04:10:19.000Z
|
airflow/providers/google/cloud/example_dags/example_vision.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153
|
2021-10-15T05:23:46.000Z
|
2022-02-23T06:07:10.000Z
|
airflow/providers/google/cloud/example_dags/example_vision.py
|
emilioego/airflow
|
3457c7847cd24413ff5b622e65c27d8370f94502
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23
|
2021-10-15T02:36:37.000Z
|
2022-03-17T02:59:27.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that creates, gets, updates and deletes Products and Product Sets in the Google Cloud
Vision service.
This DAG relies on the following OS environment variables
* GCP_VISION_LOCATION - Zone where the instance exists.
* GCP_VISION_PRODUCT_SET_ID - Product Set ID.
* GCP_VISION_PRODUCT_ID - Product ID.
* GCP_VISION_REFERENCE_IMAGE_ID - Reference Image ID.
* GCP_VISION_REFERENCE_IMAGE_URL - A link to the bucket that contains the reference image.
* GCP_VISION_ANNOTATE_IMAGE_URL - A link to the bucket that contains the file to be annotated.
"""
import os
from airflow import models
from airflow.operators.bash import BashOperator
from airflow.providers.google.cloud.operators.vision import (
CloudVisionAddProductToProductSetOperator,
CloudVisionCreateProductOperator,
CloudVisionCreateProductSetOperator,
CloudVisionCreateReferenceImageOperator,
CloudVisionDeleteProductOperator,
CloudVisionDeleteProductSetOperator,
CloudVisionDeleteReferenceImageOperator,
CloudVisionDetectImageLabelsOperator,
CloudVisionDetectImageSafeSearchOperator,
CloudVisionDetectTextOperator,
CloudVisionGetProductOperator,
CloudVisionGetProductSetOperator,
CloudVisionImageAnnotateOperator,
CloudVisionRemoveProductFromProductSetOperator,
CloudVisionTextDetectOperator,
CloudVisionUpdateProductOperator,
CloudVisionUpdateProductSetOperator,
)
from airflow.utils.dates import days_ago
# [START howto_operator_vision_retry_import]
from google.api_core.retry import Retry # isort:skip pylint: disable=wrong-import-order
# [END howto_operator_vision_retry_import]
# [START howto_operator_vision_product_set_import]
from google.cloud.vision_v1.types import ProductSet # isort:skip pylint: disable=wrong-import-order
# [END howto_operator_vision_product_set_import]
# [START howto_operator_vision_product_import]
from google.cloud.vision_v1.types import Product # isort:skip pylint: disable=wrong-import-order
# [END howto_operator_vision_product_import]
# [START howto_operator_vision_reference_image_import]
from google.cloud.vision_v1.types import ReferenceImage # isort:skip pylint: disable=wrong-import-order
# [END howto_operator_vision_reference_image_import]
# [START howto_operator_vision_enums_import]
from google.cloud.vision import enums # isort:skip pylint: disable=wrong-import-order
# [END howto_operator_vision_enums_import]
GCP_VISION_LOCATION = os.environ.get('GCP_VISION_LOCATION', 'europe-west1')
GCP_VISION_PRODUCT_SET_ID = os.environ.get('GCP_VISION_PRODUCT_SET_ID', 'product_set_explicit_id')
GCP_VISION_PRODUCT_ID = os.environ.get('GCP_VISION_PRODUCT_ID', 'product_explicit_id')
GCP_VISION_REFERENCE_IMAGE_ID = os.environ.get('GCP_VISION_REFERENCE_IMAGE_ID', 'reference_image_explicit_id')
GCP_VISION_REFERENCE_IMAGE_URL = os.environ.get('GCP_VISION_REFERENCE_IMAGE_URL', 'gs://bucket/image1.jpg')
GCP_VISION_ANNOTATE_IMAGE_URL = os.environ.get('GCP_VISION_ANNOTATE_IMAGE_URL', 'gs://bucket/image2.jpg')
# [START howto_operator_vision_product_set]
product_set = ProductSet(display_name='My Product Set')
# [END howto_operator_vision_product_set]
# [START howto_operator_vision_product]
product = Product(display_name='My Product 1', product_category='toys')
# [END howto_operator_vision_product]
# [START howto_operator_vision_reference_image]
reference_image = ReferenceImage(uri=GCP_VISION_REFERENCE_IMAGE_URL)
# [END howto_operator_vision_reference_image]
# [START howto_operator_vision_annotate_image_request]
annotate_image_request = {
'image': {'source': {'image_uri': GCP_VISION_ANNOTATE_IMAGE_URL}},
'features': [{'type': enums.Feature.Type.LOGO_DETECTION}],
}
# [END howto_operator_vision_annotate_image_request]
# [START howto_operator_vision_detect_image_param]
DETECT_IMAGE = {"source": {"image_uri": GCP_VISION_ANNOTATE_IMAGE_URL}}
# [END howto_operator_vision_detect_image_param]
with models.DAG(
'example_gcp_vision_autogenerated_id', start_date=days_ago(1), schedule_interval=None
) as dag_autogenerated_id:
# ################################## #
# ### Autogenerated IDs examples ### #
# ################################## #
# [START howto_operator_vision_product_set_create]
product_set_create = CloudVisionCreateProductSetOperator(
location=GCP_VISION_LOCATION,
product_set=product_set,
retry=Retry(maximum=10.0),
timeout=5,
task_id='product_set_create',
)
# [END howto_operator_vision_product_set_create]
# [START howto_operator_vision_product_set_get]
product_set_get = CloudVisionGetProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id="{{ task_instance.xcom_pull('product_set_create') }}",
task_id='product_set_get',
)
# [END howto_operator_vision_product_set_get]
# [START howto_operator_vision_product_set_update]
product_set_update = CloudVisionUpdateProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id="{{ task_instance.xcom_pull('product_set_create') }}",
product_set=ProductSet(display_name='My Product Set 2'),
task_id='product_set_update',
)
# [END howto_operator_vision_product_set_update]
# [START howto_operator_vision_product_set_delete]
product_set_delete = CloudVisionDeleteProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id="{{ task_instance.xcom_pull('product_set_create') }}",
task_id='product_set_delete',
)
# [END howto_operator_vision_product_set_delete]
# [START howto_operator_vision_product_create]
product_create = CloudVisionCreateProductOperator(
location=GCP_VISION_LOCATION,
product=product,
retry=Retry(maximum=10.0),
timeout=5,
task_id='product_create',
)
# [END howto_operator_vision_product_create]
# [START howto_operator_vision_product_get]
product_get = CloudVisionGetProductOperator(
location=GCP_VISION_LOCATION,
product_id="{{ task_instance.xcom_pull('product_create') }}",
task_id='product_get',
)
# [END howto_operator_vision_product_get]
# [START howto_operator_vision_product_update]
product_update = CloudVisionUpdateProductOperator(
location=GCP_VISION_LOCATION,
product_id="{{ task_instance.xcom_pull('product_create') }}",
product=Product(display_name='My Product 2', description='My updated description'),
task_id='product_update',
)
# [END howto_operator_vision_product_update]
# [START howto_operator_vision_product_delete]
product_delete = CloudVisionDeleteProductOperator(
location=GCP_VISION_LOCATION,
product_id="{{ task_instance.xcom_pull('product_create') }}",
task_id='product_delete',
)
# [END howto_operator_vision_product_delete]
# [START howto_operator_vision_reference_image_create]
reference_image_create = CloudVisionCreateReferenceImageOperator(
location=GCP_VISION_LOCATION,
reference_image=reference_image,
product_id="{{ task_instance.xcom_pull('product_create') }}",
reference_image_id=GCP_VISION_REFERENCE_IMAGE_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='reference_image_create',
)
# [END howto_operator_vision_reference_image_create]
# [START howto_operator_vision_reference_image_delete]
reference_image_delete = CloudVisionDeleteReferenceImageOperator(
location=GCP_VISION_LOCATION,
product_id="{{ task_instance.xcom_pull('product_create') }}",
reference_image_id=GCP_VISION_REFERENCE_IMAGE_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='reference_image_delete',
)
# [END howto_operator_vision_reference_image_delete]
# [START howto_operator_vision_add_product_to_product_set]
add_product_to_product_set = CloudVisionAddProductToProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id="{{ task_instance.xcom_pull('product_set_create') }}",
product_id="{{ task_instance.xcom_pull('product_create') }}",
retry=Retry(maximum=10.0),
timeout=5,
task_id='add_product_to_product_set',
)
# [END howto_operator_vision_add_product_to_product_set]
# [START howto_operator_vision_remove_product_from_product_set]
remove_product_from_product_set = CloudVisionRemoveProductFromProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id="{{ task_instance.xcom_pull('product_set_create') }}",
product_id="{{ task_instance.xcom_pull('product_create') }}",
retry=Retry(maximum=10.0),
timeout=5,
task_id='remove_product_from_product_set',
)
# [END howto_operator_vision_remove_product_from_product_set]
# Product path
product_create >> product_get >> product_update >> product_delete
# ProductSet path
product_set_create >> product_set_get >> product_set_update >> product_set_delete
# ReferenceImage path
product_create >> reference_image_create >> reference_image_delete >> product_delete
# Product/ProductSet path
product_create >> add_product_to_product_set
product_set_create >> add_product_to_product_set
add_product_to_product_set >> remove_product_from_product_set
remove_product_from_product_set >> product_delete
remove_product_from_product_set >> product_set_delete
with models.DAG(
'example_gcp_vision_explicit_id', start_date=days_ago(1), schedule_interval=None
) as dag_explicit_id:
# ############################# #
# ### Explicit IDs examples ### #
# ############################# #
# [START howto_operator_vision_product_set_create_2]
product_set_create_2 = CloudVisionCreateProductSetOperator(
product_set_id=GCP_VISION_PRODUCT_SET_ID,
location=GCP_VISION_LOCATION,
product_set=product_set,
retry=Retry(maximum=10.0),
timeout=5,
task_id='product_set_create_2',
)
# [END howto_operator_vision_product_set_create_2]
# Second 'create' task with the same product_set_id to demonstrate idempotence
product_set_create_2_idempotence = CloudVisionCreateProductSetOperator(
product_set_id=GCP_VISION_PRODUCT_SET_ID,
location=GCP_VISION_LOCATION,
product_set=product_set,
retry=Retry(maximum=10.0),
timeout=5,
task_id='product_set_create_2_idempotence',
)
# [START howto_operator_vision_product_set_get_2]
product_set_get_2 = CloudVisionGetProductSetOperator(
location=GCP_VISION_LOCATION, product_set_id=GCP_VISION_PRODUCT_SET_ID, task_id='product_set_get_2'
)
# [END howto_operator_vision_product_set_get_2]
# [START howto_operator_vision_product_set_update_2]
product_set_update_2 = CloudVisionUpdateProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id=GCP_VISION_PRODUCT_SET_ID,
product_set=ProductSet(display_name='My Product Set 2'),
task_id='product_set_update_2',
)
# [END howto_operator_vision_product_set_update_2]
# [START howto_operator_vision_product_set_delete_2]
product_set_delete_2 = CloudVisionDeleteProductSetOperator(
location=GCP_VISION_LOCATION, product_set_id=GCP_VISION_PRODUCT_SET_ID, task_id='product_set_delete_2'
)
# [END howto_operator_vision_product_set_delete_2]
# [START howto_operator_vision_product_create_2]
product_create_2 = CloudVisionCreateProductOperator(
product_id=GCP_VISION_PRODUCT_ID,
location=GCP_VISION_LOCATION,
product=product,
retry=Retry(maximum=10.0),
timeout=5,
task_id='product_create_2',
)
# [END howto_operator_vision_product_create_2]
# Second 'create' task with the same product_id to demonstrate idempotence
product_create_2_idempotence = CloudVisionCreateProductOperator(
product_id=GCP_VISION_PRODUCT_ID,
location=GCP_VISION_LOCATION,
product=product,
retry=Retry(maximum=10.0),
timeout=5,
task_id='product_create_2_idempotence',
)
# [START howto_operator_vision_product_get_2]
product_get_2 = CloudVisionGetProductOperator(
location=GCP_VISION_LOCATION, product_id=GCP_VISION_PRODUCT_ID, task_id='product_get_2'
)
# [END howto_operator_vision_product_get_2]
# [START howto_operator_vision_product_update_2]
product_update_2 = CloudVisionUpdateProductOperator(
location=GCP_VISION_LOCATION,
product_id=GCP_VISION_PRODUCT_ID,
product=Product(display_name='My Product 2', description='My updated description'),
task_id='product_update_2',
)
# [END howto_operator_vision_product_update_2]
# [START howto_operator_vision_product_delete_2]
product_delete_2 = CloudVisionDeleteProductOperator(
location=GCP_VISION_LOCATION, product_id=GCP_VISION_PRODUCT_ID, task_id='product_delete_2'
)
# [END howto_operator_vision_product_delete_2]
# [START howto_operator_vision_reference_image_create_2]
reference_image_create_2 = CloudVisionCreateReferenceImageOperator(
location=GCP_VISION_LOCATION,
reference_image=reference_image,
product_id=GCP_VISION_PRODUCT_ID,
reference_image_id=GCP_VISION_REFERENCE_IMAGE_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='reference_image_create_2',
)
# [END howto_operator_vision_reference_image_create_2]
# [START howto_operator_vision_reference_image_delete_2]
reference_image_delete_2 = CloudVisionDeleteReferenceImageOperator(
location=GCP_VISION_LOCATION,
reference_image_id=GCP_VISION_REFERENCE_IMAGE_ID,
product_id=GCP_VISION_PRODUCT_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='reference_image_delete_2',
)
# [END howto_operator_vision_reference_image_delete_2]
    # Second 'create' task with the same reference_image_id to demonstrate idempotence
reference_image_create_2_idempotence = CloudVisionCreateReferenceImageOperator(
location=GCP_VISION_LOCATION,
reference_image=reference_image,
product_id=GCP_VISION_PRODUCT_ID,
reference_image_id=GCP_VISION_REFERENCE_IMAGE_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='reference_image_create_2_idempotence',
)
# [START howto_operator_vision_add_product_to_product_set_2]
add_product_to_product_set_2 = CloudVisionAddProductToProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id=GCP_VISION_PRODUCT_SET_ID,
product_id=GCP_VISION_PRODUCT_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='add_product_to_product_set_2',
)
# [END howto_operator_vision_add_product_to_product_set_2]
# [START howto_operator_vision_remove_product_from_product_set_2]
remove_product_from_product_set_2 = CloudVisionRemoveProductFromProductSetOperator(
location=GCP_VISION_LOCATION,
product_set_id=GCP_VISION_PRODUCT_SET_ID,
product_id=GCP_VISION_PRODUCT_ID,
retry=Retry(maximum=10.0),
timeout=5,
task_id='remove_product_from_product_set_2',
)
# [END howto_operator_vision_remove_product_from_product_set_2]
# Product path
product_create_2 >> product_create_2_idempotence >> product_get_2 >> product_update_2 >> product_delete_2
# ProductSet path
product_set_create_2 >> product_set_get_2 >> product_set_update_2 >> product_set_delete_2
product_set_create_2 >> product_set_create_2_idempotence >> product_set_delete_2
# ReferenceImage path
product_create_2 >> reference_image_create_2 >> reference_image_create_2_idempotence
reference_image_create_2_idempotence >> reference_image_delete_2 >> product_delete_2
# Product/ProductSet path
add_product_to_product_set_2 >> remove_product_from_product_set_2
product_set_create_2 >> add_product_to_product_set_2
product_create_2 >> add_product_to_product_set_2
remove_product_from_product_set_2 >> product_set_delete_2
remove_product_from_product_set_2 >> product_delete_2
with models.DAG(
'example_gcp_vision_annotate_image', start_date=days_ago(1), schedule_interval=None
) as dag_annotate_image:
# ############################## #
# ### Annotate image example ### #
# ############################## #
# [START howto_operator_vision_annotate_image]
annotate_image = CloudVisionImageAnnotateOperator(
request=annotate_image_request, retry=Retry(maximum=10.0), timeout=5, task_id='annotate_image'
)
# [END howto_operator_vision_annotate_image]
# [START howto_operator_vision_annotate_image_result]
annotate_image_result = BashOperator(
bash_command="echo {{ task_instance.xcom_pull('annotate_image')"
"['logoAnnotations'][0]['description'] }}",
task_id='annotate_image_result',
)
# [END howto_operator_vision_annotate_image_result]
# [START howto_operator_vision_detect_text]
detect_text = CloudVisionDetectTextOperator(
image=DETECT_IMAGE,
retry=Retry(maximum=10.0),
timeout=5,
task_id="detect_text",
language_hints="en",
web_detection_params={'include_geo_results': True},
)
# [END howto_operator_vision_detect_text]
# [START howto_operator_vision_detect_text_result]
detect_text_result = BashOperator(
bash_command="echo {{ task_instance.xcom_pull('detect_text')['textAnnotations'][0] }}",
task_id="detect_text_result",
)
# [END howto_operator_vision_detect_text_result]
# [START howto_operator_vision_document_detect_text]
document_detect_text = CloudVisionTextDetectOperator(
image=DETECT_IMAGE, retry=Retry(maximum=10.0), timeout=5, task_id="document_detect_text"
)
# [END howto_operator_vision_document_detect_text]
# [START howto_operator_vision_document_detect_text_result]
document_detect_text_result = BashOperator(
bash_command="echo {{ task_instance.xcom_pull('document_detect_text')['textAnnotations'][0] }}",
task_id="document_detect_text_result",
)
# [END howto_operator_vision_document_detect_text_result]
# [START howto_operator_vision_detect_labels]
detect_labels = CloudVisionDetectImageLabelsOperator(
image=DETECT_IMAGE, retry=Retry(maximum=10.0), timeout=5, task_id="detect_labels"
)
# [END howto_operator_vision_detect_labels]
# [START howto_operator_vision_detect_labels_result]
detect_labels_result = BashOperator(
bash_command="echo {{ task_instance.xcom_pull('detect_labels')['labelAnnotations'][0] }}",
task_id="detect_labels_result",
)
# [END howto_operator_vision_detect_labels_result]
# [START howto_operator_vision_detect_safe_search]
detect_safe_search = CloudVisionDetectImageSafeSearchOperator(
image=DETECT_IMAGE, retry=Retry(maximum=10.0), timeout=5, task_id="detect_safe_search"
)
# [END howto_operator_vision_detect_safe_search]
# [START howto_operator_vision_detect_safe_search_result]
detect_safe_search_result = BashOperator(
bash_command="echo {{ task_instance.xcom_pull('detect_safe_search') }}",
task_id="detect_safe_search_result",
)
# [END howto_operator_vision_detect_safe_search_result]
annotate_image >> annotate_image_result
detect_text >> detect_text_result
document_detect_text >> document_detect_text_result
detect_labels >> detect_labels_result
detect_safe_search >> detect_safe_search_result
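    # Each *_result task above simply echoes the JSON its predecessor pushed to
    # XCom; the "{{ task_instance.xcom_pull(...) }}" fragments in bash_command
    # are Jinja templates that Airflow renders at task runtime.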
avg_line_length: 40.96994
max_line_length: 110
alphanum_fraction: 0.756457

hexsha: 39fa1aa74d1bd6c6989479ff347d8b5feb3b5d75
size: 4025
ext: py
lang: Python
max_stars_repo_path: social_core/backends/github.py
max_stars_repo_name: kim-sojeong/social-core
max_stars_repo_head_hexsha: 0ce1fa4fbb4bbda4af30482c9484450103aa3922
max_stars_repo_licenses: ["BSD-3-Clause"]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2020-10-03T14:37:40.000Z
max_stars_repo_stars_event_max_datetime: 2021-03-28T17:21:44.000Z
max_issues_repo_path: social_core/backends/github.py
max_issues_repo_name: kim-sojeong/social-core
max_issues_repo_head_hexsha: 0ce1fa4fbb4bbda4af30482c9484450103aa3922
max_issues_repo_licenses: ["BSD-3-Clause"]
max_issues_count: 13
max_issues_repo_issues_event_min_datetime: 2020-03-24T17:53:51.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-10T20:01:14.000Z
max_forks_repo_path: social_core/backends/github.py
max_forks_repo_name: kim-sojeong/social-core
max_forks_repo_head_hexsha: 0ce1fa4fbb4bbda4af30482c9484450103aa3922
max_forks_repo_licenses: ["BSD-3-Clause"]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2019-09-17T12:47:49.000Z
max_forks_repo_forks_event_max_datetime: 2019-09-17T12:47:56.000Z
content:
"""
Github OAuth2 backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/github.html
"""
from requests import HTTPError
from six.moves.urllib.parse import urljoin
from .oauth import BaseOAuth2
from ..exceptions import AuthFailed
class GithubOAuth2(BaseOAuth2):
"""Github OAuth authentication backend"""
name = 'github'
API_URL = 'https://api.github.com/'
AUTHORIZATION_URL = 'https://github.com/login/oauth/authorize'
ACCESS_TOKEN_URL = 'https://github.com/login/oauth/access_token'
ACCESS_TOKEN_METHOD = 'POST'
SCOPE_SEPARATOR = ','
REDIRECT_STATE = False
STATE_PARAMETER = True
SEND_USER_AGENT = True
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires'),
('login', 'login')
]
def api_url(self):
return self.API_URL
def get_user_details(self, response):
"""Return user details from Github account"""
fullname, first_name, last_name = self.get_user_names(
response.get('name')
)
return {'username': response.get('login'),
'email': response.get('email') or '',
'fullname': fullname,
'first_name': first_name,
'last_name': last_name}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
data = self._user_data(access_token)
if not data.get('email'):
try:
emails = self._user_data(access_token, '/emails')
except (HTTPError, ValueError, TypeError):
emails = []
if emails:
email = emails[0]
primary_emails = [
e for e in emails
if not isinstance(e, dict) or e.get('primary')
]
if primary_emails:
email = primary_emails[0]
if isinstance(email, dict):
email = email.get('email', '')
data['email'] = email
return data
def _user_data(self, access_token, path=None):
url = urljoin(self.api_url(), 'user{0}'.format(path or ''))
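        # Note: sending the token as an ``access_token`` query parameter is the
        # legacy GitHub style; current GitHub API versions expect the token in
        # an ``Authorization`` header, so this call may need adjusting against
        # newer API requirements.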
return self.get_json(url, params={'access_token': access_token})
class GithubMemberOAuth2(GithubOAuth2):
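    """Base class for Github backends that additionally verify membership"""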
no_member_string = ''
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
user_data = super(GithubMemberOAuth2, self).user_data(
access_token, *args, **kwargs
)
try:
self.request(self.member_url(user_data), params={
'access_token': access_token
})
except HTTPError as err:
# if the user is a member of the organization, response code
# will be 204, see http://bit.ly/ZS6vFl
if err.response.status_code != 204:
raise AuthFailed(self,
'User doesn\'t belong to the organization')
return user_data
def member_url(self, user_data):
raise NotImplementedError('Implement in subclass')
class GithubOrganizationOAuth2(GithubMemberOAuth2):
"""Github OAuth2 authentication backend for organizations"""
name = 'github-org'
no_member_string = 'User doesn\'t belong to the organization'
def member_url(self, user_data):
return urljoin(
self.api_url(),
'orgs/{org}/members/{username}'.format(
org=self.setting('NAME'),
username=user_data.get('login')
)
)
class GithubTeamOAuth2(GithubMemberOAuth2):
"""Github OAuth2 authentication backend for teams"""
name = 'github-team'
no_member_string = 'User doesn\'t belong to the team'
def member_url(self, user_data):
return urljoin(
self.api_url(),
'teams/{team_id}/members/{username}'.format(
team_id=self.setting('ID'),
username=user_data.get('login')
)
)
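# A minimal configuration sketch (illustrative only, not part of this module).
# The setting names follow the SOCIAL_AUTH_<BACKEND_NAME>_<KEY> convention that
# self.setting('NAME') / self.setting('ID') above resolve against; confirm the
# exact names in the docs linked in the module docstring.
#
# AUTHENTICATION_BACKENDS = (
#     'social_core.backends.github.GithubOrganizationOAuth2',
#     'django.contrib.auth.backends.ModelBackend',
# )
# SOCIAL_AUTH_GITHUB_ORG_KEY = '<oauth-app-client-id>'
# SOCIAL_AUTH_GITHUB_ORG_SECRET = '<oauth-app-client-secret>'
# SOCIAL_AUTH_GITHUB_ORG_NAME = 'my-github-org'   # read via setting('NAME')
# SOCIAL_AUTH_GITHUB_ORG_SCOPE = ['read:org']     # needed for the membership check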
avg_line_length: 32.723577
max_line_length: 76
alphanum_fraction: 0.580124

hexsha: 9629c8a3eb13dfef193097a562b2305f5033f825
size: 518
ext: py
lang: Python
max_stars_repo_path: 00_Original/35_Debugging_und_Qualitaetssicherung/Analyse_des_Laufzeitverhaltens/beispiel_Tracing.py
max_stars_repo_name: felixdittrich92/Python3_book
max_stars_repo_head_hexsha: cd0e2b55aa72c51927d347b70199fb9ed928e06f
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: 00_Original/35_Debugging_und_Qualitaetssicherung/Analyse_des_Laufzeitverhaltens/beispiel_Tracing.py
max_issues_repo_name: felixdittrich92/Python3_book
max_issues_repo_head_hexsha: cd0e2b55aa72c51927d347b70199fb9ed928e06f
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: 00_Original/35_Debugging_und_Qualitaetssicherung/Analyse_des_Laufzeitverhaltens/beispiel_Tracing.py
max_forks_repo_name: felixdittrich92/Python3_book
max_forks_repo_head_hexsha: cd0e2b55aa72c51927d347b70199fb9ed928e06f
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import trace
import sys
def programm():
for i in range(100):
i**2
for j in range(100):
math.sqrt(j)
for k in range(100):
math.log(k+1)
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=0)
tracer.run("programm()")
r = tracer.results()
r.write_results(show_missing=True, coverdir="ergebnis_Tracing")
print("Ergebnis geschrieben nach ergebnis_Tracing/beispiel_Tracing.cover")
avg_line_length: 23.545455
max_line_length: 75
alphanum_fraction: 0.642857

hexsha: fc8e742db99aff6d2def1d3be59cf89186f7d0ba
size: 2763
ext: py
lang: Python
max_stars_repo_path: mail_pusher/utils.py
max_stars_repo_name: qq20004604/Mail-Report-System
max_stars_repo_head_hexsha: 59d2390431251d8ffd0435cab510a37900f2dc17
max_stars_repo_licenses: ["Apache-2.0"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-07-29T08:54:46.000Z
max_stars_repo_stars_event_max_datetime: 2020-07-29T08:54:46.000Z
max_issues_repo_path: mail_pusher/utils.py
max_issues_repo_name: qq20004604/Mail-Report-System
max_issues_repo_head_hexsha: 59d2390431251d8ffd0435cab510a37900f2dc17
max_issues_repo_licenses: ["Apache-2.0"]
max_issues_count: 6
max_issues_repo_issues_event_min_datetime: 2021-03-19T10:24:43.000Z
max_issues_repo_issues_event_max_datetime: 2021-09-22T19:30:43.000Z
max_forks_repo_path: mail_pusher/utils.py
max_forks_repo_name: qq20004604/Mail-Report-System
max_forks_repo_head_hexsha: 59d2390431251d8ffd0435cab510a37900f2dc17
max_forks_repo_licenses: ["Apache-2.0"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from django.utils import timezone
from .models import RegVerifyMailModel, REG_EXPIRE_TIME
from package import mail_client
# Check whether a verification-code email may currently be sent to this user
def can_send_regcode_email(email, ip):
    # Rate-limit rules:
    # 1. at most 1 email within 3 minutes;
    # 2. at most 5 emails within 1 hour;
    # 3. at most 10 emails within 24 hours;
    # the email address and the IP address are checked separately
now = timezone.now().timestamp()
first_check = RegVerifyMailModel.objects.filter(email=email, push_time__gt=now - 60 * 3)
ip_check = RegVerifyMailModel.objects.filter(ip=ip, push_time__gt=now - 60 * 3)
if len(first_check) > 0 or len(ip_check) > 0:
        return 'Only 1 verification email may be sent within 3 minutes'
second_check = RegVerifyMailModel.objects.filter(email=email, push_time__gt=now - 60 * 60)
ip_check = RegVerifyMailModel.objects.filter(ip=ip, push_time__gt=now - 60 * 60)
    if len(second_check) >= 5 or len(ip_check) >= 5:
        return 'Only 5 verification emails may be sent within 60 minutes'
third_check = RegVerifyMailModel.objects.filter(email=email, push_time__gt=now - 60 * 60 * 24)
ip_check = RegVerifyMailModel.objects.filter(ip=ip, push_time__gt=now - 60 * 60 * 24)
    if len(third_check) >= 10 or len(ip_check) >= 10:
        return 'Only 10 verification emails may be sent within 24 hours'
return None
# Check whether the email and verification code are correct
def is_regcode_correct(email, regcode):
now = timezone.now().timestamp()
result = RegVerifyMailModel.objects.order_by('-id').filter(email=email, verify_code=regcode)
if len(result) == 0:
        return 'Incorrect verification code'
    # take the most recent record
result = result[0]
    # check whether the code has expired
if now - result.push_time > REG_EXPIRE_TIME:
        return 'The verification code has expired'
return None
# Send the verification-code email
def send_regcode_email(email, regcode):
# send_result = mail_client.send_mail_test()
    send_result = mail_client.send_mail(receiver=[email],
                                        title='Registration verification',
                                        content=[
                                            '<h4>',
                                            'Your email registration verification code is:',
                                            '</h4>'
                                            '<h2>%s</h2>' % regcode
                                        ])
print(send_result)
return send_result
# Send subscription/feed emails (all pushed through this API)
def send_feeds_mail(receiver_list, title, content):
send_result = mail_client.send_mail(receiver=receiver_list,
title=title,
content=content)
print(send_result)
return send_result
# Send the startup notification email (also pushed through this API)
def send_started_mail():
send_result = mail_client.send_mail(receiver='20004604@qq.com',
                                        title='Mail Report System has started',
                                        content=['Mail Report System has started'])
print(send_result)
return send_result
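# A minimal usage sketch (hypothetical caller, not part of this module). The
# RegVerifyMailModel fields used here (email, ip, push_time, verify_code) are
# inferred from the queries above, and the 6-digit code format is an assumption.
def example_regcode_flow(email, ip):
    import random
    error = can_send_regcode_email(email, ip)
    if error is not None:
        return {'ok': False, 'msg': error}
    regcode = '%06d' % random.randint(0, 999999)
    RegVerifyMailModel.objects.create(
        email=email,
        ip=ip,
        push_time=timezone.now().timestamp(),
        verify_code=regcode,
    )
    send_regcode_email(email, regcode)
    return {'ok': True}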
avg_line_length: 34.974684
max_line_length: 98
alphanum_fraction: 0.5827