blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40e0e34dc1c0d39e08775b92089714d9db4af63e | 484b72139db4e2b21268c55e2f32cae242b5abae | /src/tools/adt/test_adt_sql.py | 4cd7abef19fab96d7bce42de488f3e32fcd25a9b | [] | no_license | PIWEEK/piweekr-back | a1b3887358e32d27f7a21913586f77e020c5eae0 | 375dbf17472270dcf34651ff8a86b7ed460eb311 | refs/heads/master | 2020-04-06T07:11:44.175583 | 2016-09-15T06:38:38 | 2016-09-15T06:38:38 | 62,820,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | import pytest
from .types import ADT_WITH_ID, Field, IntField, StrField
from .relationships import Relationship1N, RoleSingle, RoleMulti, Context
from sqlalchemy.sql import select, outerjoin
from .adt_sql import SQLADTRepository
class Card(ADT_WITH_ID):
deck_id = IntField()
title = StrField()
strength = IntField()
defense = IntField()
class Deck(ADT_WITH_ID):
name = StrField()
class DeckHasCards(Relationship1N):
role_1 = RoleSingle(role_class=Deck, role_name="deck")
role_n = RoleMulti(role_class=Card, role_name="cards", role_fk="deck_id", required=False)
def test_sql_persistence():
repo = SQLADTRepository({
"DB_NAME": "test",
"ECHO": False
})
repo.add_adt_table(Card, "cards")
repo.add_adt_table(Deck, "decks")
repo.create_all_tables()
repo.truncate_all_tables()
with repo.context() as context:
deck = repo.insert_adt(context,
repo.decks,
Deck(
name="Test deck"
)
)
card_1 = repo.insert_adt(context,
repo.cards,
Card(
deck_id=deck.id,
title="Test card #1",
strength=10,
defense=1,
)
)
card_2 = repo.insert_adt(context,
repo.cards,
Card(
deck_id=deck.id,
title="Test card #2",
strength=8,
defense=7,
)
)
with repo.context() as context:
r_deck = repo.retrieve_single_adt(context,
Deck,
select([repo.decks])
.where(repo.decks.c.id == deck.id)
)
assert r_deck.id == deck.id
assert r_deck.name == deck.name
with repo.context() as context:
r_cards = repo.retrieve_adts(context,
Card,
select([repo.cards])
)
assert len(r_cards) == 2
assert card_1.id in [card.id for card in r_cards]
assert card_2.id in [card.id for card in r_cards]
with repo.context() as context:
r_decks = repo.retrieve_joined_adts(context,
Deck, {"decks": Deck, "cards": Card},
select([repo.decks, repo.cards], use_labels=True)
.select_from(outerjoin(
repo.decks, repo.cards, repo.decks.c.id == repo.cards.c.deck_id
))
.where(repo.decks.c.id == deck.id)
)
assert len(r_decks) == 1
r_deck = r_decks[0]
assert r_deck.id == deck.id
assert r_deck.name == deck.name
assert len(context.cards(r_deck)) == 2
assert card_1.id in [card.id for card in context.cards(r_deck)]
assert card_2.id in [card.id for card in context.cards(r_deck)]
| [
"andres.moya@kaleidos.net"
] | andres.moya@kaleidos.net |
70ac93f65865b3fb1662de2524dd00f377c1feea | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03474/s623958359.py | 5c4abb6865ad4a5abd0e9ef526ce516ab50152cb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | A, B = input().split(" ")
S = input()
if S[int(A)] == '-' and S[:int(A)].isdecimal() and S[int(A)+1:int(A)+int(B)+1].isdecimal():
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f402f3c6a9657695f8d857cecbdc279ef499cf31 | 640121a717de7b587c56d2136ee9c981bd7c9660 | /server/dvaui/apps.py | b0d4d87d6ecf7c11817286075e99549266b03bd5 | [
"BSD-3-Clause",
"MIT",
"BSL-1.0",
"Apache-2.0"
] | permissive | jbkarle/DeepVideoAnalytics | dd4535e990a7f3af9e53843e6df97340ee0c6b71 | 9a3717ea30a86f97511a150d6538e309e19b7fbc | refs/heads/master | 2020-03-31T15:01:52.854557 | 2018-10-07T17:11:37 | 2018-10-07T17:11:37 | 152,320,716 | 1 | 0 | null | 2018-10-09T21:01:10 | 2018-10-09T21:01:09 | null | UTF-8 | Python | false | false | 150 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class DvauiConfig(AppConfig):
name = 'dvaui'
| [
"akshayubhat@gmail.com"
] | akshayubhat@gmail.com |
6403015b2e85eccd4b5985e9530f4e7ed6ce27bb | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /P.O.R.-master/pirates/battle/FishingRod.py | a362c40a58d8ec0d8eed09445dd7b6dcb932ffd6 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,038 | py | import Weapon
import WeaponGlobals
from pirates.audio import SoundGlobals
from pirates.audio.SoundGlobals import loadSfx
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
from pirates.uberdog.UberDogGlobals import InventoryType
from pirates.piratesbase import PLocalizer
from pirates.effects import PolyTrail
import random
class FishingRod(Weapon.Weapon):
modelTypes = [
'models/handheld/pir_m_hnd_tol_fishingPole',
'models/handheld/pir_m_hnd_tol_fishingPoleMed',
'models/handheld/pir_m_hnd_tol_fishingPoleLarge']
models = { }
icons = { }
vertex_list = [
Vec4(0.0, 0.40000000000000002, 0.0, 1.0),
Vec4(0.0, 2.0, 0.0, 1.0),
Vec4(-0.55000000000000004, 2.9500000000000002, 0.0, 1.0)]
motion_color = {
InventoryType.CutlassWeaponL1: [
Vec4(0.29999999999999999, 0.40000000000000002, 0.10000000000000001, 0.5),
Vec4(0.29999999999999999, 0.29999999999999999, 0.29999999999999999, 0.5),
Vec4(0.59999999999999998, 0.59999999999999998, 0.59999999999999998, 0.5)],
InventoryType.CutlassWeaponL2: [
Vec4(0.10000000000000001, 0.20000000000000001, 0.40000000000000002, 0.5),
Vec4(0.40000000000000002, 0.5, 0.69999999999999996, 0.5),
Vec4(0.5, 0.5, 0.90000000000000002, 0.75)],
InventoryType.CutlassWeaponL3: [
Vec4(1, 1, 0.40000000000000002, 0.5),
Vec4(0.40000000000000002, 0.5, 0.59999999999999998, 0.5),
Vec4(0.69999999999999996, 0.69999999999999996, 0.80000000000000004, 0.75)],
InventoryType.CutlassWeaponL4: [
Vec4(0.59999999999999998, 0.59999999999999998, 0.75, 1),
Vec4(0.59999999999999998, 0.5, 0.20000000000000001, 1),
Vec4(0.59999999999999998, 0.59999999999999998, 0.40000000000000002, 1)],
InventoryType.CutlassWeaponL5: [
Vec4(1, 0.20000000000000001, 0.20000000000000001, 0.5),
Vec4(0.5, 0.5, 0.5, 0.75),
Vec4(0.69999999999999996, 0.69999999999999996, 0.90000000000000002, 1)],
InventoryType.CutlassWeaponL6: [
Vec4(1, 1, 0, 0.5),
Vec4(0.29999999999999999, 0.29999999999999999, 0.29999999999999999, 1),
Vec4(0.10000000000000001, 0.10000000000000001, 0.10000000000000001, 1)] }
def __init__(self, itemId):
Weapon.Weapon.__init__(self, itemId, 'fishingRod')
def loadModel(self):
self.prop = self.getModel(self.itemId)
self.prop.reparentTo(self)
def delete(self):
self.endAttack(None)
self.removeTrail()
Weapon.Weapon.delete(self)
def getDrawIval(self, av, ammoSkillId = 0, blendInT = 0.10000000000000001, blendOutT = 0):
track = Parallel(Func(base.playSfx, self.drawSfx, node = av, cutoff = 60), av.actorInterval('sword_draw', playRate = 1.5, endFrame = 15, blendInT = blendInT, blendOutT = blendOutT), Sequence(Wait(0.187), Func(self.attachTo, av)))
return track
def getReturnIval(self, av, blendInT = 0, blendOutT = 0.10000000000000001):
track = Parallel(Func(base.playSfx, self.returnSfx, node = av, cutoff = 60), av.actorInterval('sword_putaway', playRate = 2, endFrame = 35, blendInT = blendInT, blendOutT = blendOutT), Sequence(Wait(0.56000000000000005), Func(self.detachFrom, av)))
return track
def attachTo(self, av):
Weapon.Weapon.attachTo(self, av)
if hasattr(av, 'isGhost') and av.isGhost:
return None
self.createTrail(av)
def detachFrom(self, av):
Weapon.Weapon.detachFrom(self, av)
self.removeTrail()
def createTrail(self, target):
if self.isEmpty():
return None
if not self.motion_trail:
self.motion_trail = PolyTrail.PolyTrail(target, self.vertex_list, self.motion_color.get(self.itemId))
self.motion_trail.reparentTo(self)
self.motion_trail.setUseNurbs(1)
card = loader.loadModel('models/effects/swordtrail_effects')
tex = card.find('**/swordtrail_lines').findTexture('*')
self.motion_trail.setTexture(tex)
self.motion_trail.setBlendModeOn()
if self.itemId == InventoryType.CutlassWeaponL6:
self.motion_trail.setBlendModeOff()
card.removeNode()
def removeTrail(self):
if self.motion_trail:
self.motion_trail.destroy()
self.motion_trail = None
def getBlurColor(self):
return self.motion_color.get(self.itemId)[2]
def beginAttack(self, av):
Weapon.Weapon.beginAttack(self, av)
def setupSounds(cls):
FishingRod.hitSfxs = (loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_CLASHCLANG), loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_SWIPECLANG_01), loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_SWIPECLANG_02), loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_SWIPECLANG_03))
FishingRod.missSfxs = (loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_SWOOSH_01), loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_SWOOSH_02))
FishingRod.skillSfxs = {
InventoryType.FishingRodStall: loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_HACK),
InventoryType.FishingRodPull: loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_HACK),
InventoryType.FishingRodHeal: loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_HACK),
InventoryType.FishingRodTug: loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_HACK),
InventoryType.FishingRodSink: loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_HACK),
InventoryType.FishingRodOceanEye: loadSfx(SoundGlobals.SFX_WEAPON_CUTLASS_SLASH) }
FishingRod.drawSfx = loadSfx(SoundGlobals.SFX_MINIGAME_FISHING_REEL_END)
FishingRod.returnSfx = loadSfx(SoundGlobals.SFX_MINIGAME_FISHING_ROD_OUT)
setupSounds = classmethod(setupSounds)
def getHitSfx():
return FishingRod.hitSfxs
def getMissSfx():
return FishingRod.missSfxs
| [
"brandoncarden12345@gmail.com"
] | brandoncarden12345@gmail.com |
6990e4b87e0734225f804e3c1b915ae8f9869911 | 6ecff67d6103ddbd787f78c35182722b83b8a37e | /백준/Python/알고파/최단경로/1613.py | 4ca4e3cd15c8a04c41309bc939b9e462ad70c7a2 | [] | no_license | jsungmin6/Algorithm | 9ef2339aa00921e7df756a8dff569954a008c118 | bc1ea9de9f7ba3f1aa6616ebef8719540d72e0bf | refs/heads/master | 2023-05-27T06:24:16.123307 | 2021-06-11T09:22:21 | 2021-06-11T09:22:21 | 259,299,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 824 | py | # 역사
# 풀이 과정
'''
여러 출발점과 도착점의 경로가 있냐 없냐를 판단하는 거기 때문에 플로이드 와샬을 사용해
dist를 전부 구한 후 판단한다.
'''
import sys
import collections
input = sys.stdin.readline
n, k = map(int, input().split())
dist = [[0 if i == j else sys.maxsize for i in range(n+1)] for j in range(n+1)]
for _ in range(k):
u, v = map(int, input().split())
dist[u][v] = 1
for k in range(n+1):
for i in range(n+1):
for j in range(n+1):
dist[i][j] = min(dist[i][j], dist[i][k]+dist[k][j])
s = int(input())
for _ in range(s):
s, e = map(int, input().split())
if dist[s][e] == sys.maxsize and dist[e][s] == sys.maxsize:
print(0)
elif dist[s][e] == sys.maxsize:
print(1)
else:
print(-1)
| [
"jsungmin506@gmail.com"
] | jsungmin506@gmail.com |
d1a8f1afed15326e2da00c5d6dc38c274523879a | 1eab07420ddbc6774b0dd6f515da5110ed7344af | /brax/experimental/braxlines/experiments/mimax_sweep.py | 09f8390c0598d7fac24c00e990d51f79f848ae95 | [
"Apache-2.0"
] | permissive | TedTinker/brax | a16097d87607a8bdee46f5d0784fff29e66ca22f | 1d5e70c0c96d1a0dde68901bdabef2c9431c32b3 | refs/heads/main | 2023-08-14T08:22:42.316233 | 2021-09-28T20:08:43 | 2021-09-28T20:08:43 | 410,128,255 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,321 | py | # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MI-Max Sweep."""
AGENT_MODULE = 'brax.experimental.braxlines.vgcrl.train'
CONFIG = [
dict(
env_name=['ant', 'halfcheetah'],
obs_indices='vel',
algo_name=['gcrl', 'diayn', 'cdiayn', 'diayn_full'],
obs_scale=5.0,
seed=list(range(10)),
normalize_obs_for_disc=False,
evaluate_mi=True,
evaluate_lgr=True,
env_reward_multiplier=0.0,
spectral_norm=True,
ppo_params=dict(
num_timesteps=int(2.5 * 1e8),
reward_scaling=10,
episode_length=1000,
normalize_observations=True,
action_repeat=1,
unroll_length=5,
num_minibatches=32,
num_update_epochs=4,
discounting=0.95,
learning_rate=3e-4,
entropy_cost=1e-2,
num_envs=2048,
batch_size=1024,
)),
dict(
env_name=[
'humanoid',
],
obs_indices='vel',
algo_name=['gcrl', 'diayn', 'cdiayn', 'diayn_full'],
obs_scale=5.0,
seed=list(range(10)),
normalize_obs_for_disc=False,
evaluate_mi=True,
evaluate_lgr=True,
env_reward_multiplier=0.0,
spectral_norm=True,
ppo_params=dict(
num_timesteps=int(2.5 * 1e8),
log_frequency=20,
reward_scaling=0.1,
episode_length=1000,
normalize_observations=True,
action_repeat=1,
unroll_length=10,
num_minibatches=16,
num_update_epochs=8,
discounting=0.97,
learning_rate=1e-4,
entropy_cost=1e-3,
num_envs=2048,
batch_size=1024,
)),
]
| [
"erikfrey@google.com"
] | erikfrey@google.com |
69cf9b378381c44f262b107a06c246bdbc38bfdd | a148a0a0fb3209c754b9d6836baa837d3c02e30f | /garbage_code_dont_delete_btw/find_optimized_parameters.py | 426e19aca057cbbe4dbbada8cc902f56d9b0225a | [] | no_license | GreenBlitz/Deep-Space-Vision | 645b64f98bf26500ba501651d332a8cd82f0f340 | c0d8ad10cf42fbac79b42141a44c3bbb00beabb7 | refs/heads/master | 2020-03-28T19:32:30.563636 | 2018-12-13T16:59:21 | 2018-12-13T16:59:21 | 148,985,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,813 | py | import cv2
import random
import numpy as np
def create_params(shape, factor):
return np.random.rand(*shape)*factor
def get_score(item, frame, bbox, func, reg):
frametag = func(frame, item)
f = frametag[bbox[1]:bbox[1] + bbox[3], bbox[0]:bbox[0] + bbox[2]]
s = f.sum()
return s/f.size - (frametag.sum() - s)/(frametag.size - f.size) - reg*(np.abs(item[:,0] - item[:,1]).sum())
def create_child(sur, alpha, factor):
child = np.sign(np.random.rand(*sur[0].shape))* 10**(-alpha * np.random.rand(*sur[0].shape))*factor
for i in range(len(sur[0])):
child[i] += random.choice(sur)[i]
return child
def find_optimized_parameters(function, images, bboxes, p_shape, gen_size=50, survivors_size=0, p_factor=255, alpha=50, max_iter=100, gen_random=5, c_factor=1, range_regulator=0.5):
gen = []
scores = []
all_scores = []
best = None
max_score = -np.inf
for i in range(gen_size):
gen.append(create_params(p_shape, p_factor))
for _ in range(max_iter):
scores = []
all_scores.append(0)
for i in gen:
sum = 0
for j, im in enumerate(images):
sum += get_score(i, im, bboxes[j], function, range_regulator)
scores.append([i, sum])
all_scores[_] = max(all_scores[_], sum)
if sum > max_score:
max_score = sum
best = i
survivors = list(map(lambda x: x[0].flatten(), sorted(scores, key=lambda x: x[1], reverse=True)))[:survivors_size]
gen.clear()
for i in range(gen_size-gen_random):
gen.append(create_child(survivors, alpha, c_factor).reshape(p_shape))
for i in range(gen_random):
gen.append(create_params(shape=p_shape, factor=p_factor))
return best, all_scores | [
"idohaineman@gmail.com"
] | idohaineman@gmail.com |
b41ba04907184cd37b158434c5d33df39713c56e | 48519d4299911ce2a3ca70043079df419155c156 | /typistry/test/validations_test.py | 2ca7ee5e91bfad1903c8486d39490fa063915396 | [
"Apache-2.0"
] | permissive | kyprifog/typistry | 18c3e010925db5b4a2422bc6eefb69d5da4c2ab9 | aab285d909791106154874eb5331b65fc03849ae | refs/heads/master | 2023-03-12T21:29:09.998425 | 2021-03-02T16:24:48 | 2021-03-02T16:24:48 | 343,979,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,756 | py | from os import path
from shutil import copytree, rmtree
from typing import List, Union, Any, Tuple
from typistry.test.support.types.other_class import OtherClass
from typistry.protos.invalid_object import InvalidObject
from typistry.test.support.types.test_class import TestClass
from typistry.util.path import from_root
from typistry.validators.base import validate_files, filter_type
class TestValidations:
def schemas_path(self) -> str:
return from_root("/test/support/validations/")
def yaml_path(self) -> str:
return from_root("/test/support/yaml/")
def all_protos(self) -> List[Union[Any, InvalidObject]]:
return validate_files(self.yaml_path(), self.schemas_path())
def filter_by_types(self, all: List[Any]) -> Tuple[List[TestClass], List[OtherClass], List[InvalidObject]]:
test_class = filter_type(all, TestClass)
other_class = filter_type(all, OtherClass)
invalid = filter_type(all, InvalidObject)
return (test_class, other_class, invalid)
def test_single_file(self):
yaml_path = from_root("/test/support/yaml/test_class/good_1.yaml")
obj = validate_files(yaml_path, self.schemas_path())
assert(len(obj) == 1)
assert(isinstance(obj[0], TestClass))
def test_directory(self):
all = self.all_protos()
test_class, other_class, invalid = self.filter_by_types(all)
assert(len(test_class) == 2)
assert(len(other_class) == 1)
# Tests protoclass build definition adds 10, otherwise would be 2
assert(other_class[0].test == 12)
assert(len(invalid) == 3)
def test_default_schema_path(self):
default_path = "validations/"
if not path.exists(default_path):
copytree(self.schemas_path(), default_path)
all = validate_files(self.yaml_path())
assert(len(all) == 6)
test_class, other_class, invalid = self.filter_by_types(all)
assert(len(test_class) == 2)
assert(len(other_class) == 1)
assert(len(invalid) == 3)
if path.exists(default_path):
rmtree(default_path)
def test_to_class(self):
test_class_all = validate_files(self.yaml_path(), self.schemas_path(), to_class = TestClass)
test_class, other_class, invalid = self.filter_by_types(test_class_all)
assert(len(test_class) == 2)
assert(len(other_class) == 0)
assert(len(invalid) == 4)
other_class_all = validate_files(self.yaml_path(), self.schemas_path(), to_class = OtherClass)
test_class, other_class, invalid = self.filter_by_types(other_class_all)
assert(len(test_class) == 0)
assert(len(other_class) == 1)
assert(len(invalid) == 5)
| [
"kprifogle1@gmail.com"
] | kprifogle1@gmail.com |
d97ca44c9ac250ee5169fb9662a6ae9b5cd84709 | 3bd961816fe9b9048108f8a5a254b931dd79bde4 | /manga_py/providers/mangamew_com.py | 468cfc62dbc030c91cf86ee739264971ff06e7d1 | [
"MIT"
] | permissive | eduhoribe/manga-py | 6243115549d78c1599c6b043fe7cd897e2f517d3 | fe7eb2e08532b3c75b4f7ac8cc4132f0e7a65eb4 | refs/heads/stable_1.x | 2023-01-14T01:48:34.873530 | 2020-11-17T04:30:15 | 2020-11-17T04:30:15 | 307,992,359 | 1 | 0 | MIT | 2020-11-15T00:00:45 | 2020-10-28T11:18:18 | Python | UTF-8 | Python | false | false | 1,069 | py | from manga_py.provider import Provider
from .helpers.std import Std
class MangaMewCom(Provider, Std):
_type = 'manga'
def get_chapter_index(self) -> str:
re = r'%s/[^/]+/.+?-(\d+(?:-\d+)?)-\d+' % self._type
return self.re.search(re, self.chapter).group(1)
def get_main_content(self):
url = self.get_url()
if url.find('/' + self._type + '/') == -1: # not found
a = self.html_fromstring(url, 'h1.name a', 0)
url = a.get('href')
return self.http_get(url)
def get_manga_name(self) -> str:
content = self.http_get(self.get_url())
return self.text_content(content, 'h1.name a,h1.title')
def get_chapters(self):
return self._elements('.chapter .item a')[::-1]
def get_files(self):
parser = self.html_fromstring(self.chapter)
return self._images_helper(parser, '#content .item > img')
def get_cover(self) -> str:
return self._cover_from_content('.images img')
def book_meta(self) -> dict:
pass
main = MangaMewCom
| [
"sttv-pc@mail.ru"
] | sttv-pc@mail.ru |
5dabeb4a2c1b694a6e37ad5f0562da8805237de6 | adaf5d5cd4c46db0387f6dfd7de34d38cf3b06d0 | /Commands/Mtg.py | 2b91b0e30d456e99519e5a1ddd864ef11a563fa3 | [
"MIT"
] | permissive | CrushAndRun/PyMoronBot-LugNut | b0f23437a18fb27ee22313469ad2a396ddaa8f13 | d695d8f36b23fc584b3c7d795c12a9e4577c806b | refs/heads/master | 2020-02-26T13:42:41.197328 | 2017-12-07T15:36:58 | 2017-12-07T15:36:58 | 67,321,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,358 | py | # -*- coding: utf-8 -*-
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from Utils import WebUtils
import re
from bs4 import BeautifulSoup
class Mtg(CommandInterface):
triggers = ['mtg', 'mtgf']
help = 'mtg(f) <card name> - fetches details of the Magic: The Gathering card you specify ' \
'from gatherer.wizards.com. mtgf includes the flavour text, if it has any'
def execute(self, message):
"""
@type message: IRCMessage
"""
searchTerm = 'http://gatherer.wizards.com/pages/search/default.aspx?name='
for param in message.ParameterList:
searchTerm += '+[%s]' % param
webPage = WebUtils.fetchURL(searchTerm)
soup = BeautifulSoup(webPage.body)
name = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_nameRow'})
if name is None:
searchResults = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_searchResultsContainer'})
if searchResults is None:
return IRCResponse(ResponseType.Say, 'No cards found: ' + searchTerm, message.ReplyTo)
else:
cardItems = searchResults.find_all(class_='cardItem')
# potentially return first item here
return IRCResponse(ResponseType.Say, '{0} cards found: {1}'.format(len(cardItems), searchTerm), message.ReplyTo)
name = name.find('div', 'value').text.strip()
types = u' | T: ' + soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_typeRow'}).find('div', 'value').text.strip()
rarity = u' | R: ' + soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_rarityRow'}).find('div', 'value').text.strip()
manaCost = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_manaRow'})
if manaCost is not None:
manaCost = unicode(manaCost.find('div', 'value'))
manaCost = u' | MC: ' + self.translateSymbols(manaCost)
manaCost = re.sub('<[^>]+?>', '', manaCost)
manaCost = manaCost.replace('\n', '')
else:
manaCost = u''
convCost = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_cmcRow'})
if convCost is not None:
convCost = u' | CMC: ' + convCost.find('div', 'value').text.strip()
else:
convCost = u''
cardText = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_textRow'})
if cardText is not None:
cardTexts = cardText.find_all('div', 'cardtextbox')
texts = []
for text in cardTexts:
text = self.translateSymbols(text)
text = re.sub('<[^>]+?>', '', text)
texts.append(text)
cardText = u' | CT: ' + u' > '.join(texts)
else:
cardText = u''
flavText = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_FlavorText'})
if message.Command.endswith('f') and flavText is not None:
flavTexts = flavText.find_all('div', 'cardtextbox')
texts = []
for text in flavTexts:
texts.append(unicode(text.text))
flavText = u' | FT: ' + ' > '.join(texts)
else:
flavText = u''
powTough = soup.find('div', {'id': 'ctl00_ctl00_ctl00_MainContent_SubContent_SubContent_ptRow'})
if powTough is not None:
powTough = u' | P/T: ' + powTough.find('div', 'value').text.strip().replace(' ', '')
else:
powTough = u''
reply = name + manaCost + convCost + types + cardText + flavText + powTough + rarity
return IRCResponse(ResponseType.Say, reply, message.ReplyTo)
@classmethod
def translateSymbols(cls, text):
text = unicode(text)
text = re.sub(r'<img.+?name=(tap).+?>', r'Tap', text) # tap
text = re.sub(r'<img.+?name=([0-9]{2,}).+?>', r'\1', text) # long numbers
text = re.sub(r'<img.+?name=([^&"])([^&"]).+?>', r'{\1/\2}', text) # hybrids
text = re.sub(r'<img.+?name=([^&"]+).+?>', r'\1', text) # singles and any 'others' left over
return text
| [
"matthewcpcox@gmail.com"
] | matthewcpcox@gmail.com |
e8dd5fdaccd0096013f6662954213832a0879e9a | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Link/LinkChecker-9.3/linkcheck/checker/__init__.py | 62202815b17c0292ffab6e86859598e16e02a15a | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:ed0ddaf86ed59f018b4cf89cd2abc32f6e966da1ceeb748d5678f763bef305b1
size 6120
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
199e2fbaeb8a63f651532d83d07851ad76bdda71 | 3c92c92f588ba1156c3579683d3d8b7d12aef652 | /test.py | caf3bab175467be3b3c5100db5eaf6157fac8118 | [] | no_license | JoseAVallejo12/sempliMl | 2dace46732c67f9a2b1a035db449e1ee7170a77c | 3117fcd5a473f0fe7756d58f8707cb447193d7fc | refs/heads/master | 2022-12-26T19:23:30.429927 | 2020-10-13T19:45:17 | 2020-10-13T19:45:17 | 303,531,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | #!env/bin/python3
import requests
# Response a newUser random using the request.get and parsing to object with json() method
params = requests.get('https://lv2394qpu0.execute-api.us-east-1.amazonaws.com/dev/user/random').json()
# Define the headers for sent an request to API clostering customer
headers = {'Content-Type': 'application/json'}
# Response a clostering user using the request.get and parsing to object with json() method
res = requests.post("https://lv2394qpu0.execute-api.us-east-1.amazonaws.com/dev/user/cluster", params=params, headers=headers).json()
# Print inf of user
print(f'user data sent: {params}')
# Print cluster user
print(f"clustering user: {res}") | [
"josealfredovallejo25@gmail.com"
] | josealfredovallejo25@gmail.com |
0a3898ac1c39e49d42d319ba852271926310d148 | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/pybites/PyBites-master/NAMES.PY | 31a0aabe5eab8555bb9e8f89529c771d14e56474 | [
"MIT"
] | permissive | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 858 | py | NAMES = ['arnold schwarzenegger', 'alec baldwin', 'bob belderbos',
'julian sequeira', 'sandra bullock', 'keanu reeves',
'julbob pybites', 'bob belderbos', 'julian sequeira',
'al pacino', 'brad pitt', 'matt damon', 'brad pitt']
def dedup_and_title_case_names(names):
"""Should return a list of names, each name appears only once"""
return list({name.title() for name in names})
def sort_by_surname_desc(names):
"""Returns names list sorted desc by surname"""
names = dedup_and_title_case_names(names)
return sorted(names,
key=lambda x: x.split()[-1],
reverse=True)
def shortest_first_name(names):
"""Returns the shortest first name (str)"""
names = dedup_and_title_case_names(names)
names = [name.split()[0] for name in names]
return min(names, key=len)
| [
"sergejyurskyj@yahoo.com"
] | sergejyurskyj@yahoo.com |
52475cb0f587a30f869a90240fe288a74769a8a0 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_1_1_neat/16_1_1_shaun_lee_lastword.py | 162809f1c2e6062991a66a8085d652bce0400c30 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 648 | py | from __future__ import print_function
import sys
def read_input(in_file):
T = int(in_file.readline().strip())
result = [line.strip() for line in in_file]
return result
def check_case(S):
result = ""
for c in S:
if c + result > result + c:
result = c + result
else:
result += c
return result
def main():
input_filename = sys.argv[1]
with open(input_filename) as input_file:
case_no = 0
for case in read_input(input_file):
case_no += 1
print("Case #" + str(case_no) + ": " + check_case(case))
if __name__ == '__main__':
main()
| [
"[dhuo@tcd.ie]"
] | [dhuo@tcd.ie] |
e7dfab6a95b879c74086ca60e363d1ccef110c97 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /services/web/server/src/simcore_service_webserver/scicrunch/__init__.py | 338202607956135f6b5d4b7eb2066805397f8190 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 478 | py | """
Submodule to interact with K-Core's https://scicrunch.org service
- client to validate and get info about RRIDs via scicrunch's API (service_client)
- keeps validated RRIDs in pg-database (scicrunch.db)
- define models for all interfaces: scicrunch API, postgres DB and webserver API (scicrunch_models)
NOTE: should have no dependencies with other modules in this service
Initial design: https://github.com/ITISFoundation/osparc-simcore/pull/2045
"""
| [
"noreply@github.com"
] | ITISFoundation.noreply@github.com |
53dc5dbad44d62299a2771b4f46026d73806497f | 3c750d4d60660fdf6ef84d7b7ab9663fb76d0fa1 | /sopht/numeric/eulerian_grid_ops/poisson_solver_3d/scipy_fft_3d.py | 4e90e7708b6623f2626783e66e3daaf05b5eb5bd | [
"MIT"
] | permissive | SophT-Team/SophT | 25d157a17734600e9aa4f522b4574bfefe202bc7 | 99a094e0d6e635e5b2385a69bdee239a4d1fb530 | refs/heads/main | 2023-08-31T21:14:10.304592 | 2023-08-31T17:00:38 | 2023-08-31T17:00:38 | 498,451,510 | 2 | 2 | MIT | 2023-09-12T15:37:31 | 2022-05-31T18:25:12 | Python | UTF-8 | Python | false | false | 457 | py | """Create reference FFT operations via scipy in 3D."""
import numpy as np
from scipy.fft import irfftn, rfftn
def fft_ifft_via_scipy_kernel_3d(
fourier_field: np.ndarray,
inv_fourier_field: np.ndarray,
field: np.ndarray,
num_threads: int = 1,
) -> None:
"""Perform reference FFT operations via scipy."""
fourier_field[...] = rfftn(field, workers=num_threads)
inv_fourier_field[...] = irfftn(fourier_field, workers=num_threads)
| [
"bhosale2@illinois.edu"
] | bhosale2@illinois.edu |
d5c82675fd32505beabe8291bcae1e2d6bd02ffa | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r9/Gen/DecFiles/options/46000030.py | 67f8dc02c09f3261e98f8ceb9313e2ba312969dc | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r9/Gen/DecFiles/options/46000030.py generated: Fri, 27 Mar 2015 16:10:12
#
# Event Type: 46000030
#
# ASCII decay Descriptor: pp -> (X -> ~chi_10 -> (l q q, l l l) + jet ... )
#
from Configurables import Generation
Generation().EventType = 46000030
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/BRpVNeutralino_m0600_m12400.dec"
Generation().Special.CutTool = "PythiaLSP"
from Configurables import LHCb__ParticlePropertySvc
LHCb__ParticlePropertySvc().OtherFiles = ["$DECFILESROOT/ppfiles/mSUGRA_m0600_m12400.tbl"]
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/SusyBRpV.py" )
from Configurables import PythiaProduction
Generation().Special.addTool( PythiaProduction )
Generation().Special.PythiaProduction.SLHASpectrumFile = "mSUGRA_m0600_m12400.LHspc"
| [
"slavomirastefkova@b2pcx39016.desy.de"
] | slavomirastefkova@b2pcx39016.desy.de |
a3b2f4ad14a1175927087259c39561072d324996 | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/app/api_service/nlp_processing_20210124215235.py | 849149836d612782e4ed41b1146fa410bb3532f6 | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,301 | py | import csv
import json
import numpy as np
import sklearn
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import numpy
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.layers import LSTM, GRU,Bidirectional, Flatten, Dense
from keras_self_attention import SeqSelfAttention
import csv, re
import json
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from keras import optimizers
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras_self_attention import SeqSelfAttention, SeqWeightedAttention
# Map each intent label to its class index in the model's softmax output.
dict_intent={
    'build_item':0,
    'support_socket':1,
    'counter':2,
    'be_countered':3,
    'skill_up':4,
    'how_to_play':5,
    'combo':6,
    'combine_with':7,
    'how_to_use_skill':8,
    'introduce':9
}
# Inverse mapping: class index -> intent label. Relies on dicts preserving
# insertion order (guaranteed on Python 3.7+).
dict_digit2intent = {}
key = 0
for i in dict_intent.keys():
    dict_digit2intent[key] = i
    key += 1
# Build an alternation regex "name1|name2|..." from the champion list file;
# used by get_entity() to spot champion names in user input.
f = open('./my_upload/champions.txt', "r")
reg = ""
for cham in f:
    # print (cham.split ('\n')[0])
    reg += cham.split ('\n')[0] + '|'
print (reg)
# Drop the trailing '|' left by the loop above.
reg = reg[:-1]
print("REG: {}".format(reg))
f.close()
# Skill letters recognised as entities.
skills = ['q', 'w', 'e' , 'r']
def get_entity(content):
    """Extract the champion name and skill letter mentioned in *content*.

    Returns a ``(hero, skill)`` pair; either element is "" when nothing
    matches. Uses the module-level ``reg`` champion regex and ``skills``
    letter list.
    """
    match = re.search(reg, content)
    hero = match.group() if match is not None else ""
    # First whitespace-separated token that is a known skill letter, if any.
    skill = next((token for token in content.split(" ") if token in skills), "")
    return hero, skill
def load_model():
    """Build the intent-classification network and load its trained weights.

    Architecture: Embedding -> bidirectional LSTM -> Flatten -> softmax over
    the 10 intent classes. Weights are read from ./my_upload/hoaf13-nlp.h5.
    """
    layer_stack = [
        Embedding(208, 5248, input_length=17),
        Bidirectional(LSTM(128, return_sequences=True)),
        Flatten(),
        Dense(10, activation='softmax'),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.load_weights('./my_upload/hoaf13-nlp.h5')
    model.summary()
    return model
def process_content(reg, content):
    """Replace every occurrence of the first *reg* match in *content* with "{hero}"."""
    match = re.search(reg, content)
    if match is None:
        return content
    return content.replace(match.group(), "{hero}")
def process_data(model, content):
    """Classify *content* into an intent and extract its entities.

    Returns ``(intent_label, confidence, hero, skill)``.
    NOTE(review): the bag-of-words vocabulary is re-read from disk and the
    tokenizer re-fitted on every call; this assumes ./my_upload/bow.txt is
    exactly the vocabulary the model was trained with — TODO confirm.
    """
    # Rebuild the training vocabulary as one whitespace-joined string.
    f = open('./my_upload/bow.txt', 'r')
    dictionary = ''
    for word in f:
        dictionary += word + " "
    f.close()
    data = [dictionary]
    token_obj = Tokenizer()
    token_obj.fit_on_texts(data)
    # Must match the model's configured input_length.
    max_len = 17
    X_train_token = token_obj.texts_to_sequences([content])
    X_pad = pad_sequences(X_train_token, maxlen=max_len, padding='post')
    result = model.predict(X_pad)
    # Index of the most probable intent class.
    intent = np.argmax(result)
    hero, skill = get_entity(content)
    return dict_digit2intent[intent], result[0][intent], hero, skill
model_nlp = load_model()
m | [
"samartcall@gmail.com"
] | samartcall@gmail.com |
8fe94a63a6e963f1ad1e1f239fe6261d16869520 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /vBwRuR4mF5yQ4cNuc_17.py | cf1ee398857363c1811e1931002d97f5cddeae37 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py |
def count_missing_nums(lst):
    """Count the integers absent from the span of numeric strings in *lst*.

    Non-numeric entries are ignored; the span runs from the smallest to
    the largest parsed value, inclusive.
    """
    present = {int(token) for token in lst if token.isdigit()}
    return sum(1 for value in range(min(present), max(present) + 1) if value not in present)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
092e0a9e39100e338aa1e0c4db77d873972d49ee | 306045a1cd0fb362f46d4db88311f442311bbc16 | /examples/idioms/programs/158.2163-random-sublist.py | 3f736d4ad21fd5030145e3bb3f6f9a976ee7b66f | [
"MIT"
] | permissive | laowantong/paroxython | 608c9010a2b57c8f7ed5ea309e24035c2b2e44a3 | a6d45829dd34f046d20e5bae780fbf7af59429cb | refs/heads/master | 2023-09-01T05:18:29.687916 | 2022-11-07T17:40:31 | 2022-11-07T17:40:31 | 220,820,424 | 36 | 5 | MIT | 2023-09-08T04:44:58 | 2019-11-10T16:54:56 | Python | UTF-8 | Python | false | false | 566 | py | """Random sublist.
Create a new list _y from randomly picking exactly _k elements from list _x.
It is assumed that _x has at least _k elements.
Each element must have same probability to be picked.
Each element from _x must be picked _at _most _once.
Explain if the original ordering is preserved or not.
Source: programming-idioms.org
"""
# Implementation author: Oldboy
# Created on 2017-10-28T13:10:20.094421Z
# Last modified on 2018-06-24T13:08:32.325774Z
# Version 2
# The original ordering is not preserved.
import random
# `x` (source list) and `k` (sample size) are assumed bound by the
# surrounding idiom harness — TODO confirm.
# random.sample picks k distinct elements without replacement; the original
# ordering of x is NOT preserved in y.
y = random.sample(x, k)
| [
"laowantong@users.noreply.github.com"
] | laowantong@users.noreply.github.com |
8898fd8584b627d6221f4ec6682599576dd3016c | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/429/usersdata/308/99324/submittedfiles/jogoDaVelha_BIB.py | 3cd55426284682db76a7c18bcbf132edaafce8bb | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,553 | py | # -*- coding: utf-8 -*-
from random import randint
import copy
def verificaVencedor(tabela):
    """Return the winner's symbol ('X' or 'O'), or ' ' when nobody has won.

    Checks every row, every column and both diagonals. Fixes two defects of
    the original: an all-blank column no longer returns ' ' early (which
    masked a real winner elsewhere on the board), and the leftover debug
    prints are removed.
    """
    # Collect all eight winnable triples.
    triplas = []
    for a in range(0, 3):
        triplas.append([tabela[a][0], tabela[a][1], tabela[a][2]])  # row a
        triplas.append([tabela[0][a], tabela[1][a], tabela[2][a]])  # column a
    triplas.append([tabela[0][0], tabela[1][1], tabela[2][2]])      # main diagonal
    triplas.append([tabela[2][0], tabela[1][1], tabela[0][2]])      # anti-diagonal
    for trio in triplas:
        # Ignore blank triples: three empty cells are not a win.
        if trio[0] != ' ' and trio[0] == trio[1] == trio[2]:
            return trio[0]
    return ' '
def verificaIgualdade(valora, valorb):
    """Return True when the two values compare equal, False otherwise."""
    return valora == valorb
def solicitaNomeDoJogador():
    """Prompt for the player's name (or nickname) and return it."""
    return str(input('Qual o seu nome (ou apelido)? '))
def solicitaSimboloDoHumano():
    """Ask which symbol the human wants; return [human_symbol, computer_symbol].

    Keeps prompting until the (upper-cased) answer is 'X' or 'O'; the
    computer gets the other symbol.
    """
    while True:
        simbolo = str.upper(input('Qual simbolo você deseja utilizar no jogo? '))
        if simbolo in ('X', 'O'):
            break
    scomputador = 'O' if simbolo == 'X' else 'X'
    return [simbolo, scomputador]
def sorteioPrimeiraJogada(nomes):
    """Draw 0 or 1 at random, announce the draw winner, return the index."""
    vencedor = randint(0, 1)
    print('Vencedor do sorteio para inicio do jogo: %s' % nomes[vencedor][0])
    return vencedor
def validaJogada(jogada, visual):
    """Return True when *jogada* is NOT a playable move on *visual*.

    *jogada* is expected in "row col" form (e.g. "1 2"); a move is
    playable only when it addresses an existing, empty (' ') cell.
    Malformed strings and out-of-range coordinates count as invalid.
    """
    try:
        celula = visual[int(jogada[0])][int(jogada[2])]
        return celula != ' '
    except:
        return True
def jogadaHumana(nomes, tabela):
    """Prompt the human until a playable move is given, then apply it.

    Marks the chosen cell with the human's symbol (nomes[0][1]) and
    returns the board.
    """
    while True:
        jogada = input('Qual sua jogada, %s: ' % nomes[0][0])
        if not validaJogada(jogada, tabela):
            break
        print('OPS!!! Essa jogada não está disponível. Tente novamente!')
    tabela[int(jogada[0])][int(jogada[2])] = nomes[0][1]
    return tabela
def mostraTabuleiro(visual):
    """Print the 3x3 board, one row per line, cells separated by ' | '."""
    for linha in visual[:3]:
        print(' | '.join(str(celula) for celula in linha[:3]))
def jogarNovamente():
    """Ask whether to play again; return True when the answer is 'N' (stop)."""
    resposta = input('Deseja jogar novamente? (S ou N) ')
    return resposta.upper() == 'N'
def jogadaComputador(nomes, tabela):
    # Computer move || Params: player (name, symbol) pairs and the board ||
    # Returns: the modified board. nomes[0] is the human, nomes[1] the computer.
    # Opening book: on an empty board, or one where the human took only the
    # centre, play a random corner.
    if tabela == [[' ',' ', ' '], [' ', ' ',' '], [' ', ' ', ' ']] or tabela == [[' ',' ', ' '], [' ',nomes[0][1],' '], [' ', ' ', ' ']]:
        lista = ['0 0', '0 2', '2 0', '2 2']
        jogada = lista[randint(0, 3)]
        tabela[int(jogada[0])][int(jogada[2])] = nomes[1][1]
        return tabela
    # 1) Win now if possible, else 2) block the human's immediate win:
    # simulate each empty cell first with the computer's symbol, then with
    # the human's; in either case the computer takes that cell.
    for jogador in [nomes[1][1], nomes[0][1]]:
        for i in range(0, 3):
            for j in range(0, 3):
                if not validaJogada(('%d %d' % (i, j)), tabela):
                    copia = copy.deepcopy(tabela)
                    copia[i][j] = jogador
                    if verificaVencedor(copia) in ['X', 'O']:
                        tabela[i][j] = nomes[1][1]
                        return tabela
    # 3) Otherwise prefer a move that sets up a computer win two plies ahead
    # (play i,j now, then some k,l wins on the following turn).
    for i in range(0, 3):
        for j in range(0, 3):
            if not validaJogada(('%d %d' % (i, j)), tabela):
                copia = copy.deepcopy(tabela)
                copia[i][j] = nomes[1][1]
                for k in range(0, 3):
                    for l in range(0, 3):
                        copia2 = copy.deepcopy(copia)
                        if not validaJogada(('%d %d' % (k, l)), copia2):
                            copia2[k][l] = nomes[1][1]
                            if verificaVencedor(copia2) == nomes[1][1]:
                                tabela[i][j] = nomes[1][1]
                                return tabela
    # 4) Take the centre when it is free.
    if not validaJogada('1 1', tabela):
        tabela[1][1] = nomes[1][1]
        return tabela
    # 5) Fall back to a uniformly random empty cell.
    jogada = ('%d %d' % (randint(0, 2), randint(0, 2)))
    while validaJogada(jogada, tabela):
        jogada = ('%d %d' % (randint(0, 2), randint(0, 2)))
    tabela[int(jogada[0])][int(jogada[2])] = nomes[1][1]
    return tabela
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
978517d22ef87134a9543194451cef9bb9403512 | ce66de2e9b099b8b53f52721e649bce3e9d42734 | /python/ray/ml/predictors/integrations/sklearn/sklearn_predictor.py | 91a117cb878acc3efff31d27cb8e260ec6335d6d | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pengzhenghao/ray | 0428d0b7d479f112c7649577534dfdfdfe7ea2e7 | e57ce7efd6ea1d0e4f6942fcf6f526287340e63d | refs/heads/master | 2023-03-10T00:41:17.916609 | 2022-04-18T10:20:12 | 2022-04-18T10:20:12 | 184,541,751 | 0 | 1 | Apache-2.0 | 2023-03-04T08:58:50 | 2019-05-02T07:52:15 | Python | UTF-8 | Python | false | false | 3,452 | py | from typing import Optional, List, Union
import pandas as pd
from ray.ml.checkpoint import Checkpoint
from ray.ml.predictor import Predictor, DataBatchType
from ray.ml.preprocessor import Preprocessor
from sklearn.base import BaseEstimator
class SklearnPredictor(Predictor):
    """A predictor for scikit-learn compatible estimators.

    Args:
        estimator: The fitted scikit-learn compatible estimator to use for
            predictions.
        preprocessor: A preprocessor used to transform data batches prior
            to prediction.
    """

    def __init__(
        self, estimator: BaseEstimator, preprocessor: Optional[Preprocessor] = None
    ):
        self.estimator = estimator
        self.preprocessor = preprocessor

    @classmethod
    def from_checkpoint(cls, checkpoint: Checkpoint) -> "SklearnPredictor":
        """Instantiate the predictor from a Checkpoint.

        The checkpoint is expected to be a result of ``SklearnTrainer``.

        Args:
            checkpoint (Checkpoint): The checkpoint to load the model and
                preprocessor from. It is expected to be from the result of a
                ``SklearnTrainer`` run.
        """
        # Stub: not implemented yet.
        raise NotImplementedError

    def predict(
        self,
        data: DataBatchType,
        feature_columns: Optional[Union[List[str], List[int]]] = None,
        **predict_kwargs,
    ) -> pd.DataFrame:
        """Run inference on data batch.

        Args:
            data: A batch of input data. Either a pandas DataFrame or numpy
                array.
            feature_columns: The names or indices of the columns in the
                data to use as features to predict on. If None, then use
                all columns in ``data``.
            **predict_kwargs: Keyword arguments passed to ``estimator.predict``.

        Examples:

        .. code-block:: python

            import numpy as np
            from sklearn.ensemble import RandomForestClassifier
            from ray.ml.predictors.integrations.sklearn import SklearnPredictor

            train_X = np.array([[1, 2], [3, 4]])
            train_y = np.array([0, 1])

            model = RandomForestClassifier().fit(train_X, train_y)
            predictor = SklearnPredictor(estimator=model)

            data = np.array([[1, 2], [3, 4]])
            predictions = predictor.predict(data)

            # Only use first and second column as the feature
            data = np.array([[1, 2, 8], [3, 4, 9]])
            predictions = predictor.predict(data, feature_columns=[0, 1])

        .. code-block:: python

            import pandas as pd
            from sklearn.ensemble import RandomForestClassifier
            from ray.ml.predictors.integrations.sklearn import SklearnPredictor

            train_X = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
            train_y = pd.Series([0, 1])

            model = RandomForestClassifier().fit(train_X, train_y)
            predictor = SklearnPredictor(estimator=model)

            # Pandas dataframe.
            data = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
            predictions = predictor.predict(data)

            # Only use first and second column as the feature
            data = pd.DataFrame([[1, 2, 8], [3, 4, 9]], columns=["A", "B", "C"])
            predictions = predictor.predict(data, feature_columns=["A", "B"])


        Returns:
            pd.DataFrame: Prediction result.

        """
        # Stub: not implemented yet.
        raise NotImplementedError
| [
"noreply@github.com"
] | pengzhenghao.noreply@github.com |
f143b9e6ab91c4c9fa6f94e1f36a988af36b2133 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/blink/web_tests/external/wpt/eventsource/resources/cors.py | 6ed31f2cd7d1782f8b7267d646d3ba26ab1a2a6d | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 1,481 | py | import os
from wptserve import pipes
from wptserve.utils import isomorphic_decode
def run_other(request, response, path):
    """Load the handler script at *path*, exec it, and delegate to its main().

    This is a deliberate hack used by this test resource to reuse other
    wptserve handlers. Only ever call it with trusted, repo-local paths:
    exec() of attacker-controlled content would be arbitrary code execution.
    """
    environ = {u"__file__": path}
    # Read via a context manager so the file handle is closed promptly
    # (the original open(...).read() leaked it until garbage collection).
    with open(path, u"r") as f:
        source = f.read()
    exec(compile(source, path, u'exec'), environ, environ)
    rv = environ[u"main"](request, response)
    return rv
def main(request, response):
    """Entry point for the CORS event-stream test resource.

    Reflects the requested origin/credentials into CORS response headers,
    then dispatches on the ``run`` query parameter: the ``cache-control``
    handler serves an inline event stream; the others are delegated to
    sibling handler scripts via run_other(). Unknown handlers do nothing.
    """
    origin = request.GET.first(b"origin", request.headers[b"origin"])
    credentials = request.GET.first(b"credentials", b"true")
    response.headers.update([(b"Access-Control-Allow-Origin", origin),
                             (b"Access-Control-Allow-Credentials", credentials)])

    handler = request.GET.first(b'run')
    if handler in [b"status-reconnect",
                   b"message",
                   b"redirect",
                   b"cache-control"]:
        if handler == b"cache-control":
            response.headers.set(b"Content-Type", b"text/event-stream")
            stream_path = os.path.join(request.doc_root, u"eventsource", u"resources", u"cache-control.event_stream")
            # Context manager closes the file promptly (the original leaked
            # the handle until garbage collection).
            with open(stream_path, u"r") as stream:
                response.content = stream.read()
            pipes.sub(request, response)
            return
        elif handler == b"redirect":
            return run_other(request, response, os.path.join(request.doc_root, u"common", u"redirect.py"))
        else:
            return run_other(request, response, os.path.join(os.path.dirname(isomorphic_decode(__file__)), isomorphic_decode(handler) + u".py"))
    else:
        return
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
ee5635c50525121a7abafdf4f2497ca80d592b88 | ef243d91a1826b490e935fa3f3e6c29c3cc547d0 | /PyQt5/QtSensors/QDistanceReading.py | 93010e1e2e32d6db692726e8427d0fdb40d8b5f6 | [] | no_license | VentiFang/Python_local_module | 6b3d0b22399e817057dfd15d647a14bb1e41980e | c44f55379eca2818b29732c2815480ee755ae3fb | refs/heads/master | 2020-11-29T11:24:54.932967 | 2019-12-25T12:57:14 | 2019-12-25T12:57:14 | 230,101,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # encoding: utf-8
# module PyQt5.QtSensors
# from F:\Python\Python36\lib\site-packages\PyQt5\QtSensors.pyd
# by generator 1.147
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
from .QSensorReading import QSensorReading
class QDistanceReading(QSensorReading):
    # Auto-generated binding stub for PyQt5.QtSensors.QDistanceReading: the
    # bodies below are generator placeholders, not the real C++-backed
    # implementations, so do not rely on their return values.
    def distance(self): # real signature unknown; restored from __doc__
        """ distance(self) -> float """
        return 0.0

    def setDistance(self, p_float): # real signature unknown; restored from __doc__
        """ setDistance(self, float) """
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass
"5149528+ventifang@user.noreply.gitee.com"
] | 5149528+ventifang@user.noreply.gitee.com |
1ed5e23a6eec5d59476d6e9f794889ccaf9c1d50 | ba1e90ae6ea9f8f74d9b542e159825341c717712 | /2015/iitbhu3.py | 0b48444d1b36719c8ee5de5dc0558536774e0724 | [] | no_license | sailesh2/CompetitiveCode | b384687a7caa8980ab9b9c9deef2488b0bfe9cd9 | 5671dac08216f4ce75d5992e6af8208fa2324d12 | refs/heads/master | 2021-06-24T22:39:11.396049 | 2020-11-27T05:22:17 | 2020-11-27T05:22:17 | 161,877,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | x=raw_input().split(' ')
a=int(x[0])
b=int(x[1])
y=raw_input().split(' ')
n=int(y[0])
m=int(y[1])
savea=a
saveb=b
if a>b:
while 1:
if a%b==0:
gcd=b
break
temp=a
a=b
b=temp%b
else:
while 1:
if b%a==0:
gcd=a
break
temp=b
b=a
a=temp%a
#print gcd
lcm=(savea*saveb)/gcd
#print lcm
if lcm/savea<=n and lcm/saveb<=m:
print "Yes"
else:
print "No"
| [
"sailesh.ku.upadhyaya@gmail.com"
] | sailesh.ku.upadhyaya@gmail.com |
8eb6dd51ef164ee1452d1e90314b69b391ac91a8 | c0f4104194a7989e44d7f0161b2425c5a5bc3a98 | /senlin/tests/unit/apiv1/test_cluster_policies.py | 547c7f11cea39189106c81f3b497db2e0be8cd0a | [] | no_license | bopopescu/Openstack-2 | f65470bdd0ee4736c45b6f869f0453cb8eb446c8 | 6f06133562e3dfd490695a92c9ddf1a322675104 | refs/heads/master | 2022-11-28T09:19:21.633850 | 2016-06-23T07:55:32 | 2016-06-23T07:55:32 | 282,095,817 | 0 | 0 | null | 2020-07-24T01:44:49 | 2020-07-24T01:44:48 | null | UTF-8 | Python | false | false | 7,295 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from senlin.api.middleware import fault
from senlin.api.openstack.v1 import cluster_policies as cp_mod
from senlin.common import exception as senlin_exc
from senlin.common import policy
from senlin.rpc import client as rpc_client
from senlin.tests.unit.apiv1 import shared
from senlin.tests.unit.common import base
# The class decorator injects a mock for policy.enforce into every test
# method as the trailing `mock_enforce` argument.
@mock.patch.object(policy, 'enforce')
class ClusterPolicyControllerTest(shared.ControllerTest, base.SenlinTestCase):
    '''Tests the API class which acts as the WSGI controller.'''

    def setUp(self):
        super(ClusterPolicyControllerTest, self).setUp()

        # Create WSGI controller instance
        class DummyConfig(object):
            bind_port = 8778

        cfgopts = DummyConfig()
        self.controller = cp_mod.ClusterPolicyController(options=cfgopts)

    def test_cluster_policy_index(self, mock_enforce):
        # Happy path: index returns the engine's binding list unmodified.
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'test_cluster'
        req = self._get('/cluster_policies/%s' % cid)

        engine_resp = [
            {
                'id': 'fake_id',
                'cluster_id': 'fake cluster id',
                'policy_id': 'fake policy id',
                'enabled': True,
                'data': {},
                'cluster_name': 'test_cluster',
                'policy_name': 'test_policy',
                'policy_type': 'ScalingPolicy',
            }
        ]

        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_resp)

        result = self.controller.index(req, cluster_id=cid)

        default_args = {'sort': None, 'filters': None, 'identity': cid}
        mock_call.assert_called_with(req.context,
                                     ('cluster_policy_list', default_args))

        expected = {'cluster_policies': engine_resp}
        self.assertEqual(expected, result)

    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_index_whitelists_params(self, mock_call,
                                                    mock_enforce):
        # Unknown query params ('balrog') must not reach the engine call.
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'FAKE_CLUSTER'
        params = {
            'sort': 'fake sorting string',
            'filters': None,
            'balrog': 'you shall not pass!'
        }
        req = self._get('/cluster_policies/%s' % cid, params=params)
        mock_call.return_value = []

        self.controller.index(req, cluster_id=cid)

        rpc_call_args, _ = mock_call.call_args
        engine_args = rpc_call_args[1][1]
        self.assertEqual(3, len(engine_args))
        self.assertIn('sort', engine_args)
        self.assertIn('filters', engine_args)
        self.assertNotIn('balrog', engine_args)

    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_index_whitelist_filter_params(self, mock_call,
                                                          mock_enforce):
        # Only recognised filter keys (e.g. 'enabled') survive into filters.
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'FAKE_CLUSTER'
        params = {
            'enabled': 'True',
            'balrog': 'you shall not pass!'
        }
        req = self._get('/cluster_policies/%s' % cid, params=params)
        mock_call.return_value = []

        self.controller.index(req, cluster_id=cid)

        rpc_call_args, _ = mock_call.call_args
        engine_args = rpc_call_args[1][1]
        self.assertIn('filters', engine_args)

        filters = engine_args['filters']
        self.assertEqual(1, len(filters))
        self.assertTrue(filters['enabled'])
        self.assertNotIn('balrog', filters)

    def test_cluster_policy_index_denied_policy(self, mock_enforce):
        # RBAC denial surfaces as HTTP 403.
        self._mock_enforce_setup(mock_enforce, 'index', False)
        cid = 'FAKE_CLUSTER'
        req = self._get('/cluster_policy/%s' % cid)

        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.index,
                                              req, cluster_id=cid)

        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', six.text_type(resp))

    def test_cluster_policy_get_success(self, mock_enforce):
        self._mock_enforce_setup(mock_enforce, 'get', True)
        cid = 'FAKE_CLUSTER'
        pid = 'FAKE_POLICY'
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})

        engine_resp = {
            'id': 'fake_id',
            'cluster_id': cid,
            'policy_id': pid,
            'enabled': True,
            'data': {},
            'cluster_name': 'test_cluster',
            'policy_name': 'test_policy',
            'policy_type': 'ScalingPolicy',
        }

        mock_call = self.patchobject(rpc_client.EngineClient, 'call',
                                     return_value=engine_resp)

        response = self.controller.get(req, cluster_id=cid, policy_id=pid)

        mock_call.assert_called_once_with(
            req.context, ('cluster_policy_get',
                          {'identity': cid, 'policy_id': pid}))
        self.assertEqual({'cluster_policy': engine_resp}, response)

    def test_cluster_policy_get_not_found(self, mock_enforce):
        # A missing binding from the engine maps to HTTP 404.
        self._mock_enforce_setup(mock_enforce, 'get', True)
        cid = 'FAKE_CLUSTER'
        pid = 'FAKE_POLICY'
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})

        error = senlin_exc.PolicyBindingNotFound(policy=pid, identity=cid)
        mock_call = self.patchobject(rpc_client.EngineClient, 'call')
        mock_call.side_effect = shared.to_remote_error(error)

        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.get,
                                              req, cluster_id=cid,
                                              policy_id=pid)

        self.assertEqual(404, resp.json['code'])
        self.assertEqual('PolicyBindingNotFound', resp.json['error']['type'])

    def test_action_get_denied_policy(self, mock_enforce):
        # RBAC denial on get surfaces as HTTP 403.
        self._mock_enforce_setup(mock_enforce, 'get', False)
        cid = 'FAKE_CLUSTER'
        pid = 'FAKE_POLICY'
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})

        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.get,
                                              req, cluster_id=cid,
                                              policy_id=pid)

        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', six.text_type(resp))
| [
"egonmin@CN00119199"
] | egonmin@CN00119199 |
3b9d3f0c092a22bced0ce36744a8a86dec30f188 | f034ce134705b2de79a5aef85496e0ed9eabd700 | /market/migrations/0001_initial.py | 930c638800dc0ee0538596413d50661da2ce1491 | [] | no_license | poudel/animal-farm | 4d7961ae3b8b64f382232f2f3a9c0fc41be392ab | 646a6156fd60a73e6e50de1c2891ae25a13dae30 | refs/heads/develop | 2021-04-06T06:26:35.834800 | 2018-03-14T15:47:12 | 2018-03-14T15:47:12 | 125,237,057 | 8 | 0 | null | 2020-02-11T21:25:44 | 2018-03-14T15:50:45 | Python | UTF-8 | Python | false | false | 3,301 | py | # Generated by Django 2.0.3 on 2018-03-10 06:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial schema for the `market` app (Product, Seller).
    # Do not hand-edit applied migrations; create a new migration instead.

    initial = True

    dependencies = [
        ('livestock', '0007_auto_20180310_1209'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('is_deleted', models.BooleanField(default=False)),
                ('title', models.CharField(max_length=100, verbose_name='title')),
                ('description', models.TextField(verbose_name='description')),
                ('animal', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='livestock.Animal', verbose_name='animal')),
                ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_products', to=settings.AUTH_USER_MODEL, verbose_name='created by')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Seller',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('is_deleted', models.BooleanField(default=False)),
                ('name', models.CharField(max_length=50, verbose_name='name')),
                ('about', models.TextField(verbose_name='about')),
                ('mobile', models.CharField(max_length=10, verbose_name='mobile number')),
                ('status', models.CharField(choices=[('Pending', 'Pending verification'), ('Verified', 'Verified'), ('Unverified', 'Unverified'), ('Banned', 'Banned'), ('Archived', 'Archived')], db_index=True, default='Pending', max_length=20, verbose_name='status')),
                ('checker', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='checked_sellers', to=settings.AUTH_USER_MODEL, verbose_name='profile checker')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sellers', to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Added after both CreateModels to resolve the Product -> Seller FK.
        migrations.AddField(
            model_name='product',
            name='seller',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='market.Seller', verbose_name='seller'),
        ),
    ]
| [
"self@keshab.net"
] | self@keshab.net |
ee633cead836f951cba9e7535fd70d9222b2ba1a | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pb_6025/sdB_pb_6025_lc.py | d912f3768ca33282d331c84172b5370d3149d7de | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[8.2935,1.325956], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pb_6025/sdB_pb_6025_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
a6c39c47a9e3abb4901208d7a8d2d9fc8bf7c7d4 | ea79ba14054cd4879968e75dc0cfc4890ba090b8 | /common/xrd-ui-tests-python/tests/xroad_global_groups_tests/XroadMemberAddToGlobalGroup.py | bbb863da99caf10e227ca5120b7869720df39da0 | [
"MIT"
] | permissive | VitaliStupin/X-Road-tests | bf70a5591721243622e6c3a2653aae30af6c4ae4 | 6103f3f5bbba387b8b59b050c0c4f1fb2180fc37 | refs/heads/develop | 2023-04-16T12:20:23.289803 | 2018-06-01T07:59:12 | 2018-06-01T07:59:12 | 84,918,233 | 0 | 0 | null | 2017-03-14T07:21:58 | 2017-03-14T07:21:58 | null | UTF-8 | Python | false | false | 1,725 | py | import unittest
from helpers import auditchecker, xroad
from main.maincontroller import MainController
from tests.xroad_global_groups_tests import global_groups_tests
class XroadMemberAddToGlobalGroup(unittest.TestCase):
    """
    SERVICE_37 Add an X-Road Member to a Global Group
    RIA URL: https://jira.ria.ee/browse/XTKB-182
    Depends on finishing other test(s): remove from global group
    Requires helper scenarios:
    X-Road version: 6.16.0
    """

    def test_member_add_to_global_group(self):
        main = MainController(self)

        # Central-server UI and SSH credentials come from the test config.
        cs_host = main.config.get('cs.host')
        cs_user = main.config.get('cs.user')
        cs_pass = main.config.get('cs.pass')
        cs_ssh_host = main.config.get('cs.ssh_host')
        cs_ssh_user = main.config.get('cs.ssh_user')
        cs_ssh_pass = main.config.get('cs.ssh_pass')
        # Audit-log checker verifies the action is recorded on the server.
        log_checker = auditchecker.AuditChecker(cs_ssh_host, cs_ssh_user, cs_ssh_pass)
        global_group = main.config.get('cs.global_group')
        member_name = main.config.get('ss1.client_name')
        member_code = xroad.split_xroad_id(main.config.get('ss1.client_id'))['code']
        # Factory returns the actual scenario callable to execute below.
        test_member_add_to_global_group = global_groups_tests.test_member_add_to_global_group(main, member_name,
                                                                                             member_code, global_group,
                                                                                             log_checker=log_checker)
        try:
            main.reload_webdriver(cs_host, cs_user, cs_pass)
            test_member_add_to_global_group()
        except:
            # Capture screenshots/logs before re-raising the failure.
            main.save_exception_data()
            raise
        finally:
            main.tearDown()
| [
"mardu@varuosakeskus.ee"
] | mardu@varuosakeskus.ee |
8713fa99e22ae736ff68230a7b32a4cdab41f7df | 9d67cd5f8d3e0ffdd4334a6b9b67c93f8deca100 | /configs/12_20share_old.py | cf6e73c0a3255ed59e3802cffdecf6980499f4aa | [] | no_license | SiyuanLee/caps | 0c300a8e5a9a661eca4b2f59cd38125ddc35b6d3 | 476802e18ca1c7c88f1e29ed66a90c350aa50c1f | refs/heads/master | 2021-06-20T22:48:16.230354 | 2021-02-22T13:21:57 | 2021-02-22T13:21:57 | 188,695,489 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,041 | py | """
This is the example config file
same_room
no parameter share
take a look at transfer_config (differences are there)
"""
import numpy as np
# More one-char representation will be added in order to support
# other objects.
# The following a=10 is an example although it does not work now
# as I have not included a '10' object yet.
a = 10
# This is the map array that represents the map
# You have to fill the array into a (m x n) matrix with all elements
# not None. A strange shape of the array may cause malfunction.
# Currently available object indices are # they can fill more than one element in the array.
# 0: nothing
# 1: wall
# 2: ladder
# 3: coin
# 4: spike
# 5: triangle -------source
# 6: square ------ source
# 7: coin -------- target
# 8: princess -------source
# 9: player # elements(possibly more than 1) filled will be selected randomly to place the player
# unsupported indices will work as 0: nothing
map_array = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 5, 6, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 9, 9, 9, 9, 1, 9, 9, 9, 8, 1],
[1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1],
[1, 0, 0, 2, 0, 0, 0, 2, 0, 7, 1],
[1, 9, 9, 2, 9, 9, 9, 2, 9, 9, 1],
[1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
[1, 0, 2, 0, 1, 0, 2, 0, 0, 0, 1],
[1, 9, 9, 9, 1, 9, 9, 9, 9, 9, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
# set to true -> win when touching the object
# 0, 1, 2, 3, 4, 9 are not possible
end_game = {
6: True,
}
rewards = {
"positive": 0, # when collecting a coin
"win": 1, # endgame (win)
"negative": -25, # endgame (die)
"tick": 0 # living
}
######### dqn only ##########
# ensure correct import
import os
import sys
__file_path = os.path.abspath(__file__)
__dqn_dir = '/'.join(str.split(__file_path, '/')[:-2]) + '/'
sys.path.append(__dqn_dir)
__cur_dir = '/'.join(str.split(__file_path, '/')[:-1]) + '/'
from dqn_utils import PiecewiseSchedule, NoOpWrapperMK
# load the random sampled obs
import pickle
pkl_file = __cur_dir + 'same.pkl'
with open(pkl_file, 'rb') as f:
eval_obs_array = pickle.loads(f.read())
def seed_func():
return np.random.randint(0, 1000)
num_timesteps = 2.5e7
learning_freq = 4
# training iterations to go
num_iter = num_timesteps / learning_freq
# piecewise learning rate
lr_multiplier = 1.0
learning_rate = PiecewiseSchedule([
(0, 2e-4 * lr_multiplier),
(num_iter / 2, 1e-4 * lr_multiplier),
(num_iter * 3 / 4, 5e-5 * lr_multiplier),
], outside_value=5e-5 * lr_multiplier)
# piecewise learning rate
exploration = PiecewiseSchedule([
(0, 1.0),
(num_iter / 2, 0.7),
(num_iter * 3 / 4, 0.1),
(num_iter * 7 / 8, 0.05),
], outside_value=0.05)
######### transfer only #########
source_dirs = [
# an old map policy
# '/home/lsy/logs/target6c_12_05_17_21:26:25/dqn',
# '/home/lsy/PycharmProjects/ple-monstrerkong/examples/dqn_new/logs/target5_12_05_17_19:49:45',
# '/home/lsy/target8c_12_10_17_15:25:06/dqn',
# '/home/beeperman/Project/ple-monsterkong/examples/dqn_new/logs/same_room_12_12_17_20:54:53/dqn',
#'/home/lsy/same_room_12_12_17_20:54:53/dqn',
'/home/lsy/ple-monstrerkong/examples/dqn_new/logs/same_room5_12_12_17_21:00:29/dqn',
]
transfer_config = {
'source_dirs': source_dirs,
'online_q_omega': False, # default false off policy with experience replay
'q_omega_uniform_sample': False, # default false
'four_to_two': False, # default false frame_history_len must be 4!
'source_noop': False, # default false (false means source policies HAS noop action)
'no_share_para': False, # default false set to true to stop sharing parameter between q network and q_omega/term
'debug_no_term_train': True
}
dqn_config = {
'seed': seed_func, # will override game settings
'num_timesteps': num_timesteps,
'replay_buffer_size': 1000000,
'batch_size': 32,
'gamma': 0.99,
'learning_starts': 1e3,
'learning_freq': learning_freq,
'frame_history_len': 2,
'target_update_freq': 10000,
'grad_norm_clipping': 10,
'learning_rate': learning_rate,
'exploration': exploration,
'additional_wrapper': NoOpWrapperMK,
'eval_obs_array': eval_obs_array, # TODO: construct some eval_obs_array
'room_q_interval': 5e4, # q_vals will be evaluated every room_q_interval steps
'epoch_size': 5e4, # you decide any way
'config_name': str.split(__file_path, '/')[-1].replace('.py', ''), # the config file name
'transfer_config': transfer_config,
}
map_config = {
'map_array': map_array,
'rewards': rewards,
'end_game': end_game,
'init_score': 0,
'init_lives': 1, # please don't change, not going to work
# configs for dqn
'dqn_config': dqn_config,
# work automatically only for aigym wrapped version
'fps': 1000,
'frame_skip': 1,
'force_fps': True, # set to true to make the game run as fast as possible
'display_screen': False,
'episode_length': 1200,
'episode_end_sleep': 0., # sec
} | [
"lisiyuan@bupt.edu.cn"
] | lisiyuan@bupt.edu.cn |
706bb6fb18f57fe7fbff7f5b7082205fde6883cf | 5b6ec656a247d10011fd67a920aa002ebdf873c3 | /Ecommerce Website/Ecommerce Website 1.3/EcommerceWebsite/urls.py | 0c1b7936cf2a6e86257310459061cbcc2ef5174e | [] | no_license | KhaledAbuNada-AI/Django-Projects | cfb46d46da5f5358171294ca8c02c62c5babf2cf | ff264426d7a650f3c513678bbd71b5519372f6d3 | refs/heads/master | 2022-04-24T10:52:26.791436 | 2020-04-22T15:27:37 | 2020-04-22T15:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | """EcommerceWebsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name='index'),
path('shop/', include('shop.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"siddhant.shah.1986@gmail.com"
] | siddhant.shah.1986@gmail.com |
8239c124f63cbbc3e2ce479cc233adb943472bcf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch77_2020_04_08_20_40_26_130258.py | 01494b6b51477f2062e1834d94e0c986db072780 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | def calcula_tempo(dicio):
nome_tempo = {}
i = 0
for e,a in dicio.items():
t = (200/a)**(1/2)
nome_tempo[e] = t
i += 1
return nome_tempo
| [
"you@example.com"
] | you@example.com |
e1a7c547a214c1f8836d35c4c7eacd9583f9394b | 96e74d3d36a8394f2f9a094c1eebe9e292b8123f | /setup.py | 0041bc7c6a46b037e90af71d5841c6cc73fbef18 | [] | no_license | kundajelab/locusselect | cf8073c541c2b363f70ff6e54f3d607701f6a832 | aafebf5e43e514e824a36ae07f5336a683e17b88 | refs/heads/master | 2021-07-02T12:14:14.281841 | 2020-11-12T10:10:16 | 2020-11-12T10:10:16 | 195,343,578 | 2 | 2 | null | 2019-07-19T19:31:13 | 2019-07-05T05:23:27 | Jupyter Notebook | UTF-8 | Python | false | false | 1,040 | py | from setuptools import setup,find_packages
config = {
'include_package_data': True,
'description': 'Compute deep learning embeddings for narrowPeak files; compute pairwise distance between embeddings and cluster with tSNE',
'download_url': 'https://github.com/kundajelab/locusselect',
'version': '0.3',
'packages': ['locusselect'],
'setup_requires': [],
'install_requires': ['numpy>=1.9', 'keras>=2.2', 'h5py', 'pandas','deeplift'],
'scripts': [],
'entry_points': {'console_scripts': ['compute_nn_embeddings = locusselect.embeddings:main',
'compute_interpretation_scores = locusselect.interpret:main',
'compute_embedding_distances = locusselect.dist:main',
'visualize_embeddings =locusselect.vis:main',
'compute_kmer_embeddings = locusselect.gapped_kmers:main']},
'name': 'locusselect'
}
if __name__== '__main__':
setup(**config)
| [
"annashcherbina@gmail.com"
] | annashcherbina@gmail.com |
9fddbd257c40305611a75397a400ebbb4e82b974 | c89e59b4d018e8a2d7dc0dbc3bb7a3768024f849 | /before2021/python/문제풀이/day6/7_건물세우기.py | 2d5eeac2e65309d19d45f900abf451c444c92311 | [] | no_license | leeiopd/algorithm | ff32103a43e467a5a091257cc07cf35365ecbf91 | e41647d3918c3099110d97f455c5ebf9a38d571e | refs/heads/master | 2023-03-08T23:46:34.919991 | 2023-02-22T09:39:46 | 2023-02-22T09:39:46 | 166,131,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | '''
(주)정올에서는 여러 개의 빌딩을 새로 지을 계획이다. 그래서 빌딩을 세울 장소를 선정하였다.
그리고 각 빌딩을 각 장소에 세울 경우에 드는 비용을 추정하였다. 예를 들어서 아래의 표를 보자
1 2 3
A 4 7 3
B 2 6 1
C 3 9 4
A, B, C 는 건물을 나타내고, 1, 2, 3은 장소를 나타낸다.
예를 들어서 건물 B를 장소 1에 세우면 비용이 2가 들고, 장소 2에 세우면 비용이 6, 장소 3에 세우면 비용이 1만큼 든다.
물론 한 장소에는 한 건물밖에 세울 수 없다. 만일 A를 장소 2에, B를 장소 3에, C를 1에 세우면 전체 비용이 7+1+3 = 11이 필요하다.
그런데 A를 3, B를 1, C를 2에 세우면 3+2+9 = 14 가 필요하다.
각 빌딩을 어느 장소에 세우면 비용의 합이 최소가 되는지 구하는 프로그램을 작성하시오.
입력 파일의 첫 줄은 빌딩의 개수 n(1≤n≤10)이 들어있다.
그 다음 n 줄에는 각 건물을 각 장소에 세울 경우에 드는 비용이 입력된다. 물론 각 줄 에는 n개의 수가 입력된다.
비용을 나타내는 수의 범위는 1이상 100미만이다.
첫 줄에는 최소비용을 출력한다.
'''
import sys
sys.stdin = open("7_input.txt")
N = int(input())
top = -1
visited = [-99] * N
maps = []
for y in range(N):
maps.append(list(map(int, input().split())))
result = 9999999999999
def check():
global top, result, visited
if top == N-1:
add = 0
for i in range(N):
add += maps[i][visited[i]]
if add > result:
return
if add < result:
result = add
return
add = 0
for i in range(top):
add += maps[i][visited[i]]
if add > result:
return
for i in range(N):
if i not in visited:
top += 1
visited[top] = i
check()
visited[top] = -99
top -= 1
check()
print(result) | [
"leeiopd@hanmail.net"
] | leeiopd@hanmail.net |
6f8e8834f52ca848594d52208b0a41a8329f2961 | d23735419170bc51979bd37e6e82909ded61b818 | /image_lucida_project/image_lucida_app/context-processors.py | 97126abdacf5c034ac3bf03e29879d31545689ac | [] | no_license | ZoeLeBlanc/ImageLucida | 7f691d956faa2c5490c7a6b7d1ab69439636ea58 | edf41df372f31cac0682b9bfec85b2846b78a936 | refs/heads/main | 2023-02-20T12:41:20.372916 | 2022-05-30T02:12:26 | 2022-05-30T02:12:26 | 82,299,769 | 1 | 0 | null | 2023-02-15T18:32:12 | 2017-02-17T13:26:13 | Python | UTF-8 | Python | false | false | 1,665 | py | from image_lucida_app.models import *
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.models import User
def navigation(context):
"""
The navigation context processor generates the dynamic navbar for the index.html template.
"""
if context.user.is_authenticated():
welcome_message = 'Welcome '+user.username
list_of_nav = [
{
'name':'Projects',
'link': '/projects/',
'prop': 'right',
'nav-loc': 'main'
},
{
'name':'Play',
'link': '/play/',
'prop': 'right',
'nav-loc': 'main'
},
{
'name': 'Profile',
'link': '/profile/',
'prop': 'right',
'nav-loc': 'main'
},
{
'name': welcome_message,
'link': '#',
'nav-loc': 'side'
},
{
'name':'Logout',
'link': '/logout/',
'nav-loc': 'side'
},
]
else:
# if user is not logged in show 0 next to cart
list_of_nav = [
{
'name':'Register',
'link': '/register/'
},
{
'name':'Login',
'link': '/login/'
}
]
return {'navigation': list_of_nav}
| [
"zoe.leblanc@vanderbilt.edu"
] | zoe.leblanc@vanderbilt.edu |
6ba1f46941ad26b0e5603b42a8af33aca521b913 | 40832e48ef481b4141435459afeaf0706fa6dc13 | /flask/crud/app.py | 0850249ba85737b18275e868b63a0dc907261a18 | [] | no_license | drgarcia1986/python-web-frameworks-crud | 93d85c9c35ae016d21d13549081fab84c33dbfe6 | 1fe110a3af5320ec6aecf277a45f61e3fc7df8be | refs/heads/master | 2021-01-01T16:49:52.529747 | 2015-09-14T13:39:13 | 2015-09-14T13:39:13 | 39,610,696 | 4 | 1 | null | 2015-09-14T13:39:14 | 2015-07-24T03:53:36 | Python | UTF-8 | Python | false | false | 621 | py | # -*- coding: utf-8 -*-
from flask import Flask
from .extensions import db
from .person.views import person_bp
def create_app():
app = Flask(__name__)
app.config.update(
SQLALCHEMY_DATABASE_URI='sqlite:///database.db'
)
register_extensions(app)
register_bluenprints(app)
create_database(app)
return app
def register_extensions(app):
db.init_app(app)
def register_bluenprints(app):
app.register_blueprint(person_bp, url_prefix='/api/persons')
def create_database(app):
from .person.models import Person # noqa
with app.app_context():
db.create_all()
| [
"drgarcia1986@gmail.com"
] | drgarcia1986@gmail.com |
7c016d8b6388aebf0272e9d020906a09f3c9df6b | 4cb189467bf31816fcd8bfb248947b7dd00c2017 | /pixace/__init__.py | 3819954c037ec6693028c42d50dcb3c11351c8bf | [
"MIT"
] | permissive | vishnubob/pixace | 6945861372d70fbbbe8f15ac1d36d65b8f0b0f06 | 8871f3ac79101a2e7780571b8aafb226382ad83d | refs/heads/main | 2023-02-25T14:01:47.110728 | 2020-12-26T03:10:40 | 2020-12-26T03:10:40 | 320,168,163 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | def download_model(model_name=None, checkpoint="default", weights_dir="model-weights"):
from . zoo import ModelZoo
zoo = ModelZoo(weights_dir=weights_dir)
return zoo.download(model_name=model_name, checkpoint=checkpoint)
def get_trainer(
model_name=None,
model_type="reformer",
weights_dir="model-weights",
image_size=32,
bitdepth=(5,4,4),
):
from . train import Trainer
return Trainer(
model_name=model_name,
model_type=model_type,
weights_dir=weights_dir,
image_size=image_size,
bitdepth=bitdepth,
)
def get_predictor(
model_name=None,
model_type="reformer",
weights_dir="model-weights",
checkpoint=None,
image_size=32,
bitdepth=(5,4,4)
):
from . inference import Inference
return Inference(
model_name=model_name,
model_type=model_type,
weights_dir=weights_dir,
image_size=image_size,
bitdepth=bitdepth,
)
| [
"giles@polymerase.org"
] | giles@polymerase.org |
674a462645ec6e5cafacdcf593439c253f7c3c93 | 32dcb7c872cbc5048a2024df73866ee20e7405ec | /0x0B-python-input_output/7-save_to_json_file.py | 16166a4b92ea1c6e34dbec961951bd9ae6613ebe | [] | no_license | Bzhamdi/holbertonschool-higher_level_programming | f52eccc48fe388d6728e59e68da336b392057b8e | d92c749ed64d8b795533105520ddea4e12c2a508 | refs/heads/master | 2023-06-07T08:07:47.579114 | 2021-06-24T22:55:38 | 2021-06-24T22:55:38 | 259,213,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | #!/usr/bin/python3
"""save_to_json_file"""
import json
def save_to_json_file(my_obj, filename):
"""save object to jason file"""
with open(filename, 'w', encoding='utf-8') as file:
return json.dump(my_obj, file)
| [
"bouzouitina.hamdi@gmail.com"
] | bouzouitina.hamdi@gmail.com |
ba621a38f90ffdae0a50420c5fe784f09c301c67 | 0b4d36e45ac9a192982f01ebab15321981a17be5 | /app/admin/views/__init__.py | 2f32a1494d559d8911ab501dcc05d5b027e6a58a | [] | no_license | xuannanxan/maitul | 02f4f3ce85f02915c8d18cb4d291c3c6da4573d5 | 6407415f6beb6677875b23b06ac694996e840256 | refs/heads/master | 2020-07-11T00:10:18.569571 | 2019-11-17T12:39:38 | 2019-11-17T12:39:38 | 204,406,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,603 | py | # -*- coding: utf-8 -*-
# Created by xuannan on 2019-01-26.
__author__ = 'Allen xu'
from datetime import datetime
from flask_login import current_user
from flask import request,session
from app.expand.utils import build_tree,object_to_dict
from app.models import Crud,Menu,Conf,Admin,Role,Auth
from app.models.base import db
from .. import admin
# 上下文处理器
@admin.context_processor
def tpl_extra():
menu_data = Crud.get_data(Menu, Menu.sort.desc())
rule = str(request.url_rule)
#用户权限列表
auth_urls = []
if hasattr(current_user, 'id') and current_user.id != 1:
auth_urls = session.get('auth_urls')
# 如果有分页,去掉分页标签
has_pagination = rule.find("<")
if has_pagination>0:
rule = rule[0:has_pagination-1]
#获取当前菜单信息,用于页面展示
active_menu = Menu.query.filter(Menu.url == rule).first()
#配置项
conf_model_data = Crud.get_data(Conf, Conf.sort.desc())
conf_data,webconfig = [],{}
for v in conf_model_data:
conf = object_to_dict(v)
if conf['optional_values']:
conf['optional_values'] = (conf['optional_values']).split(',')
conf_data.append(conf)
webconfig[conf['ename']] = conf['default_value']
data = dict(
online_time= datetime.now().strftime("%Y/%m/%d %H:%M:%S"),
menu_tree=build_tree(menu_data, 0, 0),
rule = rule,
active_menu = active_menu,
conf_data = conf_data,
webconfig = webconfig,
auth_urls = auth_urls
)
return data | [
"382933169@qq.com"
] | 382933169@qq.com |
333a6c4dfc8182319414de1bdc18d089c1898ac5 | c7a1c1ae40e9d95dfb92251dcfbf3c5010e6ba81 | /picamera/essentials-camera/Chapter 05 - Python/ch5listing2.py | 0b39ad336e65cbd478911fd113792f6648cee2cd | [] | no_license | pranavlathigara/Raspberry-Pi-DIY-Projects | efd18e2e5b9b8369bb1a5f5418782480cf9bc729 | 0c14c316898d4d06015912ac4a8cb7b71a3980c0 | refs/heads/master | 2021-04-06T09:14:28.088223 | 2018-02-19T00:15:22 | 2018-02-19T00:15:22 | 124,649,553 | 1 | 2 | null | 2018-03-10T11:30:59 | 2018-03-10T11:30:59 | null | UTF-8 | Python | false | false | 173 | py | from picamera import PiCamera
from time import sleep
camera = PiCamera()
camera.start_preview()
sleep(5)
camera.capture('/home/pi/Desktop/image.jpg')
camera.stop_preview() | [
"tdamdouni@users.noreply.github.com"
] | tdamdouni@users.noreply.github.com |
df1725401ad1ed3098e58f43cf648d10b867d034 | d45b4db35e5e8baef1aa71bb8ae55236e8e8de67 | /rm2bed.py | e36135ab8501ab8e641e864df18e86f177377c97 | [] | no_license | davek44/utility | a5af6bfff2cf576671dcdfa7bdfdac97a417b26a | 5a2581078bf9dab78cc182f2917ecb671d04570c | refs/heads/master | 2023-04-30T21:19:40.683342 | 2023-04-20T22:30:48 | 2023-04-20T22:30:48 | 7,212,829 | 18 | 11 | null | null | null | null | UTF-8 | Python | false | false | 1,352 | py | #!/usr/bin/env python
from optparse import OptionParser
import gzip
'''
rm2bed.py
Convert RepeatMasker .out format to BED.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <rm out>'
parser = OptionParser(usage)
#parser.add_option()
(options,args) = parser.parse_args()
if len(args) != 1:
parser.error('Must provide RepeatMasker .out file')
else:
if args[0][-2:] == 'gz':
rm_in = gzip.open(args[0], 'rt')
else:
rm_in = open(args[0])
for i in range(4):
line = rm_in.readline()
while line:
a = line.split()
chrm = a[4]
start = str(int(a[5])-1)
end = a[6]
if a[8] == '+':
strand = '+'
else:
strand = '-'
repeat = a[9]
family = a[10]
cols = (chrm, start, end, '%s;%s' % (family,repeat), '.', strand)
print('\t'.join(cols))
line = rm_in.readline()
rm_in.close()
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| [
"drk@calicolabs.com"
] | drk@calicolabs.com |
c1881a3f7167b40d0502938ef8175b49df657c8c | e86364b36b82c24596dd71f9fa2221d036e8defc | /collections/ansible_collections/arista/eos/plugins/modules/eos_l2_interfaces.py | bb55e26753e4377bd7f101bd2b60c9ca4dbcffa8 | [] | no_license | ganeshrn/network_collections_migration | b3f11be5ecb9557787bcd12ca01b227379c7c102 | 8f56b60bfde606b291627665a1218bf7ce15f3a1 | refs/heads/master | 2020-09-12T12:10:58.189645 | 2019-11-18T11:44:48 | 2019-11-18T11:44:48 | 222,419,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,267 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
##############################################
# WARNING #
##############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
##############################################
"""
The module file for eos_l2_interfaces
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = '''module: eos_l2_interfaces
short_description: Manages Layer-2 interface attributes of Arista EOS devices
description: This module provides declarative management of Layer-2 interface on Arista
EOS devices.
author: Nathaniel Case (@qalthos)
notes:
- Tested against Arista EOS 4.20.10M
- This module works with connection C(network_cli). See the L(EOS Platform Options,../network/user_guide/platform_eos.html).
options:
config:
description: A dictionary of Layer-2 interface options
type: list
elements: dict
suboptions:
name:
description:
- Full name of interface, e.g. Ethernet1.
type: str
required: true
access:
description:
- Switchport mode access command to configure the interface as a layer 2 access.
type: dict
suboptions:
vlan:
description:
- Configure given VLAN in access port. It's used as the access VLAN ID.
type: int
trunk:
description:
- Switchport mode trunk command to configure the interface as a Layer 2 trunk.
type: dict
suboptions:
native_vlan:
description:
- Native VLAN to be configured in trunk port. It is used as the trunk
native VLAN ID.
type: int
trunk_allowed_vlans:
description:
- List of allowed VLANs in a given trunk port. These are the only VLANs
that will be configured on the trunk.
type: list
state:
choices:
- merged
- replaced
- overridden
- deleted
default: merged
description:
- The state of the configuration after module completion
type: str
'''
EXAMPLES = """
---
# Using merged
# Before state:
# -------------
#
# veos#show running-config | section interface
# interface Ethernet1
# switchport access vlan 20
# !
# interface Ethernet2
# switchport trunk native vlan 20
# switchport mode trunk
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
- name: Merge provided configuration with device configuration.
eos_l2_interfaces:
config:
- name: Ethernet1
trunk:
native_vlan: 10
- name: Ethernet2
access:
vlan: 30
state: merged
# After state:
# ------------
#
# veos#show running-config | section interface
# interface Ethernet1
# switchport trunk native vlan 10
# switchport mode trunk
# !
# interface Ethernet2
# switchport access vlan 30
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
# Using replaced
# Before state:
# -------------
#
# veos2#show running-config | s int
# interface Ethernet1
# switchport access vlan 20
# !
# interface Ethernet2
# switchport trunk native vlan 20
# switchport mode trunk
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
- name: Replace device configuration of specified L2 interfaces with provided configuration.
eos_l2_interfaces:
config:
- name: Ethernet1
trunk:
native_vlan: 20
trunk_vlans: 5-10, 15
state: replaced
# After state:
# ------------
#
# veos#show running-config | section interface
# interface Ethernet1
# switchport trunk native vlan 20
# switchport trunk allowed vlan 5-10,15
# switchport mode trunk
# !
# interface Ethernet2
# switchport trunk native vlan 20
# switchport mode trunk
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
# Using overridden
# Before state:
# -------------
#
# veos#show running-config | section interface
# interface Ethernet1
# switchport access vlan 20
# !
# interface Ethernet2
# switchport trunk native vlan 20
# switchport mode trunk
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
- name: Override device configuration of all L2 interfaces on device with provided configuration.
eos_l2_interfaces:
config:
- name: Ethernet2
access:
vlan: 30
state: overridden
# After state:
# ------------
#
# veos#show running-config | section interface
# interface Ethernet1
# !
# interface Ethernet2
# switchport access vlan 30
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
# Using deleted
# Before state:
# -------------
#
# veos#show running-config | section interface
# interface Ethernet1
# switchport access vlan 20
# !
# interface Ethernet2
# switchport trunk native vlan 20
# switchport mode trunk
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
# !
- name: Delete EOS L2 interfaces as in given arguments.
eos_l2_interfaces:
config:
- name: Ethernet1
- name: Ethernet2
state: deleted
# After state:
# ------------
#
# veos#show running-config | section interface
# interface Ethernet1
# !
# interface Ethernet2
# !
# interface Management1
# ip address dhcp
# ipv6 address auto-config
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: The configuration returned will always be in the same format of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: The configuration returned will always be in the same format of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['interface Ethernet2', 'switchport access vlan 20']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.arista.eos.plugins.module_utils.network.eos.argspec.l2_interfaces.l2_interfaces import L2_interfacesArgs
from ansible_collections.arista.eos.plugins.module_utils.network.eos.config.l2_interfaces.l2_interfaces import L2_interfaces
def main():
"""
Main entry point for module execution
:returns: the result form module invocation
"""
module = AnsibleModule(argument_spec=L2_interfacesArgs.argument_spec,
supports_check_mode=True)
result = L2_interfaces(module).execute_module()
module.exit_json(**result)
if __name__ == '__main__':
main()
| [
"ganesh634@gmail.com"
] | ganesh634@gmail.com |
38cfc7ab0516a8cc95e4612d23acabbe0368a327 | 2c13edf74323021a63697216bb39e1e2e9758342 | /listBackupFiles.py | 7efc89d660b08ec426460fdb5e011811f2a48e75 | [] | no_license | andreycizov/Ahsay-OBC-Restore-tool | 5126d0e9460b3a78ed51c41bacd7d1d3eb7372ea | b94450a8bd5de47ab1d909df93097950cd1af6c6 | refs/heads/master | 2021-01-22T19:13:47.283496 | 2011-08-26T17:53:23 | 2011-08-26T17:53:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/python3
from AhsayAPI import GZStreamRead as GZ
from AhsayAPI import XMLFileList as FL
from AhsayAPI import XMLFileInfo
from AhsayAPI import to_size
from AhsayAPI import salt
from AhsayAPI.urls import ls
from AhsayAPI.urls import urlopen
import sys
t = ls({'dir':sys.argv[1],
'backupjob':'Current',
'start_page':sys.argv[2]})
r = urlopen(t)
print(r.getheader('Content-Length'))
gz = GZ(r)
def callback(self, event, e):
if e.tag == 'F':
f = XMLFileInfo(e)
type = 'F'
if f.type == "T":
type = 'D'
print("{type} {name} {size} {size_enc} {salt}".format(
type=type, name=f.path, size=to_size(f.size),
size_enc=to_size(f.size_enc),
salt=salt(f.salt)
))
print(e.attrib)
#self.stdcallback(event, e)
def endcallback(self):
pass
fl = FL(gz, callback=callback, endcallback=endcallback)
fl.start()
print('Transferred data:', to_size(gz.n_comp),'/',to_size(gz.n))
| [
"acizov@gmail.com"
] | acizov@gmail.com |
a12ba1e7a58cbb5f71f5e88633c027104e7aa5a3 | 060b4486244008e40137b590397ed1264b4116de | /poetry/core/_vendor/configparser.py | 00c2335b787069a1566bc45f8b959f71f3307322 | [
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hoefling/core | d8bfa4b59e81a0a86d56c75e4b3d135c9f91ad2b | 7e9fbac94bbd2211b63421dc47fa91cb507c466d | refs/heads/master | 2022-05-08T06:29:41.070632 | 2020-04-22T18:59:34 | 2020-04-22T18:59:34 | 257,990,591 | 0 | 0 | MIT | 2020-04-22T18:46:57 | 2020-04-22T18:46:56 | null | UTF-8 | Python | false | false | 1,586 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Convenience module importing everything from backports.configparser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from poetry.core._vendor.backports.configparser import (
RawConfigParser,
ConfigParser,
SafeConfigParser,
SectionProxy,
Interpolation,
BasicInterpolation,
ExtendedInterpolation,
LegacyInterpolation,
NoSectionError,
DuplicateSectionError,
DuplicateOptionError,
NoOptionError,
InterpolationError,
InterpolationMissingOptionError,
InterpolationSyntaxError,
InterpolationDepthError,
ParsingError,
MissingSectionHeaderError,
ConverterMapping,
DEFAULTSECT,
MAX_INTERPOLATION_DEPTH,
)
from poetry.core._vendor.backports.configparser import Error, _UNSET, _default_dict, _ChainMap # noqa: F401
__all__ = [
"NoSectionError",
"DuplicateOptionError",
"DuplicateSectionError",
"NoOptionError",
"InterpolationError",
"InterpolationDepthError",
"InterpolationMissingOptionError",
"InterpolationSyntaxError",
"ParsingError",
"MissingSectionHeaderError",
"ConfigParser",
"SafeConfigParser",
"RawConfigParser",
"Interpolation",
"BasicInterpolation",
"ExtendedInterpolation",
"LegacyInterpolation",
"SectionProxy",
"ConverterMapping",
"DEFAULTSECT",
"MAX_INTERPOLATION_DEPTH",
]
# NOTE: names missing from __all__ imported anyway for backwards compatibility.
| [
"sebastien@eustace.io"
] | sebastien@eustace.io |
07a1b69874544a91d2d108bcfcdd3e27ba9f3de2 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /402/402.remove-k-digits.282089710.Accepted.leetcode.python3.py | 5768f0de4828d4f67130aabc3ed918c82c4573aa | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | class Solution:
def removeKdigits(self, num: str, k: int) -> str:
if k == num:
return "0"
stack = []
for i in range(len(num)):
while k and stack and stack[-1] > num[i]:
stack.pop()
k -= 1
stack.append(num[i])
while k:
stack.pop()
k -= 1
index = 0
while index < len(stack) and stack[index] == "0":
index += 1
return "0" if index == len(stack) else "".join(stack[index:])
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
2bc9de9da32d67746c3de41150491c969903db68 | 94d8bb0e323ee478b580f766b7700acd32b519fd | /augmented-reality/stitch.py | 7e6f169b0c70081fb1478e5f69c51dd4d3a04369 | [] | no_license | vanstorm9/SLAM-experiments | d5db1d7680193d664029230e135ddb0d5648d38d | b17c4f83ae2c7a9dfafebd8559953d7341699fc6 | refs/heads/master | 2021-01-17T20:26:38.135564 | 2016-08-20T05:59:06 | 2016-08-20T05:59:06 | 65,873,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | from panorama.panorama import Stitcher
import imutils
import numpy as np
import os
import cv2
widthResize = 600
def cropFocus(img):
subtractThresh = 20
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,1,255,cv2.THRESH_BINARY)
contours = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x,y,w,h = cv2.boundingRect(cnt)
crop = img[y:y+h,x:x+w-subtractThresh]
return crop
def listDirCreate(root):
for imgPath in root:
imgPathList.append(imgPath)
imgPathList.sort()
return imgPathList
def drawHarris(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray,2,3,0.04)
#result is dilated for marking the corners, not important
dst = cv2.dilate(dst,None)
# Threshold for an optimal value, it may vary depending on the image.
img[dst>0.01*dst.max()]=[0,0,255]
return img
root_path = 'panorama-input/'
slash = '/'
root = os.listdir(root_path)
i = 0
result = None
imgPathList = []
# Creating list of paths
listDirCreate = listDirCreate(root)
print imgPathList
for fn in imgPathList:
print fn
if i == 0:
# This is our first image
mainImage = cv2.imread(root_path + slash + fn)
mainImage = imutils.resize(mainImage, width=widthResize)
#cv2.imwrite("mainImage.jpg", mainImage)
i = i + 1
continue
else:
# We shall combine current image with main image
#mainImage = cv2.imread("mainImage.jpg")
imageB = cv2.imread(root_path + slash + fn)
imageB = imutils.resize(imageB, width=widthResize)
# stitch the images together to create a panorama
stitcher = Stitcher()
result, vis = stitcher.stitch([mainImage, imageB], showMatches=False)
mainImage = cropFocus(result)
# show the images
'''
cv2.imshow("Image A", mainImage)
cv2.imshow("Image B", imageB)
'''
'''
cv2.imwrite("result.jpg", result)
cv2.imshow("Result", result)
cv2.waitKey(0)
'''
i = i + 1
cv2.imwrite("result.jpg", result)
pointresult = result.copy()
pointresult = drawHarris(pointresult)
cv2.imshow("Result", result)
cv2.imshow("Points", pointresult)
cv2.waitKey(0)
| [
"antlowhur@yahoo.com"
] | antlowhur@yahoo.com |
43864d0cecc6965fd89fe49768f52b05fda4096d | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/hllbra005/ndom.py | e5b1bd8aa42e852320f2d1d656ace34011f9d230 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,597 | py | # Functions for ndom calculations
# Brandon Hall (HLLBRA005)
# 4 April 2014
def ndom_to_decimal(ndom): # This method converts ndom to decimal numbers, i.e base 6 to base 10
decimal = ndom
ndomS = str(ndom) # Converts the parsed number ndom to a string
length = len(ndomS) # Length of the string is taken
if(length == 2): # If its two numbers
decimal = ( ( int(ndomS[0]) * 6 ) + int(ndomS[1]) )
if (length == 3): # If its three numbers
nd = ( ( int(ndomS[0]) * 6 ) + int(ndomS[1]) )
decimal = (nd*6) + int(ndomS[2])
return decimal
def decimal_to_ndom(decimal):
power = 0
multiple = decimal
re = ""
ans = 0
while (multiple >=1):
re += str((multiple)%6) #remainder
multiple = multiple//6 #base number
return int(re[::-1])
def ndom_add(a,b): # converts to decimal, adds two numbers
# it then converts them into ndom numbers
decA = ndom_to_decimal(a)
decB = ndom_to_decimal(b)
dec = decA+decB
ndom_tot = decimal_to_ndom(dec)
return ndom_tot
def ndom_multiply(a,b): # This method multiplies two ndom numbers
# It does this by multipying the numbers and then
# converting them to ndom
A = ndom_to_decimal(a)
B = ndom_to_decimal(b)
dec = A*B
ndom_tot = decimal_to_ndom(dec)
return ndom_tot | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
ff89354798fc6cbc57a7af70c715c3cdaeb26fd3 | 3fe1b6f36bfd02156f606cf90797d69b18dd19d2 | /creme/optim/newton.py | 4997a95c4ab37fe8b551142380fd0e79804c74ff | [
"BSD-3-Clause"
] | permissive | mihir-thakkar-ai/creme | a19a1975bb462a1a93046b6ea55830e88846cb88 | 008b0c1beb26b36b448fc3d04537e02e66d402b3 | refs/heads/master | 2022-12-18T01:15:18.132117 | 2020-09-15T20:17:16 | 2020-09-15T20:17:16 | 296,288,773 | 0 | 0 | BSD-3-Clause | 2020-09-17T10:04:27 | 2020-09-17T10:04:26 | null | UTF-8 | Python | false | false | 1,182 | py | from .. import utils
from . import base
__all__ = ['Newton']
class Newton(base.Optimizer):
"""Online Newton Step (ONS) optimizer.
This optimizer uses second-order information (i.e. the Hessian of the cost function) in
addition to first-order information (i.e. the gradient of the cost function).
Parameters:
lr
eps
References:
1. [Hazan, E., Agarwal, A. and Kale, S., 2007. Logarithmic regret algorithms for online convex optimization. Machine Learning, 69(2-3), pp.169-192](https://www.cs.princeton.edu/~ehazan/papers/log-journal.pdf)
"""
def __init__(self, lr=.1, eps=1e-5):
super().__init__(lr)
self.eps = eps
self.H_inv = {}
def _update_after_pred(self, w, g):
for i in g:
if (i, i) not in self.H_inv:
self.H_inv[i, i] = self.eps
# Update the Hessian
self.H = utils.math.sherman_morrison(A_inv=self.H_inv, u=g, v=g)
# Calculate the update step
step = utils.math.dotvecmat(x=g, A=self.H_inv)
# Update the weights
for i, s in step.items():
w[i] -= self.learning_rate * s
return w
| [
"maxhalford25@gmail.com"
] | maxhalford25@gmail.com |
78ed70cfb6eeef227fb3bfb09143da364540ab98 | e4ad15cb20c2701f33c60001841a66fc03cd45ff | /pre_process_3.py | 586671edb3888c4c121c715fe9643c56c4479c0a | [
"MIT"
] | permissive | foamliu/Face-Attributes | 90fb70947155d0c773a4bf2888190a843a280db5 | fbf90cd55b01e4b84ec69d01132b4b77e0417952 | refs/heads/master | 2020-05-21T04:59:33.707892 | 2019-09-23T08:18:15 | 2019-09-23T08:18:15 | 185,909,551 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | import pickle
import cv2 as cv
from tqdm import tqdm
from config import im_size, pickle_file_landmarks, pickle_file_aligned
from utils import crop_image
ONE_SECOND = 1
if __name__ == "__main__":
print('loading {}...'.format(pickle_file_landmarks))
with open(pickle_file_landmarks, 'rb') as file:
data = pickle.load(file)
items = data['samples']
print('num_items: ' + str(len(items)))
samples = []
for item in tqdm(items):
try:
full_path = item['full_path']
bbox = item['bboxes'][0]
img = cv.imread(full_path)
img = crop_image(img, bbox)
img = cv.resize(img, (im_size, im_size))
samples.append(item)
except:
pass
print('num_items: ' + str(len(samples)))
print('saving {}...'.format(pickle_file_aligned))
with open(pickle_file_aligned, 'wb') as file:
save = {
'samples': samples
}
pickle.dump(save, file, pickle.HIGHEST_PROTOCOL)
| [
"liuyang12@focusmedia.cn"
] | liuyang12@focusmedia.cn |
4813e58f082e8347dbf5a8f63497e847b4c8ac7f | f3f19eaa73f2adb4375dbe6fbfa89eaaa8796cbc | /code/preprocessing/feature_engineering/rel_feature_groups/dep.py | 630cbe1fce98362a26687ff7ec077c7681d5248d | [] | no_license | sanjukladher/WNUT_2020_RE | 2b4c172de236a7766d27a588aa09a2f2f5d5a402 | 3ea31038bdc4a3b39def354ebee69ab00805ab0d | refs/heads/master | 2023-01-06T04:38:11.726589 | 2020-10-30T03:19:14 | 2020-10-30T03:19:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | from corpus.ProtoFile import Relation
from preprocessing.feature_engineering.datasets import RelationWindow
class DependencyFeatureGroup(object):
def __init__(self):
pass
def convert_window(self, window):
result = []
assert isinstance(window, RelationWindow)
if window.relations is not None:
for rel in window.relations:
assert isinstance(rel, Relation)
result.append([self.et1dw1(rel), # combination of mention entity types
self.et2dw2(rel),
self.h1dw1(rel),
self.h2dw2(rel),
self.et12SameNP(rel),
self.et12SamePP(rel),
self.et12SameVP(rel)
])
# print("done")
return result
@staticmethod
def get_words(tokens):
l= [token.word for token in tokens]
if len(l)==0:
l = [""]
return l
def et1dw1(self, rel):
et = rel.arg1_tag.tag_name
dep = rel.arg1_deps()
return "et1dw1={0}{1}".format(et, dep)
def et2dw2(self, rel):
et = rel.arg2_tag.tag_name
dep = rel.arg2_deps()
return "et2dw2={0}{1}".format(et, dep)
def h1dw1(self, rel):
arg1_tokens = rel.get_arg1_tokens()
words = self.get_words(arg1_tokens)
h1 = words[-1]
dep = rel.arg1_deps()
return "h1dw1={0}{1}".format(h1, dep)
def h2dw2(self, rel):
arg2_tokens = rel.get_arg2_tokens()
words = self.get_words(arg2_tokens)
h2 = words[-1]
dep = rel.arg2_deps()
return "h1dw1={0}{1}".format(h2, dep)
@staticmethod
def et12(rel):
return "et12={0}".format("_".join([rel.arg1_tag.tag_name, rel.arg2_tag.tag_name]))
def et12SameNP(self, rel):
et12 = self.et12(rel)
return "et12SameNP={0}_{1}".format(et12, rel.sameNP())
def et12SamePP(self, rel):
et12 = self.et12(rel)
return "et12SamePP={0}_{1}".format(et12, rel.samePP())
def et12SameVP(self, rel):
et12 = self.et12(rel)
return "et12SameVB={0}_{1}".format(et12, rel.sameVP())
| [
"jeniya.tabassum@gmail.com"
] | jeniya.tabassum@gmail.com |
301c79bdf0ec8be3cc4f05ca54b72e601197f4c9 | bba7f5a363d57473f583747af259f5ff60a53631 | /webrobot/app/main/controller/test_controller.py | 4dcd305dc12cb238fc24ba8826df878acd5d3efb | [
"MIT"
] | permissive | githubwangcc/Auto-Test-System | 63f7953c0dd90859a3cd0b9330e102330df064aa | 4462fde8c23fef625f459d51d6bb7560ba29d726 | refs/heads/master | 2020-09-21T13:58:29.185560 | 2019-10-22T01:23:48 | 2019-10-22T01:23:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,858 | py | import os
import time
from pathlib import Path
from flask import Flask, send_from_directory, request
from flask_restplus import Resource
from app.main.util.decorator import token_required, organization_team_required_by_args
from app.main.util.get_path import get_test_result_path, get_back_scripts_root
from ..config import get_config
from ..model.database import *
from ..util.dto import TestDto
from ..util.tarball import make_tarfile, pack_files
from ..util.errors import *
api = TestDto.api
_test_cases = TestDto.test_cases
_test_suite = TestDto.test_suite
@api.route('/script')
@api.response(404, 'Script not found.')
@api.response(200, 'Download the script successfully.')
class ScriptDownload(Resource):
# @token_required
@api.doc('get_test_script')
@api.param('id', description='The task id')
@api.param('test', description='The test suite name')
def get(self):
"""
Get the test script
Get the bundled file that contains all necessary test scripts that the test needs to run
"""
task_id = request.args.get('id', None)
if not task_id:
return error_message(EINVAL, 'Field id is required'), 400
test_suite = request.args.get('test', None)
if not test_suite:
return error_message(EINVAL, 'Field test is required'), 400
task = Task.objects(pk=task_id).first()
if not task:
return error_message(ENOENT, 'Task not found'), 404
if test_suite.endswith('.py'):
test_suite = test_suite[0:-3]
result_dir = get_test_result_path(task)
scripts_root = get_back_scripts_root(task)
results_tmp = result_dir / 'temp'
script_file = scripts_root / (test_suite + '.py')
if not os.path.exists(script_file):
return error_message(ENOENT, "file {} does not exist".format(script_file)), 404
for _ in range(3):
tarball = pack_files(test_suite, scripts_root, results_tmp)
if tarball is None:
print("retry packaging files")
time.sleep(1)
else:
tarball = os.path.basename(tarball)
return send_from_directory(Path(os.getcwd()) / results_tmp, tarball)
else:
return error_message(EIO, "packaging files failed"), 404
@api.route('/<test_suite>')
@api.param('test_suite', 'The test suite to query')
@api.response(404, 'Script not found.')
class TestSuiteGet(Resource):
@token_required
@organization_team_required_by_args
@api.doc('get_the_test_cases')
@api.marshal_with(_test_cases)
def get(self, test_suite, **kwargs):
"""Get the test cases of a test suite"""
organization = kwargs['organization']
team = kwargs['team']
test = Test.objects(test_suite=test_suite, organization=organization, team=team).first()
if not test:
return error_message(ENOENT, 'Test {} not found'.format(test_suite)), 404
return {
'test_cases': test.test_cases,
'test_suite': test.test_suite
}
@api.route('/')
class TestSuitesList(Resource):
@token_required
@organization_team_required_by_args
@api.doc('get_the_test_suite_list')
@api.marshal_list_with(_test_suite)
def get(self, **kwargs):
"""Get the test suite list which contains some necessary test details"""
organization = kwargs['organization']
team = kwargs['team']
tests = Test.objects(organization=organization, team=team)
ret = []
for t in tests:
ret.append({
'id': str(t.id),
'test_suite': t.test_suite,
'test_cases': t.test_cases,
'variables': t.variables,
'author': t.author.name
})
return ret
| [
"panzilla@163.com"
] | panzilla@163.com |
f21aa89b98bd1ddc09335867c23cbf53a8a6c2a7 | 06f238313235b279cad3ade94cb69f8c4f073215 | /model_verbq_working.py | 5638e6d98b9d212a37be48e2a72ecbf0b599877d | [
"MIT"
] | permissive | thilinicooray/mac-network-pytorch | f26f9fac0e67c21abdff6862a696187c4eb3126e | 0e4bf3f7f301570b652490f697758361c866f3c1 | refs/heads/master | 2020-03-29T04:39:35.133060 | 2019-05-14T12:48:00 | 2019-05-14T12:48:00 | 149,541,433 | 0 | 0 | MIT | 2018-09-20T02:44:20 | 2018-09-20T02:44:20 | null | UTF-8 | Python | false | false | 5,018 | py | import torch
import torch.nn as nn
from attention import Attention, NewAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
import torch.nn.functional as F
import torchvision as tv
import utils
import numpy as np
import model_verb_directcnn
import model_roles_independent
class vgg16_modified(nn.Module):
def __init__(self):
super(vgg16_modified, self).__init__()
vgg = tv.models.vgg16_bn(pretrained=True)
self.vgg_features = vgg.features
def rep_size(self):
return 1024
def base_size(self):
return 512
def forward(self,x):
#return self.dropout2(self.relu2(self.lin2(self.dropout1(self.relu1(self.lin1(self.vgg_features(x).view(-1, 512*7*7)))))))
features = self.vgg_features(x)
return features
class TopDown(nn.Module):
def __init__(self,
vocab_size,
embed_hidden=300,
mlp_hidden=512):
super(TopDown, self).__init__()
self.vocab_size = vocab_size
self.q_emb = nn.LSTM(embed_hidden, mlp_hidden,
batch_first=True, bidirectional=True)
self.lstm_proj = nn.Linear(mlp_hidden * 2, mlp_hidden)
self.v_att = NewAttention(mlp_hidden, mlp_hidden, mlp_hidden)
self.classifier = nn.Sequential(
nn.Linear(mlp_hidden * 7 *7 + mlp_hidden, mlp_hidden*8),
nn.BatchNorm1d(mlp_hidden*8),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(mlp_hidden * 8, mlp_hidden*8),
nn.BatchNorm1d(mlp_hidden*8),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(mlp_hidden*8, self.vocab_size)
)
def forward(self, img, q):
batch_size = img.size(0)
w_emb = q
self.q_emb.flatten_parameters()
lstm_out, (h, _) = self.q_emb(w_emb)
q_emb = h.permute(1, 0, 2).contiguous().view(batch_size, -1)
q_emb = self.lstm_proj(q_emb)
att = self.v_att(img, q_emb)
v_emb = (att * img)
v_emb = v_emb.permute(0, 2, 1)
v_emb = v_emb.contiguous().view(-1, 512*7*7)
v_emb_with_q = torch.cat([v_emb, q_emb], -1)
logits = self.classifier(v_emb_with_q)
return logits
class BaseModel(nn.Module):
def __init__(self, encoder,
gpu_mode,
embed_hidden=300,
mlp_hidden = 512
):
super(BaseModel, self).__init__()
self.normalize = tv.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.train_transform = tv.transforms.Compose([
tv.transforms.RandomRotation(10),
tv.transforms.RandomResizedCrop(224),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
self.normalize,
])
self.dev_transform = tv.transforms.Compose([
tv.transforms.Resize(224),
tv.transforms.CenterCrop(224),
tv.transforms.ToTensor(),
self.normalize,
])
self.encoder = encoder
self.gpu_mode = gpu_mode
self.mlp_hidden = mlp_hidden
self.verbq_word_count = len(self.encoder.verb_q_words)
self.n_verbs = self.encoder.get_num_verbs()
self.conv = vgg16_modified()
'''for param in self.verb_module.parameters():
param.require_grad = False
for param in self.role_module.parameters():
param.require_grad = False
for param in self.conv.parameters():
param.require_grad = False'''
self.verb_vqa = TopDown(self.n_verbs)
self.verb_q_emb = nn.Embedding(self.verbq_word_count + 1, embed_hidden, padding_idx=self.verbq_word_count)
def train_preprocess(self):
return self.train_transform
def dev_preprocess(self, ):
return self.dev_transform
def forward(self, img, verbs=None, labels=None):
verb_q_idx = self.encoder.get_common_verbq(img.size(0))
if self.gpu_mode >= 0:
verb_q_idx = verb_q_idx.to(torch.device('cuda'))
img_embd = self.conv(img)
batch_size, n_channel, conv_h, conv_w = img_embd.size()
img_embd = img_embd.view(batch_size, n_channel, -1)
img_embd = img_embd.permute(0, 2, 1)
q_emb = self.verb_q_emb(verb_q_idx)
verb_pred = self.verb_vqa(img_embd, q_emb)
loss = self.calculate_loss(verb_pred, verbs)
return verb_pred, loss
def calculate_loss(self, verb_pred, gt_verbs):
batch_size = verb_pred.size()[0]
loss = 0
#print('eval pred verbs :', pred_verbs)
for i in range(batch_size):
verb_loss = 0
verb_loss += utils.cross_entropy_loss(verb_pred[i], gt_verbs[i])
loss += verb_loss
final_loss = loss/batch_size
#print('loss :', final_loss)
return final_loss | [
"thilinicooray.ucsc@gmail.com"
] | thilinicooray.ucsc@gmail.com |
9ada941a10b5da9a1d14e5a9f5e8e2771b3c806c | 27440297f68994be89764ec1eb996df19c408749 | /processing/merge.py | f30b74502636511db234e2edb0563fa361bbf836 | [] | no_license | anoop-phoenix/Web_Presentation | ed084dfb56e8c9c6eb8d5e00b339cb7be989da1d | cb253cb6290a6c52183bae40330d8b79de69bbc1 | refs/heads/master | 2022-07-23T06:17:23.962036 | 2020-05-11T01:56:03 | 2020-05-11T01:56:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,209 | py | import json
import cv2
import numpy as np
from os.path import join as pjoin
import os
from lib_uied.ip_preprocessing import preprocess
from CONFIG import Config
C = Config()
compo_index = {'img':0, 'text':0, 'button':0, 'input':0, 'icon':0}
def draw_bounding_box_class(org, corners, compo_class, color_map=C.COLOR, line=3, show=False, name='img'):
board = org.copy()
for i in range(len(corners)):
if compo_class[i] == 'text':
continue
board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color_map[compo_class[i]], line)
board = cv2.putText(board, compo_class[i], (corners[i][0]+5, corners[i][1]+20),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, color_map[compo_class[i]], 2)
if show:
cv2.imshow(name, board)
cv2.waitKey(0)
return board
def draw_bounding_box(org, corners, color=(0, 255, 0), line=3, show=False):
board = org.copy()
for i in range(len(corners)):
board = cv2.rectangle(board, (corners[i][0], corners[i][1]), (corners[i][2], corners[i][3]), color, line)
if show:
cv2.imshow('a', board)
cv2.waitKey(0)
return board
def save_clipping(org, corners, compo_classes, compo_index, output_root=C.ROOT_IMG_COMPONENT):
if output_root is None:
output_root = C.ROOT_IMG_COMPONENT
if not os.path.exists(output_root):
os.mkdir(output_root)
pad = 1
for i in range(len(corners)):
compo = compo_classes[i]
(col_min, row_min, col_max, row_max) = corners[i]
col_min = max(col_min - pad, 0)
col_max = min(col_max + pad, org.shape[1])
row_min = max(row_min - pad, 0)
row_max = min(row_max + pad, org.shape[0])
# if component type already exists, index increase by 1, otherwise add this type
compo_path = pjoin(output_root, compo)
if not os.path.exists(compo_path):
os.mkdir(compo_path)
if compo_classes[i] not in compo_index:
compo_index[compo_classes[i]] = 0
else:
compo_index[compo_classes[i]] += 1
clip = org[row_min:row_max, col_min:col_max]
cv2.imwrite(pjoin(compo_path, str(compo_index[compo_classes[i]]) + '.png'), clip)
def save_label_txt(img_path, compo_corners, compo_class, label_txt_path):
f = open(label_txt_path, 'a')
label_txt = img_path + ' '
for i in range(len(compo_corners)):
if compo_class[i] == 'text':
continue
label_txt += ','.join([str(c) for c in compo_corners[i]]) + ',' + str(C.class_index[compo_class[i]]) + ' '
label_txt += '\n'
f.write(label_txt)
def nms(org, corners_compo_old, compos_class_old, corner_text):
corners_compo_refine = []
compos_class_refine = []
corner_text = np.array(corner_text)
for i in range(len(corners_compo_old)):
# if compos_class_old[i] != 'img':
# corners_compo_refine.append(corners_compo_old[i])
# compos_class_refine.append(compos_class_old[i])
# continue
a = corners_compo_old[i]
noise = False
area_a = (a[2] - a[0]) * (a[3] - a[1])
area_text = 0
for b in corner_text:
area_b = (b[2] - b[0]) * (b[3] - b[1])
# get the intersected area
col_min_s = max(a[0], b[0])
row_min_s = max(a[1], b[1])
col_max_s = min(a[2], b[2])
row_max_s = min(a[3], b[3])
w = np.maximum(0, col_max_s - col_min_s + 1)
h = np.maximum(0, row_max_s - row_min_s + 1)
inter = w * h
if inter == 0:
continue
# calculate IoU
ioa = inter / area_a
iob = inter / area_b
if compos_class_old[i] == 'img':
# sum up all text area in a img
# if iob > 0.8:
area_text += inter
# loose threshold for img
if ioa > 0.38:
noise = True
break
else:
# tight threshold for other components
if ioa > 0.8:
noise = True
break
# check if img is text paragraph
if compos_class_old[i] == 'img' and area_text / area_a > 0.8:
noise = True
if not noise:
corners_compo_refine.append(corners_compo_old[i])
compos_class_refine.append(compos_class_old[i])
return corners_compo_refine, compos_class_refine
def refine_text(org, corners_text, max_line_gap, min_word_length):
def refine(bin):
head = 0
rear = 0
gap = 0
get_word = False
for i in range(bin.shape[1]):
# find head
if not get_word and np.sum(bin[:, i]) != 0:
head = i
rear = i
get_word = True
continue
if get_word and np.sum(bin[:, i]) != 0:
rear = i
continue
if get_word and np.sum(bin[:, i]) == 0:
gap += 1
if gap > max_line_gap:
if (rear - head) > min_word_length:
corners_text_refine.append((head + col_min, row_min, rear + col_min, row_max))
gap = 0
get_word = False
if get_word and (rear - head) > min_word_length:
corners_text_refine.append((head + col_min, row_min, rear + col_min, row_max))
corners_text_refine = []
pad = 1
for corner in corners_text:
(col_min, row_min, col_max, row_max) = corner
col_min = max(col_min - pad, 0)
col_max = min(col_max + pad, org.shape[1])
row_min = max(row_min - pad, 0)
row_max = min(row_max + pad, org.shape[0])
clip = org[row_min:row_max, col_min:col_max]
clip_bin = preprocess(clip)
refine(clip_bin)
return corners_text_refine
def incorporate(img_path, compo_path, text_path, output_path_img, output_path_label_txt, img_section, is_clip=False, clip_path=None):
img = cv2.imread(img_path)
img = img[:img_section[0], :img_section[1]]
compo_f = open(compo_path, 'r')
text_f = open(text_path, 'r')
compos = json.load(compo_f)
corners_compo = []
compos_class = []
corners_text = []
for compo in compos['compos']:
corners_compo.append([compo['column_min'], compo['row_min'], compo['column_max'], compo['row_max']])
compos_class.append(compo['class'])
for line in text_f.readlines():
if len(line) > 1:
corners_text.append([int(c) for c in line[:-1].split(',')])
corners_text = refine_text(img, corners_text, 20, 10)
corners_compo_new, compos_class_new = nms(img, corners_compo, compos_class, corners_text)
board = draw_bounding_box_class(img, corners_compo_new, compos_class_new)
save_label_txt(img_path, corners_compo_new, compos_class_new, output_path_label_txt)
cv2.imwrite(output_path_img, board)
print('*** Merge Complete and Save to', output_path_img, '***')
if is_clip:
save_clipping(img, corners_compo_new, compos_class_new, compo_index, clip_path)
| [
"dsh15325@163.com"
] | dsh15325@163.com |
82910b40db0a306227fb84d75d64878ce5263901 | ac415850ca1926439a5f882c4a3b6c6105247149 | /setup.py | db2d3d2e0e3fbbb8faf3f159cb6b0aae0d70b750 | [] | no_license | RedTurtle/collective.itvalidators | 2a4e816628e48e218f82d626f3d48adf191977c3 | 86f59adf51ea185796d7c8e5007e3fd9ebf66f92 | refs/heads/master | 2020-05-20T16:46:43.456230 | 2012-11-12T13:52:01 | 2012-11-12T13:52:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from setuptools import setup, find_packages
import os
version = '1.0.1.dev0'
tests_require=['zope.testing', 'Products.CMFTestCase']
setup(name='collective.itvalidators',
version=version,
description="A set of Archetypes validators for Plone, some for Italian specific needs, others useful for all",
long_description=open("README.rst").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Framework :: Plone",
"Framework :: Plone :: 3.3",
"Framework :: Plone :: 4.0",
"Framework :: Plone :: 4.1",
"Framework :: Plone :: 4.2",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Development Status :: 5 - Production/Stable",
],
keywords='plone archetypes validator plonegov',
author='RedTurtle Technology',
author_email='sviluppoplone@redturtle.it',
url='http://plone.org/products/collective.itvalidators',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'Products.validation',
],
tests_require=tests_require,
extras_require=dict(test=tests_require),
test_suite='collective.itvalidators.tests.test_validation.test_suite',
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"luca@keul.it"
] | luca@keul.it |
3bae7b1543b54747cd569854abb8d5a2f2c55705 | 1ee2087e1879b3d40661940f630f94576b38a75b | /migrations/versions/275b0e49dff7_.py | 8b691a445a1f3fb66ce336331ce9cbe0af46bfb4 | [
"Apache-2.0"
] | permissive | dpdi-unifor/limonero | be332baf0c3596c2195c8aaaefd67def64a57c8a | 3b46f780f82e1d291ebe0120d95c71e82cd46ed9 | refs/heads/master | 2023-01-03T22:00:06.474233 | 2020-08-06T18:04:21 | 2020-08-06T18:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | """empty messagepark-2.3.0/
Revision ID: 275b0e49dff7
Revises: 66d4be40bced
Create Date: 2018-07-11 16:15:33.196417
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.sql import text
# revision identifiers, used by Alembic.
revision = '275b0e49dff7'
down_revision = '66d4be40bced'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('data_source',
sa.Column('command', mysql.LONGTEXT(), nullable=True))
op.get_bind().execute(text(
"ALTER TABLE data_source CHANGE `format` `format` "
"ENUM('XML_FILE','NETCDF4','HDF5','SHAPEFILE','TEXT','CUSTOM','JSON',"
"'CSV','PICKLE','GEO_JSON','JDBC') CHARSET utf8 "
"COLLATE utf8_unicode_ci NOT NULL;"
))
op.get_bind().execute(text("""
ALTER TABLE storage CHANGE `type` `type`
ENUM('HDFS', 'OPHIDIA','ELASTIC_SEARCH','MONGODB',
'POSTGIS','HBASE','CASSANDRA','JDBC') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL;"""
))
op.add_column('storage', sa.Column('enabled', sa.Boolean(), nullable=False,
server_default=sa.schema.DefaultClause(
"1"), default=1))
op.add_column('data_source',
sa.Column('updated', sa.DateTime(), nullable=False,
server_default='2018-01-01'))
def downgrade():
op.drop_column('data_source', 'command')
try:
op.get_bind().execute(text(
"ALTER TABLE data_source CHANGE `format` `format` "
"ENUM('XML_FILE','NETCDF4','HDF5','SHAPEFILE','TEXT','CUSTOM','JSON',"
"'CSV','PICKLE','GEO_JSON') CHARSET utf8 "
"COLLATE utf8_unicode_ci NOT NULL;"
))
op.get_bind().execute(text("""
ALTER TABLE storage CHANGE `type` `type`
ENUM('HDFS', 'OPHIDIA','ELASTIC_SEARCH','MONGODB',
'POSTGIS','HBASE','CASSANDRA') CHARSET utf8 COLLATE utf8_unicode_ci NOT NULL;"""
))
except:
pass
op.drop_column('storage', 'enabled')
op.drop_column('data_source', 'updated')
| [
"waltersf@gmail.com"
] | waltersf@gmail.com |
5aee70ed6493dc5c7fe1fd68084783b335802ad2 | 4bcc9806152542ab43fc2cf47c499424f200896c | /tensorflow/lite/testing/op_tests/space_to_depth.py | 27f5dbde160fea3d10d81554543841f1152f45b4 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause"
] | permissive | tensorflow/tensorflow | 906276dbafcc70a941026aa5dc50425ef71ee282 | a7f3934a67900720af3d3b15389551483bee50b8 | refs/heads/master | 2023-08-25T04:24:41.611870 | 2023-08-25T04:06:24 | 2023-08-25T04:14:08 | 45,717,250 | 208,740 | 109,943 | Apache-2.0 | 2023-09-14T20:55:50 | 2015-11-07T01:19:20 | C++ | UTF-8 | Python | false | false | 2,125 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for space_to_depth."""
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_space_to_depth_tests(options):
"""Make a set of tests to do space_to_depth."""
test_parameters = [{
"dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
"input_shape": [[2, 12, 24, 1]],
"block_size": [2, 3, 4],
"fully_quantize": [False],
}, {
"dtype": [tf.float32],
"input_shape": [[2, 12, 24, 1], [1, 12, 24, 1]],
"block_size": [2, 3, 4],
"fully_quantize": [True],
}]
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=parameters["dtype"],
name="input",
shape=parameters["input_shape"])
out = tf.compat.v1.space_to_depth(
input=input_tensor, block_size=parameters["block_size"])
return [input_tensor], [out]
def build_inputs(parameters, sess, inputs, outputs):
input_values = create_tensor_data(
parameters["dtype"],
parameters["input_shape"],
min_value=-1,
max_value=1)
return [input_values], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_values])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
64c18dd4c90df51f1a5970e4cc74abafd124de81 | 18619af2eb81d74d9e76d61971da1f0fe57b4bbb | /stochastic_optimizer/framework/__init__.py | 70d7241d62b8ae87c23c2fac6491ab6c84e3725d | [] | no_license | asahi417/StochasticOptimizers | 4b9245c7fc99e660d9298077823972cf86e21205 | d98c91136835206dc36d9409e425e1caf4fbb275 | refs/heads/master | 2020-12-01T04:37:20.798101 | 2020-01-01T07:50:58 | 2020-01-01T07:50:58 | 230,559,078 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | from .learning_curve_classifier import LearningCurveClassifier
from .grid_search import GridSearch
__all__ = (
"LearningCurveClassifier",
"GridSearch"
)
| [
"spacefunkspacefunk@gmail.com"
] | spacefunkspacefunk@gmail.com |
8e8baac73edc574040ae13f34cab1d3b1185e8ce | d75371f629cf881de3c49b53533879a5b862da2e | /python/flatten-nested-list-iterator.py | 35a303de0ca7014e654d63308aa377c0f04c99be | [] | no_license | michaelrbock/leet-code | 7352a1e56429bb03842b588ba6bda2a90315a2f4 | 070db59d4e0ded3fb168c89c3d73cb09b3c4fe86 | refs/heads/master | 2020-04-01T05:40:49.262575 | 2019-10-10T22:03:10 | 2019-10-10T22:03:10 | 152,914,631 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,838 | py | # """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
#class NestedInteger(object):
# def isInteger(self):
# """
# @return True if this NestedInteger holds a single integer, rather than a nested list.
# :rtype bool
# """
#
# def getInteger(self):
# """
# @return the single integer that this NestedInteger holds, if it holds a single integer
# Return None if this NestedInteger holds a nested list
# :rtype int
# """
#
# def getList(self):
# """
# @return the nested list that this NestedInteger holds, if it holds a nested list
# Return None if this NestedInteger holds a single integer
# :rtype List[NestedInteger]
# """
class NestedIterator(object):
    """Iterator that yields the integers of a nested list in left-to-right order.

    Elements of *nestedList* may be plain ints, plain lists (as used by the
    ad-hoc tests at the bottom of this file), or NestedInteger objects
    exposing isInteger()/getInteger()/getList() as described above.

    The original skeleton left next()/hasNext() unimplemented (returning
    None), so the module-level asserts below always failed.
    """

    def __init__(self, nestedList):
        """
        Initialize your data structure here.
        :type nestedList: List[NestedInteger]
        """
        self._flat = []   # every integer, already in visit order
        self._pos = 0     # cursor into self._flat
        self._flatten(nestedList)

    def _flatten(self, items):
        """Depth-first walk appending every integer in *items* to self._flat."""
        for item in items:
            if isinstance(item, int):
                self._flat.append(item)
            elif isinstance(item, list):
                self._flatten(item)
            elif item.isInteger():
                self._flat.append(item.getInteger())
            else:
                self._flatten(item.getList())

    def next(self):
        """
        :rtype: int
        """
        value = self._flat[self._pos]
        self._pos += 1
        return value

    def hasNext(self):
        """
        :rtype: bool
        """
        return self._pos < len(self._flat)
# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())
# Ad-hoc smoke tests; plain Python ints/lists stand in for NestedInteger here.
i, v = NestedIterator([[1,1],2,[1,1]]), []
while i.hasNext(): v.append(i.next())
assert v == [1,1,2,1,1]
# Explanation: By calling next repeatedly until hasNext returns false,
# the order of elements returned by next should be: [1,1,2,1,1].
# Example 2:
i, v = NestedIterator([1,[4,[6]]]), []
while i.hasNext(): v.append(i.next())
assert v == [1,4,6]
# Explanation: By calling next repeatedly until hasNext returns false,
# the order of elements returned by next should be: [1,4,6].
"mykel.bock@gmail.com"
] | mykel.bock@gmail.com |
3eb8e8be307b73db69c95441580767c265d74e73 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileHof.py | 84be578dc3dfaf43f6f2f9fbcf9766701417e1b0 | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 7,566 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/profile/ProfileHof.py
from functools import partial
import BigWorld
from adisp import process
from debug_utils import LOG_WARNING, LOG_ERROR
from helpers import dependency, i18n
from gui.Scaleform import MENU
from gui.Scaleform.locale.WAITING import WAITING
from gui.shared.formatters import icons
from skeletons.gui.web import IWebController
from skeletons.gui.server_events import IEventsCache
from gui import DialogsInterface
from gui.Scaleform.daapi.settings.views import VIEW_ALIAS
from gui.Scaleform.daapi.view.lobby.hof.hof_helpers import getHofAchievementsRatingUrl, getHofVehiclesRatingUrl, isHofButtonNew, setHofButtonOld, getHofDisabledKeys, onServerSettingsChange
from gui.Scaleform.daapi.view.lobby.hof.web_handlers import createHofWebHandlers
from gui.Scaleform.daapi.view.meta.ProfileHofMeta import ProfileHofMeta
from gui.Scaleform.genConsts.PROFILE_CONSTANTS import PROFILE_CONSTANTS
from gui.shared import g_eventBus, events, EVENT_BUS_SCOPE
class ProfileHof(ProfileHofMeta):
    """'Hall of Fame' section of the profile window (decompiled WoT client view).

    Talks to the clan web service to read and toggle the player's rating
    visibility, retries failed requests a few times, and falls back to a
    maintenance banner when the service stays unreachable.

    NOTE(review): the `as_*S` calls are the Flash-side (Scaleform) interface
    inherited from ProfileHofMeta; `lobbyContext` is injected by the base
    view — confirm against the framework sources.
    """
    _eventsCache = dependency.descriptor(IEventsCache)
    _clansController = dependency.descriptor(IWebController)
    # Web-service error code -> UI status constant shown to the player.
    # Unknown codes are only logged (see __getRatingStatus.handleError).
    _errorsStatusMap = {'1004': PROFILE_CONSTANTS.HOF_SPECIAL_CASES,
                        '1005': PROFILE_CONSTANTS.HOF_SPECIAL_CASES,
                        '1015': PROFILE_CONSTANTS.HOF_SPECIAL_CASES,
                        '1016': PROFILE_CONSTANTS.HOF_SPECIAL_CASES,
                        '1003': PROFILE_CONSTANTS.HOF_RESULTS_HIDE,
                        '1006': PROFILE_CONSTANTS.HOF_RESULTS_EXCLUSION,
                        '1007': PROFILE_CONSTANTS.HOF_RESULTS_INCLUSION}
    _requestRetriesCount = 3  # attempts before the maintenance banner is shown
    _retryDelay = 0.5  # seconds between retry attempts
    _bgPath = '../maps/icons/hof/hof_back_landing.png'
    _buttonsWithCounter = (PROFILE_CONSTANTS.HOF_ACHIEVEMENTS_BUTTON, PROFILE_CONSTANTS.HOF_VEHICLES_BUTTON)

    def __init__(self, *args):
        super(ProfileHof, self).__init__(*args)
        self.__status = PROFILE_CONSTANTS.HOF_RESULTS_SHOW  # last status reported by the service
        self.__retriesCount = 0
        self.__isMaintenance = False  # True while the maintenance banner is visible
        self.__viewDisposed = False  # set in _dispose; late responses are dropped
        self.__requestProcessing = False  # guards against overlapping requests
        self.__retryCallback = None  # BigWorld callback handle of a pending retry
        return

    def showAchievementsRating(self):
        # Clear the "new" counter for the button, then open the web page.
        setHofButtonOld(PROFILE_CONSTANTS.HOF_ACHIEVEMENTS_BUTTON)
        self.__openHofBrowserView(getHofAchievementsRatingUrl())

    def showVehiclesRating(self):
        # Clear the "new" counter for the button, then open the web page.
        setHofButtonOld(PROFILE_CONSTANTS.HOF_VEHICLES_BUTTON)
        self.__openHofBrowserView(getHofVehiclesRatingUrl())

    @process
    def changeStatus(self):
        """Toggle the player's rating visibility (async via adisp @process)."""
        if self.__status == PROFILE_CONSTANTS.HOF_RESULTS_SHOW:
            # Hiding the rating requires an explicit confirmation dialog.
            success = yield DialogsInterface.showI18nConfirmDialog('hof/excludeRating')
            if success:
                self.__makeRequest(self._clansController.getClanDossier().requestHofUserExclude, PROFILE_CONSTANTS.HOF_RESULTS_EXCLUSION, lambda errorCode: self.__getRatingStatus())
        elif self.__status == PROFILE_CONSTANTS.HOF_RESULTS_HIDE:
            self.__makeRequest(self._clansController.getClanDossier().requestHofUserRestore, PROFILE_CONSTANTS.HOF_RESULTS_INCLUSION, lambda errorCode: self.__getRatingStatus())
        else:
            # Unexpected local state: re-sync with the service.
            LOG_WARNING('Something went wrong! Getting actual status.')
            self.__getRatingStatus()

    def onSectionActivated(self):
        # Refresh the status each time the tab becomes active, unless the
        # feature is disabled server-side or a request is already in flight.
        if self.lobbyContext.getServerSettings().bwHallOfFame.isStatusEnabled:
            if self.__requestProcessing:
                LOG_WARNING('ProfileHof request canceled: another request is processing')
            else:
                self.__getRatingStatus()
        else:
            self.as_setStatusS(PROFILE_CONSTANTS.HOF_SPECIAL_CASES)

    def _populate(self):
        super(ProfileHof, self)._populate()
        # Subscribe to live server-settings updates for the lifetime of the view.
        self.lobbyContext.getServerSettings().onServerSettingsChange += self.__onServerSettingChanged
        self.as_setBackgroundS(self._bgPath)
        self.as_setBtnCountersS(self.__getCountersList())

    def _dispose(self):
        if self.__retryCallback:
            # Cancel a scheduled retry so it does not fire after disposal.
            LOG_WARNING('ProfileHof request canceled: ProfileHof view was disposed')
            BigWorld.cancelCallback(self.__retryCallback)
        self.__viewDisposed = True
        self.lobbyContext.getServerSettings().onServerSettingsChange -= self.__onServerSettingChanged
        super(ProfileHof, self)._dispose()

    def __getCountersList(self):
        """Build the '1' badge list for rating buttons the player has not opened yet."""
        counters = []
        for buttonName in self._buttonsWithCounter:
            if isHofButtonNew(buttonName):
                counters.append({'componentId': buttonName,
                                 'count': '1'})
        return counters

    def __getRatingStatus(self):
        """Fetch the player's current Hall of Fame status from the service."""

        def handleError(errorCode):
            # Translate a known service error code into a UI status;
            # unknown codes are only logged.
            status = self._errorsStatusMap.get(errorCode)
            if status:
                self.__status = status
                self.as_setStatusS(status)
            else:
                LOG_ERROR('Unknown error code: ' + str(errorCode))

        self.__makeRequest(self._clansController.getClanDossier().requestHofUserInfo, PROFILE_CONSTANTS.HOF_RESULTS_SHOW, handleError)

    @process
    def __makeRequest(self, requestFunc, successStatus, errorCallback):
        """Perform *requestFunc* with retry/maintenance handling.

        On success applies *successStatus* (or delegates service errors to
        *errorCallback*); on no response retries up to _requestRetriesCount
        times before showing the "data unavailable" maintenance banner.
        """
        if self.__retriesCount == 0:
            # First attempt: show the waiting spinner (unless the maintenance
            # banner already covers the view) and lock out parallel requests.
            if not self.__isMaintenance:
                self.as_showWaitingS(WAITING.HOF_LOADING)
            self.__requestProcessing = True
        else:
            # We are inside a retry; the scheduled callback has fired.
            self.__retryCallback = None
        response = yield requestFunc()
        if self.__viewDisposed:
            # View went away while the request was in flight: drop the result.
            LOG_WARNING('ProfileHof request canceled: ProfileHof view was disposed')
            return
        else:
            if response:
                self.__refreshRequest()
                if self.__isMaintenance:
                    # Service is back: remove the banner and restore counters.
                    self.as_hideServiceViewS()
                    self.as_setBtnCountersS(self.__getCountersList())
                    self.__isMaintenance = False
                errors = response.getErrors()
                if not errors:
                    self.__status = successStatus
                    self.as_setStatusS(successStatus)
                else:
                    # Only the first error code is reported.
                    errorCallback(errors[0])
            elif self.__retriesCount < self._requestRetriesCount:
                # No response yet: schedule another attempt.
                self.__retriesCount += 1
                self.__retryCallback = BigWorld.callback(self._retryDelay, partial(self.__makeRequest, requestFunc, successStatus, errorCallback))
            else:
                # Out of retries: show the maintenance banner once.
                self.__refreshRequest()
                if not self.__isMaintenance:
                    self.__isMaintenance = True
                    header = icons.alert() + i18n.makeString(MENU.BROWSER_DATAUNAVAILABLE_HEADER)
                    description = i18n.makeString(MENU.BROWSER_DATAUNAVAILABLE_DESCRIPTION)
                    self.as_showServiceViewS(header, description)
                    self.as_setBtnCountersS([])
            return

    def __refreshRequest(self):
        """Reset retry bookkeeping and hide the waiting spinner."""
        self.__retriesCount = 0
        if not self.__isMaintenance:
            self.as_hideWaitingS()
        self.__requestProcessing = False

    def __openHofBrowserView(self, url):
        """Open *url* in the in-game browser view configured for Hall of Fame."""
        self._eventsCache.onProfileVisited()
        g_eventBus.handleEvent(events.LoadViewEvent(VIEW_ALIAS.BROWSER_VIEW, ctx={'url': url,
                                                                                 'returnAlias': VIEW_ALIAS.LOBBY_PROFILE,
                                                                                 'allowRightClick': True,
                                                                                 'webHandlers': createHofWebHandlers(),
                                                                                 'selectedAlias': VIEW_ALIAS.PROFILE_HOF,
                                                                                 'disabledKeys': getHofDisabledKeys(),
                                                                                 'onServerSettingsChange': onServerSettingsChange}), EVENT_BUS_SCOPE.LOBBY)

    def __onServerSettingChanged(self, diff):
        # React only to Hall of Fame related settings changes.
        if 'hallOfFame' in diff:
            self.onSectionActivated()
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
38af75495681eb2f6ff8f41b12bb003b2b7641d6 | 3daf036e3911c00e50fb76e083ada2134ff1758f | /firefox/export_firefox_cookies.py | 861a3b9d04167e0ea5495f881f256632f0561819 | [
"MIT"
] | permissive | jabbalaci/Bash-Utils | 954b234148745a9d73747392b137884ee7817246 | c880ff48eafb0f8f5f60f62d9cc3ddbbc0dd88b7 | refs/heads/master | 2023-05-01T22:40:46.713341 | 2023-04-24T14:30:25 | 2023-04-24T14:30:25 | 1,561,380 | 91 | 32 | null | null | null | null | UTF-8 | Python | false | false | 3,941 | py | #!/usr/bin/env python3
"""
Extract Firefox cookies
=======================
This script extracts cookies from Firefox's cookies.sqlite file
that are specific to a given host. The exported cookies are saved
in the file cookies.txt .
New! It also exports session cookies from Firefox's recovery.js file.
The exported cookies are saved to session_cookies.txt .
Then, you can use this exported file with wget to download content
that require authentication via cookies:
wget --cookies=on --load-cookies=cookies.txt --keep-session-cookies "http://..."
The original script was written by Dirk Sohler:
https://old.0x7be.de/2008/06/19/firefox-3-und-cookiestxt/
This version is a bit refactored and extended with session cookies.
Website: https://ubuntuincident.wordpress.com/2011/09/05/download-pages-with-wget-that-are-protected-by-cookies/
GitHub: https://github.com/jabbalaci/Bash-Utils (see the firefox/ folder)
Last update: 2017-05-17 (yyyy-mm-dd)
"""
import json
import os
import pprint
import sqlite3 as db
import sys
from pathlib import Path
# Default Firefox profile root on Linux.
FIREFOX_DIR = Path(os.path.expanduser('~'), '.mozilla', 'firefox')
COOKIES_TXT = 'cookies.txt'  # output: persistent cookies (Netscape format)
SESSION_COOKIES_TXT = 'session_cookies.txt'  # output: session cookies (JSON snippets)
CONTENTS = "host, path, isSecure, expiry, name, value"  # columns read from moz_cookies
def get_cookie_db_path(firefox_dir):
    """Return the path of cookies.sqlite inside the default Firefox profile.

    Exits with a message on stderr when no ``*.default`` profile directory
    exists under *firefox_dir*, or when its cookies.sqlite file is missing.
    """
    for entry in os.listdir(firefox_dir):
        if not entry.endswith('.default'):
            continue
        candidate = Path(firefox_dir, entry, 'cookies.sqlite')
        if candidate.is_file():
            return str(candidate)
        print("Error: the file '{0}' doesn't exist".format(str(candidate)), file=sys.stderr)
        sys.exit(1)
    print("Error: the user dir. was not found in '{0}'".format(firefox_dir), file=sys.stderr)
    sys.exit(1)
def get_recovery_js_path(firefox_dir):
    """Return the path of recovery.js inside the default Firefox profile.

    Exits with a message on stderr when no ``*.default`` profile directory
    exists under *firefox_dir*, or when its session-store backup is missing.
    """
    for entry in os.listdir(firefox_dir):
        if not entry.endswith('.default'):
            continue
        candidate = Path(firefox_dir, entry, 'sessionstore-backups', 'recovery.js')
        if candidate.is_file():
            return str(candidate)
        print("Error: the file '{0}' doesn't exist".format(str(candidate)), file=sys.stderr)
        sys.exit(1)
    print("Error: the user dir. was not found in '{0}'".format(firefox_dir), file=sys.stderr)
    sys.exit(1)
def extract_cookies(host):
    """
    Extract cookies whose host contains *host* from cookies.sqlite
    and write them to cookies.txt (Netscape cookie-file format).
    """
    cookie_db = get_cookie_db_path(str(FIREFOX_DIR))
    print("# working with", cookie_db)

    conn = db.connect(cookie_db)
    try:
        cursor = conn.cursor()
        # Bind `host` as a query parameter instead of interpolating it into
        # the SQL text: the old string-formatted query was open to SQL
        # injection and broke on hosts containing a quote character.
        sql = "SELECT {c} FROM moz_cookies WHERE host LIKE ?".format(c=CONTENTS)
        cursor.execute(sql, ('%{0}%'.format(host),))

        cnt = 0
        # `with` guarantees the output file is flushed/closed even on error.
        with open(COOKIES_TXT, 'w') as out:
            for row in cursor.fetchall():
                s = "{0}\tTRUE\t{1}\t{2}\t{3}\t{4}\t{5}\n".format(row[0], row[1],
                    str(bool(row[2])).upper(), row[3], str(row[4]), str(row[5]))
                out.write(s)
                cnt += 1
        print("Search term: {0}".format(host))
        print("Exported: {0}".format(cnt))
    finally:
        # Close the connection even if the query fails (the original leaked
        # it on error).
        conn.close()
def extract_session_cookies(host):
    """
    Extract session cookies from recovery.js whose host contains *host*
    and write them, one pretty-printed JSON object each, to
    session_cookies.txt.
    """
    recovery_path = get_recovery_js_path(str(FIREFOX_DIR))
    print("# working with", recovery_path)

    with open(recovery_path) as fp:
        store = json.load(fp)

    # Session cookies live under the first window of the session store.
    cookies = store['windows'][0]['cookies']

    exported = 0
    with open(SESSION_COOKIES_TXT, "w") as out:
        for cookie in cookies:
            if host in cookie['host']:
                entry = {cookie['name']: cookie['value']}
                print(json.dumps(entry, indent=2), file=out)
                exported += 1
    print("Exported: {0}".format(exported))
#############################################################################
if __name__ == "__main__":
    # Usage: export_firefox_cookies.py <host-substring>
    if len(sys.argv) == 1:
        print("{0}: specify the host".format(Path(sys.argv[0]).name))
        sys.exit(1)
    # else
    host = sys.argv[1]
    extract_cookies(host)
    extract_session_cookies(host)
| [
"jabba.laci@gmail.com"
] | jabba.laci@gmail.com |
fddabc87a3bd3b32b19da51b8a145cb38e9f1ca1 | 7bcec8a9c6a240ec0888bec4179f536046464005 | /moviesys/moviesys/.history/library/admin_20210318134349.py | 76ed2874fbcd5d1186ba17a3a66fef09063aae9c | [] | no_license | yifanzhang13/MovieManagementSystem_group5 | c64e5810914c3d33ae6cd94e8eed5dc5a3962181 | 4cca1a4299311681d69b2347ca8d7b02e0846ebc | refs/heads/main | 2023-03-29T08:30:26.655108 | 2021-04-01T15:42:52 | 2021-04-01T15:42:52 | 344,417,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from django.contrib import admin
# Register your models here.
from .models import Movies, Users, Ratings, Links, Tags
# register models
# admin.site.register(Movies)
admin.site.register(Users)
admin.site.register(Ratings)
admin.site.register(Links)
admin.site.register(Tags)
@admin.register(Movies)
class MoviesAdmin(admin.ModelAdmin):
    # Columns shown in the Movies change-list of the Django admin.
    list_display = ('MovieID', 'MovieTitle', 'MovieGenres')
| [
"yifancheung13@gmail.com"
] | yifancheung13@gmail.com |
ade285186c539e4e1bfac84b045c7a325362486d | 51a37b7108f2f69a1377d98f714711af3c32d0df | /src/leetcode/P5667.py | b91f9dbfe363084ec778918ed185877ab603d56c | [] | no_license | stupidchen/leetcode | 1dd2683ba4b1c0382e9263547d6c623e4979a806 | 72d172ea25777980a49439042dbc39448fcad73d | refs/heads/master | 2022-03-14T21:15:47.263954 | 2022-02-27T15:33:15 | 2022-02-27T15:33:15 | 55,680,865 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,218 | py | import bisect
class Solution:
    """LeetCode 1744: decide, per query, whether a favourite candy type can
    be eaten on a favourite day without exceeding a daily cap, eating candy
    types strictly in order and at least one candy per day."""

    def canEat(self, candiesCount, queries):
        """Return a list of booleans, one per (type, day, cap) query."""
        # prefix[i] = total number of candies of types 0..i-1.
        prefix = [0]
        for count in candiesCount:
            prefix.append(prefix[-1] + count)

        answers = []
        for fav_type, fav_day, daily_cap in queries:
            fewest = 1 * (fav_day + 1)          # eat exactly one candy per day
            most = daily_cap * (fav_day + 1)    # eat the cap every day
            lo = bisect.bisect_left(prefix, fewest)
            hi = bisect.bisect_left(prefix, most)
            # Feasible iff the favourite type lies between the slowest- and
            # fastest-reachable candy types by day fav_day.
            answers.append(lo - 1 <= fav_type <= hi - 1)
        return answers
if __name__ == '__main__':
print(Solution().canEat(
[46, 5, 47, 48, 43, 34, 15, 26, 11, 25, 41, 47, 15, 25, 16, 50, 32, 42, 32, 21, 36, 34, 50, 45, 46, 15, 46, 38,
50, 12, 3, 26, 26, 16, 23, 1, 4, 48, 47, 32, 47, 16, 33, 23, 38, 2, 19, 50, 6, 19, 29, 3, 27, 12, 6, 22, 33,
28, 7, 10, 12, 8, 13, 24, 21, 38, 43, 26, 35, 18, 34, 3, 14, 48, 50, 34, 38, 4, 50, 26, 5, 35, 11, 2, 35, 9,
11, 31, 36, 20, 21, 37, 18, 34, 34, 10, 21, 8, 5],
[[80, 2329, 69], [14, 1485, 76], [33, 2057, 83], [13, 1972, 27], [11, 387, 25], [24, 1460, 47], [22, 1783,
35], [1, 513,
33], [66,
2124,
85], [
19, 642, 26], [15, 1963, 79], [93, 722, 96], [15, 376, 88], [60, 1864, 89], [86, 608, 4], [98, 257, 35], [
35, 651, 47], [96, 795, 73], [62, 2077, 18], [27, 1724, 57], [34, 1984, 75], [49, 2413, 95], [76, 1664,
5], [28, 38,
13], [
85, 54, 42], [12, 301, 3], [62, 2016, 29], [45, 2316, 37], [43, 2360, 28], [87, 192, 98], [27, 2082, 21], [
74, 762, 37], [51, 35, 17], [73, 2193, 4], [60, 425, 65], [11, 1522, 58], [21, 1699, 66], [42, 1473, 5], [
30, 2010, 48], [91, 796, 74], [82, 2162, 31], [23, 2569, 65], [24, 684, 23], [70, 1219, 51], [5, 1817,
15], [81,
2446,
34], [
96, 771, 60], [49, 1171, 60], [41, 567, 67], [39, 799, 59], [90, 957, 81], [84, 2122, 27], [82, 1707,
44], [11, 1889,
20], [80,
1697,
83], [
24, 1786, 60], [90, 1847, 99], [51, 114, 21], [44, 466, 85], [56, 469, 20], [44, 350, 96], [66, 1946,
10], [14, 2470,
12], [69,
1175,
18], [
98, 1804, 25], [77, 2187, 40], [89, 2265, 45], [19, 2246, 45], [40, 2373, 79], [60, 2222, 17], [37, 385,
5], [97,
1759,
97], [
10, 903, 5], [87, 842, 45], [74, 2398, 66], [62, 49, 94], [48, 156, 77], [76, 2310, 80], [64, 2360, 95], [
70, 1699, 83], [39, 1241, 66], [92, 2312, 21], [63, 2148, 29], [95, 594, 74], [89, 90, 51], [82, 137,
70], [54, 301,
97], [
15, 819, 43], [47, 1402, 60], [17, 2377, 43], [50, 1937, 95], [62, 1174, 74], [67, 1411, 87], [39, 1151,
48]]))
| [
"stupidchen@foxmail.com"
] | stupidchen@foxmail.com |
e1488fdab650b9fd0136aa331c3c6462879aeb1a | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf/gsn-edf_ut=2.0_rd=0.8_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=37/params.py | 9cc08e99d9f5bb4bdff5449574abf1e1a429bfd7 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | {'cpus': 4,
'duration': 30,
'final_util': '2.027810',
'max_util': '2.0',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '0.8',
'res_nmb': '4',
'res_weight': '0.06',
'scheduler': 'GSN-EDF',
'trial': 37,
'utils': 'uni-medium-3'}
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
f32820f7dc5afdb06d623413c51c3fa851319acd | ab5cdf8f2de94c327e4679da84f941b1f3c04db4 | /kubernetes/client/models/v1beta1_host_port_range.py | 580abcc558942c90a636d2190bec5623cb540642 | [
"Apache-2.0"
] | permissive | diannaowa/client-python | a4a92a125178db26004eaef5062f9b1b581b49a8 | 5e268fb0b6f21a535a14a7f968b84ed4486f6774 | refs/heads/master | 2020-12-02T22:06:03.687696 | 2017-06-30T21:42:50 | 2017-06-30T21:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,825 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HostPortRange(object):
    """Swagger-generated model describing a host port range (min..max, inclusive).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    def __init__(self, max=None, min=None):
        """
        V1beta1HostPortRange - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Per-instance swagger metadata, exactly as the generator emits it.
        self.swagger_types = {
            'max': 'int',
            'min': 'int'
        }

        self.attribute_map = {
            'max': 'max',
            'min': 'min'
        }

        # Assign the backing fields directly so a bound may start out as
        # None; the property setters reject None on later assignment.
        self._max = max
        self._min = min

    @property
    def max(self):
        """
        Gets the max of this V1beta1HostPortRange.
        max is the end of the range, inclusive.

        :rtype: int
        """
        return self._max

    @max.setter
    def max(self, max):
        """
        Sets the max of this V1beta1HostPortRange.
        max is the end of the range, inclusive; None is rejected.
        """
        if max is None:
            raise ValueError("Invalid value for `max`, must not be `None`")
        self._max = max

    @property
    def min(self):
        """
        Gets the min of this V1beta1HostPortRange.
        min is the start of the range, inclusive.

        :rtype: int
        """
        return self._min

    @min.setter
    def min(self, min):
        """
        Sets the min of this V1beta1HostPortRange.
        min is the start of the range, inclusive; None is rejected.
        """
        if min is None:
            raise ValueError("Invalid value for `min`, must not be `None`")
        self._min = min

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested
        models (anything exposing its own ``to_dict``).
        """
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return isinstance(other, V1beta1HostPortRange) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
f5f364ce6f2c28ed585f25e0559cd6f7cccfc170 | 4302fd10583ccff63ff5693bd2ae5903323cb769 | /curate/migrations/0005_remove_study_is_replication.py | b6ec819551ad0d855253b9dfe55fef304f4a0d25 | [
"MIT"
] | permissive | ScienceCommons/curate_science | 1faf742c8de1e9c9180e4d8ec6a7457ad95bb705 | 4e4072e8c000df0d2e80637016f8f0e667f4df54 | refs/heads/master | 2022-02-12T19:56:51.730534 | 2022-01-25T16:44:54 | 2022-01-25T16:44:54 | 149,122,317 | 14 | 7 | MIT | 2021-03-23T17:27:05 | 2018-09-17T12:32:25 | HTML | UTF-8 | Python | false | false | 334 | py | # Generated by Django 2.1.1 on 2018-09-27 02:45
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the `is_replication` flag from the Study model."""

    dependencies = [
        ('curate', '0004_auto_20180921_0646'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='study',
            name='is_replication',
        ),
    ]
| [
"alex.kyllo@gmail.com"
] | alex.kyllo@gmail.com |
fd4b65d59d854cc6badda13d543369570401228a | f26937e8cd0b07589ba1cf6275596d97488cda7e | /scrapySpider/fenbu/fenbu/spiders/ff.py | aa080ce347819359cc930f96fdb95135e9cc23bc | [] | no_license | HezhouW/hive | 4aa46a045d22de121e2903075e74c3c9fd75ec1f | 3a7de0c18cbe0ec81e0b40c3217dd5b1a15cf464 | refs/heads/master | 2022-02-27T04:52:42.704501 | 2019-05-24T02:40:49 | 2019-05-24T02:40:49 | 123,524,369 | 1 | 0 | null | 2018-03-02T03:18:07 | 2018-03-02T03:18:07 | null | UTF-8 | Python | false | false | 308 | py | # -*- coding: utf-8 -*-
import scrapy
from scrapy_redis.spiders import RedisCrawlSpider
from fenbu.items import FenbuItem
import re
import redis
class MoviespiderSpider(RedisCrawlSpider):
    """Distributed spider: scrapy-redis pops start URLs from the Redis list
    named by `redis_key` (pushed there externally)."""
    name = 'ff'
    redis_key = 'fenbuSpider:start_urls'
    def parse(self , response):
        # Debug-only callback: dump the raw response body to stdout.
        print(response.text)
"954316227@qq.com"
] | 954316227@qq.com |
feb0de67b1279c42a7f67cadd8c3b7566685ac4c | 2a28a94fc8eb08961e76c61ab73889135153502b | /asposecellscloud/requests/delete_horizontal_page_break_request.py | b8b9619e62e7eae4af87a1eeb338ddd0627b0f6f | [
"MIT"
] | permissive | aspose-cells-cloud/aspose-cells-cloud-python | 45fc7e686b442302a29a8223e7dbddb71950438c | 270d70ce7f8f3f2ecd9370b1dacfc4789293097e | refs/heads/master | 2023-09-04T01:29:44.242037 | 2023-08-23T13:13:30 | 2023-08-23T13:13:30 | 123,092,364 | 6 | 5 | null | null | null | null | UTF-8 | Python | false | false | 4,339 | py | # coding: utf-8
"""
<copyright company="Aspose" file="DeleteHorizontalPageBreakRequest.cs">
Copyright (c) 2023 Aspose.Cells Cloud
</copyright>
<summary>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
</summary>
"""
import json
from six import iteritems
from asposecellscloud import *
from asposecellscloud.models import *
from asposecellscloud.requests import *
from six.moves.urllib.parse import quote
class DeleteHorizontalPageBreakRequest(object):
    """Request object for the Aspose.Cells Cloud 'delete horizontal page
    break' operation (DELETE on a workbook/worksheet/index resource)."""

    def __init__(self, name, sheet_name, index, folder=None, storage_name=None):
        """
        :param name: workbook file name (required)
        :param sheet_name: worksheet name (required)
        :param index: page-break index to delete (required)
        :param folder: optional storage folder of the workbook
        :param storage_name: optional storage name
        """
        self.name = name
        self.sheet_name = sheet_name
        self.index = index
        self.folder = folder
        self.storage_name = storage_name

    def create_http_request(self, api_client):
        """Build the HTTP request dict consumed by *api_client*.

        Raises ValueError when any required parameter is None.
        """
        if self.name is None:
            raise ValueError("Missing the required parameter `name` when calling `delete_horizontal_page_break`")
        if self.sheet_name is None:
            raise ValueError("Missing the required parameter `sheet_name` when calling `delete_horizontal_page_break`")
        if self.index is None:
            raise ValueError("Missing the required parameter `index` when calling `delete_horizontal_page_break`")

        collection_formats = {}

        # All three path parameters are guaranteed non-None by the checks above.
        path_params = {
            'name': self.name,
            'sheetName': self.sheet_name,
            'index': self.index,
        }

        query_params = []
        if self.folder is not None:
            query_params.append(('folder', self.folder))
        if self.storage_name is not None:
            query_params.append(('storageName', self.storage_name))

        header_params = {}
        # HTTP header `Accept`
        header_params['Accept'] = api_client.select_header_accept(['application/json'])
        # HTTP header `Content-Type`
        header_params['Content-Type'] = api_client.select_header_content_type(['application/json'])

        form_params = []
        local_var_files = {}

        # No authentication setting for this endpoint.
        auth_settings = []

        resource_path = "/cells/{name}/worksheets/{sheetName}/horizontalpagebreaks/{index}"
        if path_params:
            sanitized = api_client.sanitize_for_serialization(path_params)
            for key, value in api_client.parameters_to_tuples(sanitized, collection_formats):
                # Specified safe chars; encode everything else.
                resource_path = resource_path.replace('{%s}' % key, quote(str(value), safe='/'))

        return {
            "method": "DELETE",
            "path": resource_path,
            "query_params": query_params,
            "header_params": header_params,
            "form_params": form_params,
            "files": local_var_files,
            "auth_settings": auth_settings,
            "body": None,
            "collection_formats": collection_formats,
            "response_type": 'CellsCloudResponse'
        }
| [
"roy.wang@aspose.com"
] | roy.wang@aspose.com |
da0594e90c26aaae98ad9da7493093c03cf47b0b | 0a11a15cf64e25585d28f484bb2118e8f858cfeb | /알고리즘/알고리즘문제/4869_종이붙이기.py | 288c845e4383a2dd1780008c74d376f54243f413 | [] | no_license | seoul-ssafy-class-2-studyclub/GaYoung_SSAFY | 7d9a44afd0dff13fe2ba21f76d0d99c082972116 | 23e0b491d95ffd9c7a74b7f3f74436fe71ed987d | refs/heads/master | 2021-06-30T09:09:00.646827 | 2020-11-30T14:09:03 | 2020-11-30T14:09:03 | 197,476,649 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | def paper(N):
if N == 10:
return 1
if (N // 10) % 2 == 1: # 홀수
return paper(N - 10) * 2 - 1
else:
return paper(N - 10) * 2 + 1
# Read the number of test cases, then one paper length N per case, and
# print '#<case> <answer>' using the recursive paper() helper above.
for t in range(int(input())):
    N = int(input())
    print('#{} {}'.format(t+1, paper(N)))
| [
"gyyoon4u@naver.com"
] | gyyoon4u@naver.com |
d20b982fe3c60329974052d5ba1eeb74eab893e8 | 9c84f9d5dc15a7aa5d1caf05b6ae5ea83e39be3a | /python_stack/django/django_full_stack/BellReview/BellReview/settings.py | e601fdec278ca591be2c1b97ab65472513bdbd3b | [] | no_license | tomnguyen103/Coding_Dojo | 0fc4007296feb775b4bcd6ee98f66286b2786adb | ec46b866fc7e58a37d07b63b26b38d19eaeb96f6 | refs/heads/master | 2022-12-28T03:47:57.172540 | 2020-06-15T23:03:50 | 2020-06-15T23:03:50 | 212,214,976 | 1 | 0 | null | 2022-12-11T18:36:51 | 2019-10-01T22:59:37 | Python | UTF-8 | Python | false | false | 3,123 | py | """
Django settings for BellReview project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'kffzdwq*+%@j2rnp%f7h!8447u%h-4cm7@$9(4%zm0k$roc&zl'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'apps.main',  # project app; the rest are Django contrib defaults
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'BellReview.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,  # templates are discovered inside each app
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'BellReview.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Development default: a local SQLite file next to the project.

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
| [
"huuthong103@gmail.com"
] | huuthong103@gmail.com |
eca0527dc6e1af8eb14ad2e48ab7b65a7961662a | c1a4742ecd23941140b57cfd61759aa3901e0711 | /src/apps/boards/views.py | 68afa2b13811821ff0bc49eb05d9c7d3ea14862e | [] | no_license | aodarc/django-trello | 31a5b6813f5136b427c483c0d329ec8c231888d0 | ee00fc5a71e2a7003118542b6b8caffaa73bc9b8 | refs/heads/master | 2023-06-29T14:42:05.027572 | 2021-07-26T15:28:28 | 2021-07-26T15:28:28 | 389,680,626 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | from django.contrib.auth.decorators import login_required
from django.db.models import Count, Prefetch
from django.http import HttpResponseRedirect
# Create your views here.
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from rest_framework import generics as rest_generic
from apps.boards.models import Board, Comment, Task
from apps.boards.serializers.comment import CommentSerializer
from apps.boards.serializers.tasks import TaskSerializer
from common.permissions import IsOwnerOrReadOnly
class CreateCommentView(generic.CreateView):
    """Form view that creates a Comment authored by the current user.

    NOTE(review): the comment is attached to the user's most recent task
    (`tasks.last()`), not to a task taken from the URL — confirm this is
    intentional.
    """
    model = Comment
    fields = ["message"]
    template_name = 'boards/create_comment_form.html'
    success_url = reverse_lazy('home:home-page')

    def form_valid(self, form):
        # Fill in the fields the form does not expose before saving.
        obj = form.save(commit=False)
        obj.created_by = self.request.user
        obj.task = self.request.user.tasks.last()
        obj.save()
        return HttpResponseRedirect(self.get_success_url())
class DeleteComment(generic.DeleteView):
    """Confirmation view that deletes one of the current user's comments."""
    model = Comment
    success_url = reverse_lazy('home:home-page')
    template_name = 'boards/delete_comments.html'

    def get_queryset(self):
        # Restrict deletable comments to those authored by the requester.
        return super(DeleteComment, self).get_queryset().filter(created_by=self.request.user)
class BoardDetailView(generic.DetailView):
    """Detail page for a board; only boards the logged-in user belongs to."""
    model = Board
    context_object_name = 'board'
    template_name = 'boards/board-page.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require authentication for every HTTP method of this view.
        return super(BoardDetailView, self).dispatch(*args, **kwargs)

    def get_queryset(self):
        # Prefetch each column's non-archived tasks (with their comments and
        # an annotated comment count) to avoid N+1 queries when rendering.
        prefetch_tasks = Prefetch(
            'cols__tasks',
            queryset=Task.objects.select_related('col') \
                .prefetch_related('comments')
                .annotate(comments_count=Count('comments')) \
                .exclude(status=Task.STATUS_ARCHIVED)
        )
        return super(BoardDetailView, self).get_queryset() \
            .select_related('owner') \
            .prefetch_related('users', 'cols', prefetch_tasks) \
            .filter(users=self.request.user)
class CommentListCreateAPIView(rest_generic.ListCreateAPIView):
    """REST endpoint: list all comments or create a new one.

    Write access is limited to the object owner via ``IsOwnerOrReadOnly``.
    """
    serializer_class = CommentSerializer
    queryset = Comment.objects.all()
    permission_classes = [IsOwnerOrReadOnly]
class TaskListCreateAPIView(rest_generic.ListCreateAPIView):
    """REST endpoint: list all tasks or create a new one.

    The queryset prefetches the author and comments to avoid N+1 queries.
    """
    queryset = Task.objects.select_related('created_by').prefetch_related('comments').all()
    serializer_class = TaskSerializer
    # permission_classes = [IsOwnerOrReadOnly]
| [
"odarchenko@ex.ua"
] | odarchenko@ex.ua |
96c18d0ab5d9ca7292ba91d87de1eb104dda90bd | 69145e4b94bd6225138a57305fc09a1c714ebca7 | /home/migrations/0003_resume.py | d1d56477c33b114530c483f060458b5a44616366 | [
"MIT"
] | permissive | SimonOkello/portfolio | 09504163b34559af6119a89c7d3368e45025bbaa | 8b2436399ba1d686769a88c87567ed5e86b797a4 | refs/heads/main | 2021-12-02T18:58:22.120534 | 2021-10-10T10:55:05 | 2021-10-10T10:55:05 | 412,837,378 | 0 | 0 | MIT | 2021-10-09T09:20:20 | 2021-10-02T15:35:14 | Python | UTF-8 | Python | false | false | 832 | py | # Generated by Django 3.2.7 on 2021-10-03 16:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Resume`` model: a CV and cover letter per user."""

    dependencies = [
        # Depends on whichever user model the project has configured.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('home', '0002_service'),
    ]
    operations = [
        migrations.CreateModel(
            name='Resume',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('c_vitae', models.FileField(upload_to='media')),
                ('cover_letter', models.FileField(upload_to='media')),
                # Deleting the user cascades to their resume.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"simonokello93@gmail.com"
] | simonokello93@gmail.com |
b6ef4a8d17124102cba4c340c0adaa7d224bd5c3 | b72b41f8191e44ad4b70355ed2c26ea7feb0e1d0 | /main.py | 904a1e4e5ca193586ed8b1d462e14c5b8b9e4054 | [
"BSD-3-Clause"
] | permissive | dendisuhubdy/neuron-swarms | 6b25bace21f6116790904cc999e0a9540985251b | ceb8854a580abb825155c362dc2e8f801f950ed0 | refs/heads/master | 2021-08-16T10:39:56.229663 | 2017-11-19T17:05:46 | 2017-11-19T17:05:46 | 110,992,001 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 501 | py | import numpy as np
import time
from visdom import Visdom
from scipy.integrate import odeint
from swarmalators import Swarmalarator
viz = Visdom(server='http://suhubdy.com', port=51401)
def main():
    """Run one swarmalator simulation and scatter-plot the final state."""
    # Simulation parameters: coupling a, time step dt, duration T,
    # number of agents n, box size L -- TODO confirm meanings against
    # the Swarmalarator implementation.
    a, dt, T, n, L = 1, 0.5, 500, 100, 1 # surprisingly, dt = 0.5 seems to work OK (per original author)
    swarms = Swarmalarator(a,dt,T,n,L)
    x, y, theta = swarms.solve()
    # Plot only the final time step (index -1).
    swarms.scatter_t(x,y,theta,-1)
if __name__=="__main__":
    main()
| [
"suhubdyd@iro.umontreal.ca"
] | suhubdyd@iro.umontreal.ca |
5c0544ab8f5d844f75a21896e5c0928fd3feac1c | 8dd53a5d1820ae5a3efe799381a90c977afd32c4 | /test/functional/wallet_keypool_topup.py | 8e3ca127b17210112557981640e1cf17922daad5 | [
"MIT"
] | permissive | mulecore/mulecoin | 8b654817a1b78c9e98f96bfef5febaca23347f64 | e52131742938ae433463f32680837981a5cedc0f | refs/heads/master | 2023-03-28T05:37:53.552271 | 2021-03-27T03:22:13 | 2021-03-27T03:22:13 | 351,796,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test HD Wallet keypool restore function.
Two nodes. Node1 is under test. Node0 is providing transactions and generating blocks.
- Start node1, shutdown and backup wallet.
- Generate 110 keys (enough to drain the keypool). Store key 90 (in the initial keypool) and key 110 (beyond the initial keypool). Send funds to key 90 and key 110.
- Stop node1, clear the datadir, move wallet file back into the datadir and restart node1.
- connect node1 to node0. Verify that they sync and node1 receives its funds."""
import shutil
from test_framework.test_framework import MulecoinTestFramework
from test_framework.util import assert_equal, connect_nodes_bi, sync_blocks
class KeypoolRestoreTest(MulecoinTestFramework):
    """Functional test: restoring a wallet backup regenerates the HD keypool.

    Node0 mines and funds addresses; node1's wallet is backed up, drained
    past its initial keypool, restored from the backup, and must then
    recover all funds and mark every used key as spent.
    """
    def set_test_params(self):
        # Fresh regtest chain; node1 keeps a small keypool so it can be drained.
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [[], ['-keypool=100', '-keypoolmin=20']]
    def run_test(self):
        self.tmpdir = self.options.tmpdir
        # Mine 101 blocks so node0 has mature, spendable coinbase outputs.
        self.nodes[0].generate(101)
        self.log.info("Make backup of wallet")
        self.stop_node(1)
        shutil.copyfile(self.tmpdir + "/node1/regtest/wallet.dat", self.tmpdir + "/wallet.bak")
        self.start_node(1, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.log.info("Generate keys for wallet")
        addr_oldpool = []
        addr_extpool = []
        # Key 90 lies inside the backed-up keypool; key 110 lies beyond it.
        for _ in range(90):
            addr_oldpool = self.nodes[1].getnewaddress()
        for _ in range(20):
            addr_extpool = self.nodes[1].getnewaddress()
        self.log.info("Send funds to wallet")
        self.nodes[0].sendtoaddress(addr_oldpool, 10)
        self.nodes[0].generate(1)
        self.nodes[0].sendtoaddress(addr_extpool, 5)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.log.info("Restart node with wallet backup")
        self.stop_node(1)
        # Overwrite the drained wallet with the pre-drain backup.
        shutil.copyfile(self.tmpdir + "/wallet.bak", self.tmpdir + "/node1/regtest/wallet.dat")
        self.log.info("Verify keypool is restored and balance is correct")
        self.start_node(1, self.extra_args[1])
        connect_nodes_bi(self.nodes, 0, 1)
        self.sync_all()
        # Both payments (10 + 5) must be recovered from the restored keypool.
        assert_equal(self.nodes[1].getbalance(), 15)
        assert_equal(self.nodes[1].listtransactions()[0]['category'], "receive")
        # Check that we have marked all keys up to the used keypool key as used
        assert_equal(self.nodes[1].validateaddress(self.nodes[1].getnewaddress())['hdkeypath'], "m/0'/0'/110'")
# Entry point: run the functional test through the framework harness.
if __name__ == '__main__':
    KeypoolRestoreTest().main()
| [
"root@DESKTOP-AOBIGEQ.localdomain"
] | root@DESKTOP-AOBIGEQ.localdomain |
5683033c35209cce456734e560d9bd3c07451980 | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/security/v20190801/__init__.py | 6309308fa63d8a40d72ad8c853ec0211cc0f2c9f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | Python | UTF-8 | Python | false | false | 459 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .device_security_group import *
from .get_device_security_group import *
from .get_iot_security_solution import *
from .iot_security_solution import *
from ._inputs import *
from . import outputs
| [
"github@mikhail.io"
] | github@mikhail.io |
879437f3995fc2c8af33708e20e65ea71d787eed | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy1998.py | 8ea67d5a234bcee1b81265ef51492d40802ad06f | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,931 | py | # qubit number=4
# total number=32
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position-wise.

    The result is returned reversed relative to the inputs, matching the
    bit ordering the rest of this script expects.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Return the GF(2) dot product of two equal-length bit strings.

    Result is the single character "0" or "1" (sum of products mod 2).
    """
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase-free oracle O_f on n control qubits and one target.

    For every n-bit input ``rep`` with ``f(rep) == "1"``, the target qubit
    is flipped via a multi-controlled Toffoli ('noancilla' mode); X gates
    temporarily map the zero bits of ``rep`` onto the control pattern.
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) != "1":
            continue
        # Positions where rep has a 0 must be inverted so the MCT fires
        # exactly on this bit pattern; the X gates are undone afterwards.
        zero_positions = [j for j in range(n) if rep[j] == "0"]
        for j in zero_positions:
            oracle.x(controls[j])
        oracle.mct(controls, target[0], None, mode='noancilla')
        for j in zero_positions:
            oracle.x(controls[j])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the n-qubit test circuit around the oracle for ``f``.

    The gate sequence below is machine-generated (the ``# number=`` tags
    are generation order markers); all qubits are measured at the end.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[0],input_qubit[3]) # number=17
    prog.h(input_qubit[3]) # number=18
    prog.x(input_qubit[3]) # number=14
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.y(input_qubit[3]) # number=12
    prog.h(input_qubit[3]) # number=29
    prog.cz(input_qubit[2],input_qubit[3]) # number=30
    prog.h(input_qubit[3]) # number=31
    prog.h(input_qubit[0]) # number=5
    # Oracle acts on the first n-1 qubits as controls plus the last qubit.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=24
    prog.cz(input_qubit[3],input_qubit[2]) # number=25
    prog.h(input_qubit[2]) # number=26
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.x(input_qubit[2]) # number=23
    prog.h(input_qubit[0]) # number=9
    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.x(input_qubit[1]) # number=20
    prog.x(input_qubit[1]) # number=21
    prog.x(input_qubit[3]) # number=27
    prog.x(input_qubit[3]) # number=28
    # circuit end
    # Measure every qubit into its matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # f(rep) = (a . rep) xor b over GF(2) -- a Bernstein-Vazirani-style
    # inner-product function built from the helpers above.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the FakeVigo noisy-simulator backend.
    backend = FakeVigo()
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump the measurement counts and the transpiled circuit to CSV.
    writefile = open("../data/startQiskit_noisy1998.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
5a9fb527004b7c85da090fbd398b277106d50371 | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/1183fe82-1596.py | 02dad21d9a024d399fccb78fe53c31cad6e8bc1d | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
"""
POC Name : sgc8000 大型旋转机监控系统报警短信模块泄露
Author : a
mail : a@lcx.cc
refer : 打雷 http://www.wooyun.org/bugs/wooyun-2015-0135197/
波及各大能源公司,包括中石油,中石化,中海油,中煤等等等等全国各个化工能源公司
"""
import urlparse
def assign(service, arg):
    """Normalize *arg* to its ``scheme://netloc/`` root for this plugin.

    Returns ``(True, root_url)`` when *service* is ``'sgc8000'``;
    otherwise returns ``None`` (the plugin does not apply).
    """
    if service != 'sgc8000':
        return None
    parts = urlparse.urlparse(arg)
    return True, '%s://%s/' % (parts.scheme, parts.netloc)
def audit(arg):
    """Probe *arg* for an exposed SG8000 SMS alarm module and report it."""
    url = arg + "sg8k_sms/"
    status, head, body, errcode, _ = curl.curl2(url)
    # Fingerprint: HTTP 200 plus the SG8000 SMS module's page markers.
    if status == 200 and 'SG8000' in body and 'getMachineList' in body and 'cancelSendMessage' in body:
        security_warning(url)
if __name__ == '__main__':
from dummy import *
audit(assign('sgc8000', 'http://www.pindodo.com/')[1]) | [
"yudekui@wsmtec.com"
] | yudekui@wsmtec.com |
829b93625b164aec03032b7f6b7d6a98b68afbfb | 4522fc52bc43654aadd30421a75bae00a09044f0 | /isis/itzamara/search_item.py | 8508a4237dc9d83db9719b60b26ba7f43ce31d6f | [] | no_license | qesoalpe/anelys | 1edb8201aa80fedf0316db973da3a58b67070fca | cfccaa1bf5175827794da451a9408a26cd97599d | refs/heads/master | 2020-04-07T22:39:35.344954 | 2018-11-25T05:23:21 | 2018-11-25T05:23:21 | 158,779,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | from isis.dialog_search_text import Dialog_Search_Text
from isis.data_model.table import Table
from sarah.acp_bson import Client
class Search_Item(Dialog_Search_Text):
    """Item-search dialog backed by the 'itzamara' messaging agent.

    Lookup order in ``searching``: exact SKU, then code reference, then
    a fuzzy description search whose results populate a result table.
    """
    def __init__(self, parent=None):
        Dialog_Search_Text.__init__(self, parent)
        # NOTE(review): this None assignment is immediately overwritten by
        # the Client below -- it only serves as an attribute declaration.
        self.agent_itzamara = None
        self.store = None           # optional store filter for the fuzzy search
        self.search_by_sku = True
        self.search_by_code_ref = True
        self.agent_itzamara = Client(Search_Item.APP_ID, 'itzamara')
    def searching(self, e):
        # NOTE(review): `e` and `answer` are accessed both as dicts
        # (e['text']) and as attributes (e.text) -- presumably a mapping
        # type supporting both; confirm against Dialog_Search_Text/Client.
        if self.search_by_sku:
            # 1) Exact SKU match: select it and stop.
            msg = {'type_message': 'find_one', 'type': 'itzamara/item', 'query': {'sku': e['text']}}
            answer = self.agent_itzamara(msg)
            if 'result' in answer and answer['result'] is not None:
                e['selected'] = answer['result']
                return
        if self.search_by_code_ref:
            # 2) Item related to a code reference: select it and stop.
            msg = {'type_message': 'request', 'request_type': 'get', 'get': 'itzamara/item_related_to_code_ref',
                   'code_ref': e.text}
            answer = self.agent_itzamara(msg)
            if 'result' in answer and answer.result is not None:
                e.selected = answer.result
                return
        # 3) Fuzzy description search ('!like'), optionally store-scoped.
        msg = {'type_message': 'find', 'type': 'itzamara/item', 'query': {'description': {'!like': e['text']}}}
        if self.store is not None:
            msg['query']['store'] = self.store
        answer = self.agent_itzamara.send_msg(msg)
        e['list'] = answer['result']
        # Present the matches in a two-column table (sku, description).
        table = Table()
        e['table'] = table
        table.columns.add('sku', str)
        table.columns.add('description', str)
        table.datasource = e.list
    # Application identifier used when registering the messaging client.
    APP_ID = 'isis.itzamara.Search_Item'
| [
"qesoalpe@gmail.com"
] | qesoalpe@gmail.com |
6834426075b03b496ae3de4b06d6f72d73bf5839 | 35a10ea7657fb28b4ae5a95045bc8e715b0b8d1c | /mysite/main/migrations/0005_delete_essaycategory.py | 3baecf7fefd629a67d9303b72c037ffca744b4da | [
"MIT"
] | permissive | nsky80/editorial | d7c978be4b8b8ea1cec6b764dd2e9860ebdf0867 | e85106e32d5d5ff8b9ac7f140b0c8f67d34b2dc0 | refs/heads/master | 2020-04-29T08:41:57.601027 | 2020-03-05T18:37:02 | 2020-03-05T18:37:02 | 175,995,388 | 2 | 0 | MIT | 2019-08-19T18:29:58 | 2019-03-16T16:20:23 | HTML | UTF-8 | Python | false | false | 299 | py | # Generated by Django 2.2.1 on 2019-07-21 09:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20190316_0140'),
]
operations = [
migrations.DeleteModel(
name='EssayCategory',
),
]
| [
"satishkumary80@gmail.com"
] | satishkumary80@gmail.com |
35b09022fa3e5caa076b0ac3a5627233549ded43 | 55c552b03a07dcfa2d621b198aa8664d6ba76b9a | /Algorithm/BOJ/4134_다음 소수_s4/4134.py | 7845e2c7cdb25cd978361a4bf6a02925ddce3a0b | [] | no_license | LastCow9000/Algorithms | 5874f1523202c10864bdd8bb26960953e80bb5c0 | 738d7e1b37f95c6a1b88c99eaf2bc663b5f1cf71 | refs/heads/master | 2023-08-31T12:18:45.533380 | 2021-11-07T13:24:32 | 2021-11-07T13:24:32 | 338,107,899 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | # boj 4134 다음 소수 s4
# noj.am/4134
for _ in range(int(input())):
n = int(input())
if n <= 1:
print(2)
continue
maxRange = 80000
flag = [False, False] + [True] * (maxRange - 1)
for num in range(2, maxRange + 1):
if flag[num]:
for i in range(num + num, maxRange + 1, num):
flag[i] = False
num = n
while True:
for i in range(int(num ** 0.5) + 1):
if flag[i]:
if num % i == 0:
num += 1
break
else:
print(num)
break
'''
특정 수의 양의 제곱근 이하의 소수들로 나누어 떨어지면 소수x
''' | [
"sys19912002@hanmail.net"
] | sys19912002@hanmail.net |
5dc7334bd95e7f16687b5903ecfc180f29cb6d4a | f6d7c30a7ed343e5fe4859ceaae1cc1965d904b7 | /htdocs/submissions/5dc7334bd95e7f16687b5903ecfc180f29cb6d4a.py | f240b858556dd23d3f4d394931854f0d7c911994 | [] | no_license | pycontest/pycontest.github.io | ed365ebafc5be5d610ff9d97001240289de697ad | 606015cad16170014c41e335b1f69dc86250fb24 | refs/heads/master | 2021-01-10T04:47:46.713713 | 2016-02-01T11:03:46 | 2016-02-01T11:03:46 | 50,828,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | n=' .'
m=' _ .'
e='| |.'
a='|_|.'
r=' |.'
l='| .'
d=' _|.'
s='|_ .'
n=m+e+a,n+r+r,m+d+s,m+d+d,n+a+r,m+s+d,m+s+a,m+r+r,m+a+a,m+a+d
def seven_seg(x):
a=['']*3
for d in x:
l=n[int(d)].split('.')
for z in range(3):a[z]+=l[z]
return '\n'.join(a)+'\n'
| [
"info@pycontest.net"
] | info@pycontest.net |
536ff942f90b91a7fb29e3a9076d36b582318420 | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /multimedia/converter/dvdbackup/actions.py | 2d25a92cb98af0288858998d4ff57bf65653cc77 | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2007-2009 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
WorkDir = "dvdbackup"
def build():
    """Compile dvdbackup directly against libdvdread (no configure script upstream)."""
    autotools.compile("-I/usr/include/dvdread -o dvdbackup src/dvdbackup.c -ldvdread")
def install():
    """Install the freshly built binary into the package's /usr/bin."""
    pisitools.dobin("dvdbackup")
| [
"ozancaglayan@users.noreply.github.com"
] | ozancaglayan@users.noreply.github.com |
05d70434fa49d8b43242b5bc319959b97b833cbb | f1cb02057956e12c352a8df4ad935d56cb2426d5 | /LeetCode/2402. Meeting Rooms III/Solution.py | e04582cc6433fc14b232d16a3615a444f3a02378 | [] | no_license | nhatsmrt/AlgorithmPractice | 191a6d816d98342d723e2ab740e9a7ac7beac4ac | f27ba208b97ed2d92b4c059848cc60f6b90ce75e | refs/heads/master | 2023-06-10T18:28:45.876046 | 2023-05-26T07:46:42 | 2023-05-26T07:47:10 | 147,932,664 | 15 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | MEETING_START = 1
MEETING_END = 0
class Solution:
def mostBooked(self, n: int, meetings: List[List[int]]) -> int:
# Time Complexity: O(M log MN)
# Space Complexity: O(N + M)
events = [(start, MEETING_START, end - start) for start, end in meetings]
pending = []
available_rooms = list(range(n))
heapq.heapify(events)
meeting_cnter = Counter()
while events:
event = heapq.heappop(events)
event_type = event[1]
cur_time = event[0]
if event_type == MEETING_START:
duration = event[2]
if available_rooms:
room = heapq.heappop(available_rooms)
heapq.heappush(events, (cur_time + duration, MEETING_END, room))
meeting_cnter[room] += 1
else:
heapq.heappush(pending, (cur_time, duration))
elif event_type == MEETING_END:
room = event[2]
if pending:
_, duration = heapq.heappop(pending)
heapq.heappush(events, (cur_time + duration, MEETING_END, room))
meeting_cnter[room] += 1
else:
heapq.heappush(available_rooms, room)
ret = 0
for room in range(n):
if meeting_cnter[room] > meeting_cnter[ret]:
ret = room
return ret
| [
"nphamcs@gmail.com"
] | nphamcs@gmail.com |
d3b472805b2615dba2cc942d9347ee58fddd00d3 | c3c5e21f02dc1ce325e4ba0ea49f04503b2124e5 | /Code/bigger_nn/plot_data.py | db6d913bed2c04cdfd9179ac0b7baf3b67594253 | [] | no_license | Rootpie-Studios/RL-in-HaliteIV | 5fdd76cc5523deec2847059cc6237d638c2a9881 | 431f35d47b898e68983772f9b908764741347ad5 | refs/heads/master | 2023-06-05T20:21:07.543805 | 2021-06-21T11:18:57 | 2021-06-21T11:18:57 | 378,900,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | import tensorflow as tf
import src.plot as plot
import bigger_nn.conf as conf
user_choice = input('Plot exploit data? y/n \n')
if user_choice == 'y':
folder = conf.get('EXPLOIT_GAMES_FOLDER')
else:
folder = conf.get('GAMES_FOLDER')
try:
model = tf.keras.models.load_model(conf.get('SHIP_MODEL'))
except:
model = conf.get('build_model')()
model.save(conf.get('SHIP_MODEL'), save_format='tf')
plot.plot_progress(10, folder, conf.get('NAME'), conf.get('AGENT2')[:-3], model, conf.get('input_data')) | [
"kimhaafi@gmail.com"
] | kimhaafi@gmail.com |
c12b59a23c758ac14e36e2ed849148850d9a5571 | bc441bb06b8948288f110af63feda4e798f30225 | /capacity_admin_sdk/model/container/pod_status_pb2.py | 6de9ae973841286f0c359c4fb191d12570e42f8d | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,896 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pod_status.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from capacity_admin_sdk.model.container import container_status_pb2 as capacity__admin__sdk_dot_model_dot_container_dot_container__status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='pod_status.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x10pod_status.proto\x12\tcontainer\x1a\x39\x63\x61pacity_admin_sdk/model/container/container_status.proto\"\xbc\x01\n\tPodStatus\x12\r\n\x05phase\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x39\n\x15initContainerStatuses\x18\x03 \x03(\x0b\x32\x1a.container.ContainerStatus\x12\x35\n\x11\x63ontainerStatuses\x18\x04 \x03(\x0b\x32\x1a.container.ContainerStatus\x12\x0e\n\x06hostIP\x18\x05 \x01(\t\x12\r\n\x05podIP\x18\x06 \x01(\tBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[capacity__admin__sdk_dot_model_dot_container_dot_container__status__pb2.DESCRIPTOR,])
_PODSTATUS = _descriptor.Descriptor(
name='PodStatus',
full_name='container.PodStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='phase', full_name='container.PodStatus.phase', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='container.PodStatus.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='initContainerStatuses', full_name='container.PodStatus.initContainerStatuses', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='containerStatuses', full_name='container.PodStatus.containerStatuses', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostIP', full_name='container.PodStatus.hostIP', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='podIP', full_name='container.PodStatus.podIP', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=91,
serialized_end=279,
)
_PODSTATUS.fields_by_name['initContainerStatuses'].message_type = capacity__admin__sdk_dot_model_dot_container_dot_container__status__pb2._CONTAINERSTATUS
_PODSTATUS.fields_by_name['containerStatuses'].message_type = capacity__admin__sdk_dot_model_dot_container_dot_container__status__pb2._CONTAINERSTATUS
DESCRIPTOR.message_types_by_name['PodStatus'] = _PODSTATUS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PodStatus = _reflection.GeneratedProtocolMessageType('PodStatus', (_message.Message,), {
'DESCRIPTOR' : _PODSTATUS,
'__module__' : 'pod_status_pb2'
# @@protoc_insertion_point(class_scope:container.PodStatus)
})
_sym_db.RegisterMessage(PodStatus)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
bc054ee3a41b51ba0c02fd50959e5203a9ce1be3 | cf3e0cd574248629ebeacb224fe96d3df19ea9ca | /django_todo_in_team/settings.py | d5bfaa75efdd103703ca23205c24708053e97cc2 | [] | no_license | ashur-k/Team-work-Hub | 34b9d9ec43cca53d11e072fd6a68e831ee6b4795 | 4da991e3166f8650cb24024ede07c485e9ee9dda | refs/heads/master | 2023-03-12T12:19:15.456078 | 2021-03-01T22:01:11 | 2021-03-01T22:01:11 | 340,626,504 | 0 | 0 | null | 2021-02-20T10:42:06 | 2021-02-20T10:29:03 | Shell | UTF-8 | Python | false | false | 3,416 | py | """
Django settings for django_todo_in_team project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+ivqwhb1)y)^hu@1ud@8*t1y&+q2(9+j(x%2^9_wj^sv^zonld'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'todo_in_team.apps.TodoInTeamConfig',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_todo_in_team.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'templates', 'allauth'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_todo_in_team.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
| [
"ashurkanwal@yahoo.com"
] | ashurkanwal@yahoo.com |
e2edf2037288c178e8a0f0e1fa79e543746def5c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Pygame/pygame-vkeyboard/pygame_vkeyboard/examples/numeric.py | 06131ee38998682cbdc8eb7bbac5590455f99b08 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:79af64ea269c1a2a6da43c377fc8ff4650b04b20a967d881019b3c32032044c3
size 1411
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
40a01e3075679851cc169322b9dbbbc9dc892738 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/exclamations/_heeling.py | ffab0ee927e6e8b45a4426cdd4c700dded04cec9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py |
from xai.brain.wordbase.exclamations._heel import _HEEL
#calss header
class _HEELING(_HEEL):
    """Word entry for "heeling", an inflection of the base word "heel"."""

    def __init__(self):
        super().__init__()
        self.name = "HEELING"
        self.specie = 'exclamations'
        self.basic = "heel"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2d4172e12adf3d83dd245b7a72488ead42370f77 | 4d675034878c4b6510e1b45b856cc0a71af7f886 | /configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py | 50689aadf6cab9414aab1a7a9e72ef8231355e4f | [
"Apache-2.0",
"BSD-2-Clause-Views",
"MIT",
"BSD-2-Clause"
] | permissive | shinya7y/UniverseNet | 101ebc2ad8f15482ee45ea8d6561aa338a0fa49e | 3652b18c7ce68122dae7a32670624727d50e0914 | refs/heads/master | 2023-07-22T08:25:42.646911 | 2023-07-08T18:09:34 | 2023-07-08T18:09:34 | 263,555,721 | 407 | 58 | Apache-2.0 | 2023-01-27T01:13:31 | 2020-05-13T07:23:43 | Python | UTF-8 | Python | false | false | 390 | py | _base_ = '../dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py'
# Override of the base DCN config: SyncBN-trained backbone (BN stats shared
# across GPUs, not frozen) plus a GCNet ContextBlock plugin on stages c3-c5.
model = {
    'backbone': {
        'norm_cfg': {'type': 'SyncBN', 'requires_grad': True},
        'norm_eval': False,
        'plugins': [
            {
                'cfg': {'type': 'ContextBlock', 'ratio': 1. / 16},
                # Skip the first stage (c2); insert after conv3 of c3-c5.
                'stages': (False, True, True, True),
                'position': 'after_conv3',
            },
        ],
    },
}
| [
"noreply@github.com"
] | shinya7y.noreply@github.com |
5f5b8c3d8d2cd2aa32541dee22abcced270af05c | 18b9251055f88b6fc28108d2c209d2b71b6b6f5d | /rebnypy/lookups.py | 7c5a5411b6df113baed6e7a21d440d6121db1068 | [
"MIT"
# Code -> human-readable label tables for REBNY/IDX listing fields.
# Outer keys are field names as they appear in listing rows; expand_row()
# below uses these tables to translate coded values.
LOOKUPS = {
    "AirConditioning": {
        "C":"Central",
        "F":"Free Standing",
        "M":"Multi-Zone",
        "N":"None",
        "T":"Through the Wall",
        "U":"Unknown Type",
        "W":"Window Units",
    },
    "Borough": {
        "BK":"Brooklyn",
        "BX":"Bronx",
        "NY":"Manhattan",
        "QN":"Queens",
        "SI":"Staten Island",
    },
    "BuildingAccess": {
        "A":"Attended Elevator",
        "E":"Elevator",
        "K":"Keyed Elevator",
        "N":"None",
        "W":"Walk-up",
    },
    "BuildingAge": {
        "O":"Post-war",
        "R":"Pre-war",
    },
    "BuildingType": {
        "D":"Development Site",
        "F":"Loft",
        "G":"Garage",
        "H":"High-Rise",
        "L":"Low-Rise",
        "M":"Mid-Rise",
        "O":"Hotel",
        "P":"Parking Lot",
        "S":"House",
        "T":"Townhouse",
        "V":"Vacant Lot",
    },
    "Heat": {
        "B":"Baseboard",
        "C":"Central",
        "E":"Electric",
        "G":"Gas",
        "M":"Multi-Zone",
        "O":"Oil",
        "R":"Radiator",
        "U":"Unknown Type",
    },
    "LeaseTerm": {
        "1":"One Year",
        "2":"Two Year",
        "3":"Short-term",
        "4":"Month-to-month",
        "5":"Specific term",
        "6":"One or Two year",
        "7":"Short or Long term",
    },
    "LeaseType": {
        "B":"Stabilized Lease",
        "C":"Commercial",
        "N":"Non-Stabilized Lease",
        # NOTE(review): this entry looks like page-footer text scraped from the
        # IDX API PDF rather than a real code/label pair -- verify against the
        # IDX API documentation before relying on it.
        "On-Line":"Residential, Inc | IDX API documentation v1.0 | Published 11/01/2014 | Page 27 of 29",
        "S":"Stabilized Sublease",
        "U":"Non-Stabilized Sublease",
    },
    # Docs say ListingStatus, but the data is actually Status. So I'm duplicating this lookup here
    "Status": {
        "A":"Active",
        "B":"Board Approved",
        "C":"Contract Signed",
        "E":"Leases Signed",
        "H":"TOM",
        "I":"POM",
        "J":"Exclusive Expired",
        "L":"Leases Out",
        "O":"Contract Out",
        "P":"Offer Accepted/Application",
        "R":"Rented",
        "S":"Sold",
    },
    # Identical to "Status" above (kept under the documented field name).
    "ListingStatus": {
        "A":"Active",
        "B":"Board Approved",
        "C":"Contract Signed",
        "E":"Leases Signed",
        "H":"TOM",
        "I":"POM",
        "J":"Exclusive Expired",
        "L":"Leases Out",
        "O":"Contract Out",
        "P":"Offer Accepted/Application",
        "R":"Rented",
        "S":"Sold",
    },
    "ListingStatusRental": {
        "A":"Active",
        "E":"Leases Signed",
        "H":"TOM",
        "I":"POM",
        "J":"Exclusive Expired",
        "L":"Leases Out",
        "P":"Application",
        "R":"Rented",
    },
    "ListingStatusSale": {
        "A":"Active",
        "B":"Board Approved",
        "C":"Contract Signed",
        "H":"TOM",
        "I":"POM",
        "J":"Exclusive Expired",
        "O":"Contract Out",
        "P":"Offer Accepted",
        "S":"Sold",
    },
    "ListingType": {
        "A":"Ours Alone",
        "B":"Exclusive",
        "C":"COF",
        "L":"Limited",
        "O":"Open",
        "Y":"Courtesy",
        "Z":"Buyer's Broker",
    },
    "MediaType": {
        "F":"Floor plan",
        "I":"Interior Photo",
        "M":"Video",
        "O":"Other",
        "V":"Virtual Tour",
    },
    "Ownership": {
        "C":"Commercial",
        "D":"Condop",
        "G":"Garage",
        "I":"Income Property",
        "M":"Multi-Family",
        "N":"Condo",
        "P":"Co-op",
        "R":"Rental Property",
        "S":"Single Family",
        "T":"Institutional",
        "V":"Development Site",
        "X":"Mixed Use",
    },
    "PayPeriod": {
        "M":"Monthly",
        "Y":"Yearly",
    },
    "PetPolicy": {
        "A":"Pets Allowed",
        "C":"Case By Case",
        "D":"No Dogs",
        "N":"No Pets",
        "T":"No Cats",
    },
    "SalesOrRent": {
        "R":"Apartment for Rent",
        "S":"Apartment for Sale",
        "T":"Building for Sale",
    },
    "ServiceLevel": {
        "A":"Attended Lobby",
        "C":"Concierge",
        "F":"Full Time Doorman",
        "I":"Voice Intercom",
        "N":"None",
        "P":"Part Time Doorman",
        "S":"Full Service",
        "U":"Virtual Doorman",
        "V":"Video Intercom",
    }
}
def expand_row(row):
    """Recursively translate coded field values into human-readable labels.

    Fields with a LOOKUPS table are mapped (unknown codes become 'UNKNOWN');
    nested dict-like values are expanded recursively; everything else is
    passed through untouched.
    """
    def _expand(key, value):
        table = LOOKUPS.get(key)
        if table is not None:
            return table.get(value, 'UNKNOWN')
        if hasattr(value, 'items'):
            return expand_row(value)
        return value

    return {key: _expand(key, value) for key, value in row.items()}
| [
"me@kevinmccarthy.org"
] | me@kevinmccarthy.org |
22ca343e3f7395a467d41262e0894c3079afe3eb | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_9542.py | 377c17d361a2194f088528cf78b28ae16b57ab04 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | # Python: getting lowest integer in list of tuples
>>> nums = [(), (), ('24', '25', '26', '27'), (), (), (), ()]
>>> min(int(j) for i in nums for j in i)
24
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
9f6014f724cb1cccfd86b1c42cd8bece2474f0e8 | d039bfad0e8cc3184b8112e23f9a1ef06b4001d3 | /map_motif_space.py | 4b0987e9c71f09552ac5e8da5b6151b9d3611ae0 | [] | no_license | vhsvhs/prob_motif | 9afa93f8f3c922103be77052641902c105fe4f16 | 7bdc2485ead23c7d092cc89d3975b37c52c31135 | refs/heads/master | 2021-01-01T20:48:55.052391 | 2012-05-29T22:48:04 | 2012-05-29T22:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,139 | py | #
# Input: a directory of mlib files
#
# Output: a graph where node size = # of motifs bound by a PWM, edge weight = # of motifs jointly bound by two PWMs
#
from argparser import *
import matplotlib.pyplot as plt
import networkx as nx
import os
ap = ArgParser(sys.argv)
def build_mlib_hash(genes_files):
    """Parse motif-library (.mlib) files into per-gene motif lists.

    genes_files: dict mapping gene name -> path to its mlib file.
    Returns: dict mapping gene name -> list of motif strings.
    """
    ret = {}
    for gene in genes_files:
        ret[gene] = []
        # `with` guarantees the handle is closed even if reading raises;
        # the original opened/closed the file manually and leaked it on error.
        with open(genes_files[gene], "r") as fin:
            lines = fin.readlines()
        for l in lines:
            # Keep non-comment lines with visible content.  len(l) > 2 counts
            # the trailing newline, so 1-character motifs are dropped
            # (filter preserved exactly from the original).
            if len(l) > 2 and not l.startswith("#"):
                ret[gene].append(l.strip())
    return ret
def get_mlib_files(dirpath):
    """Scan *dirpath* for motif-library files.

    Returns a dict mapping gene name (second dot-separated token of the
    filename, e.g. "motifs.TF1.mlib" -> "TF1") to the file's path.
    """
    found = {}
    for filename in os.listdir(dirpath):
        if "mlib" not in filename:
            continue
        gene = filename.split(".")[1]
        found[gene] = dirpath + "/" + filename
    return found
def plot_mlib_distribution(tf_m):
    """Show a 20-bin histogram of motif-library sizes (one count per TF)."""
    library_sizes = [len(motifs) for motifs in tf_m.values()]
    plt.hist(library_sizes, 20)
    plt.show()
def print_mlib_stats(tf_m):
    """Print a table of motif-library sizes and the TFs having each size.

    tf_m: dict mapping TF name -> list of motifs.
    """
    # Build a reverse lookup: library size -> TFs whose library has that size.
    mlen_tf = {}
    for tf in tf_m:
        mlen = len(tf_m[tf])
        if mlen not in mlen_tf:
            mlen_tf[mlen] = []
        mlen_tf[mlen].append(tf)
    # The original called .sort() on dict.keys(), which crashes on Python 3
    # (keys() is a view there, not a list); sorted() works on both 2 and 3.
    mlen_sorted = sorted(mlen_tf)
    # Single-argument print() calls are valid on Python 2 as well.
    print("\n. Motif Library Details:")
    print("[N motifs]\t[tfs]")
    for mlen in mlen_sorted:
        print("%s \t %s" % (mlen, mlen_tf[mlen]))
def intersect(a, b):
    """Return the elements common to both iterables (order unspecified)."""
    shared = set(a).intersection(b)
    return list(shared)
def plot_motif_space(tf_m):
    """Draw the motif-space graph: one node per TF, sized by its motif-library
    size; one edge per TF pair, weighted by their shared-motif count.

    NOTE(review): Python 2 code -- print statements and indexing dict.keys()
    below would fail on Python 3.
    """
    print "\n. Plotting Motif Space..."
    G = nx.Graph()
    for tf in tf_m.keys():
        # Node size proportional to the number of motifs in this TF's library.
        G.add_node(tf, size=1.0*tf_m[tf].__len__())
    tfs = tf_m.keys()
    # Every unordered TF pair; only pairs sharing at least one motif get an edge.
    for i in range(0, tfs.__len__()):
        for j in range(i+1, tfs.__len__()):
            x = intersect(tf_m[ tfs[i] ], tf_m[ tfs[j] ]).__len__()
            if x > 0:
                print tfs[i], tfs[j], x
                G.add_edge(tfs[i], tfs[j], weight=0.1*x)
    plt.figure(figsize=(8,8))
    pos=nx.spring_layout(G,iterations=20)
    nodesize=[]
    for v in G.node:
        nodesize.append(G.node[v]["size"])
    nx.draw_networkx_nodes(G, pos, node_size=nodesize, node_color="blue", alpha=0.5, linewidths=0.1)
    # Edges are drawn one at a time so each can carry its own width.
    for e in G.edges():
        #print e
        edgewidth = [ G.get_edge_data(e[0],e[1])["weight"] ]
        this_edge = [ e ]
        #print this_edge, edgewidth
        #print [(pos[e[0]],pos[e[1]]) for e in this_edge]
        nx.draw_networkx_edges(G, pos, edgelist = this_edge, width = edgewidth)
    nx.draw_networkx_labels(G, pos, font_size=9, font_family="Helvetica")
    plt.show()
#
#
# MAIN: build the per-TF motif hash from --mlibdir, then print stats and plots.
#
#
mlib_dir = ap.getOptionalArg("--mlibdir")
# getOptionalArg returns False when the flag is absent; compare identity with
# `is not False` rather than `!= False` (the original `!=` comparison against
# a boolean literal is the classic E712 lint smell).
if mlib_dir is not False:
    mlib_files = get_mlib_files(mlib_dir)
    tf_m = build_mlib_hash(mlib_files)
    plot_mlib_distribution(tf_m)
    print_mlib_stats(tf_m)
    plot_motif_space(tf_m)
| [
"victorhansonsmith@gmail.com"
] | victorhansonsmith@gmail.com |
cd27c38ac0da5b55f53fe18973011869bb0c24fd | 7a043d45cf0ed0938a10a03121c2b75fdd0cc76a | /081/p081.py | dd354f637d511c2ffdc9af4ac4929a3218868b0c | [] | no_license | tormobr/Project-euler | f8d67292a6426ffba9d589d01c31e2d59249e4ff | b544540b0fee111a6f6cfe332b096fe1ec88935c | refs/heads/master | 2020-05-29T17:27:03.767501 | 2020-02-13T13:06:34 | 2020-02-13T13:06:34 | 189,276,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | from collections import defaultdict
import time
def solve():
    """Return the minimal top-left -> bottom-right path sum (right/down moves only)."""
    return dynamic()


def create_dict():
    """Build an adjacency map (i, j) -> [(i+1, j), (i, j+1)] over the grid.

    NOTE(review): never called by this script; relies on the module globals
    h and w defined at the bottom of the file.
    """
    d = defaultdict(lambda: [])
    for i in range(h):
        for j in range(w):
            d[(i,j)].append((i+1, j))
            d[(i,j)].append((i, j+1))
    return d


def dynamic():
    """Bottom-up DP over the module-global `data` matrix (mutated in place).

    First turns the last row and last column into suffix sums, then fills
    each remaining cell with its value plus the cheaper of its right/down
    neighbours.  Returns data[0][0], the minimal path sum.
    """
    for i in range(h-1, -1, -1):
        data[h][i] += data[h][i+1]
        data[i][w] += data[i+1][w]
    for i in range(h-1, -1, -1):
        for j in range(w-1, -1, -1):
            data[i][j] += min(data[i+1][j], data[i][j+1])
    return data[0][0]
def read_file():
    """Parse input.txt into a matrix of ints (one row per line, comma-separated).

    Uses `with` so the file handle is closed deterministically; the original
    left the handle from open("input.txt") unclosed.
    """
    with open("input.txt") as fin:
        text = fin.read().strip()
    return [list(map(int, line.split(","))) for line in text.split("\n")]
# Script entry: load the matrix, then print the answer.
data = read_file()
# NOTE(review): `dist` (and the `import time` at the top) appear unused.
dist = defaultdict(int)
# h/w are the last valid row/column indices; dynamic() indexes up to
# data[h][...] and data[...][w].
h = len(data) -1
w = len(data[0]) -1
print(solve())
| [
"tormod.brandshoi@gmail.com"
] | tormod.brandshoi@gmail.com |
68216f6212a047ad3f07031c8093629b15a45287 | c46a3546e58539444e508a97b68cac21e6422baa | /food_order/migrations/0002_auto_20181122_1056.py | 8b876e1ffa6057042762c414128bfa639c38c270 | [] | no_license | ahsanhabib98/Food-service-system | 7b21b9bd3d2f7db381bc01689c6a23d3b16bb933 | 5bbc50e375d1af8c551b1048f2c6504505ac0cf4 | refs/heads/master | 2022-12-11T02:35:05.097986 | 2018-11-28T11:19:23 | 2018-11-28T11:19:23 | 159,385,627 | 0 | 0 | null | 2022-12-08T02:27:40 | 2018-11-27T19:07:25 | Python | UTF-8 | Python | false | false | 1,099 | py | # Generated by Django 2.0.5 on 2018-11-22 04:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.5; avoid hand-editing -- the recorded
    # migration state must keep matching what was applied to the database.

    dependencies = [
        ('food_area', '0001_initial'),
        ('food_order', '0001_initial'),
    ]

    operations = [
        # New Client table with a foreign key to the area the client lives in.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('client_name', models.CharField(max_length=100)),
                ('client_image', models.ImageField(upload_to='images')),
                ('client_contact_no', models.PositiveIntegerField()),
                ('area', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='food_area.Area')),
            ],
        ),
        # Link each Order to exactly one Client.  default=1 only back-fills
        # existing rows during the migration; preserve_default=False removes
        # the default from the final field definition.
        migrations.AddField(
            model_name='order',
            name='client_info',
            field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='food_order.Client'),
            preserve_default=False,
        ),
    ]
| [
"ahredoan@gmail.com"
] | ahredoan@gmail.com |
15a9b3847e9f55cac74d29796b5229c70f430981 | 626da446e92b58808a179c4fc23c3de5b457e472 | /inventory/templatetags/inventory_tags.py | 6afb098ccfdf25cede7943eed30a96877ca09a56 | [
"BSD-3-Clause"
] | permissive | Eraldo/eraldoenergy | 76049cbb06fcc26940b8c004875f8aefbf65a95e | cb07a7722826924df4d416e8930c87f11bec3dd8 | refs/heads/master | 2020-12-23T17:43:21.683449 | 2018-05-05T18:12:43 | 2018-05-05T18:12:43 | 44,062,390 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from classytags.helpers import InclusionTag
from django import template
from django.utils.translation import ugettext_lazy as _
register = template.Library()
@register.tag
class Item(InclusionTag):
name = 'item'
template = 'inventory/widgets/item.html'
def get_context(self, context, **kwargs):
item = context.get('item')
if item:
return {
'name': item,
'image': item.image_1,
'price': item.price,
'original_price': item.price_original,
'url': item.url,
'id': item.pk,
}
else:
return {}
| [
"eraldo@eraldo.org"
] | eraldo@eraldo.org |
94730257260c0e6d4e04e0b65fa5129689586ecd | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/OperationTaskDTO.py | 989736892e65777dcec20f04f1a0c7083adda82e | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 6,284 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OperationTaskDTO(object):
    """Plain data-transfer object for a single operation-task record.

    Exposes one read/write property per wire-format field, plus converters
    to and from the Alipay request/response dict representation.
    """

    # Field names exactly as they appear in the Alipay dict payload
    # (order preserved from the original explicit implementation).
    _FIELDS = (
        'comment',
        'operation_task_id',
        'operation_task_name',
        'operation_task_type',
        'plan_end_time',
        'plan_start_time',
        'reject_comment',
        'status',
        'status_name',
        'user_id',
        'user_name',
    )

    def __init__(self):
        # Every field starts out unset; values live in the backing
        # attribute '_<field>' that the generated properties wrap.
        for field_name in self._FIELDS:
            setattr(self, '_' + field_name, None)

    # --- generated pass-through properties --------------------------------
    def _field_property(field_name):  # helper executed at class-creation time
        backing = '_' + field_name

        def fget(self):
            return getattr(self, backing)

        def fset(self, value):
            setattr(self, backing, value)

        return property(fget, fset)

    comment = _field_property('comment')
    operation_task_id = _field_property('operation_task_id')
    operation_task_name = _field_property('operation_task_name')
    operation_task_type = _field_property('operation_task_type')
    plan_end_time = _field_property('plan_end_time')
    plan_start_time = _field_property('plan_start_time')
    reject_comment = _field_property('reject_comment')
    status = _field_property('status')
    status_name = _field_property('status_name')
    user_id = _field_property('user_id')
    user_name = _field_property('user_name')

    del _field_property  # class-body helper only; keep it off the class

    def to_alipay_dict(self):
        """Serialize to the Alipay parameter dict.

        Falsy fields (None, '', 0, ...) are omitted, and values providing
        their own to_alipay_dict() are serialized recursively -- matching the
        behaviour of the hand-written per-field version.
        """
        params = dict()
        for field_name in self._FIELDS:
            value = getattr(self, field_name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an OperationTaskDTO from an Alipay dict; None for empty input."""
        if not d:
            return None
        o = OperationTaskDTO()
        for field_name in OperationTaskDTO._FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
23dbae54366ea14f75cf9be0f657750d526197d8 | 67055c66ae4bca82ee61dab1757b73cc21559cfd | /miko.py | ebef8e8f43676019b4164ec58017c98197ecbf5a | [] | no_license | kevinelong/network_python | 0482f694c9c050f4226bdfb7cc4fe31df26dd17b | 41158808bac5d127c3f3f3cfaea202cb60d7167c | refs/heads/master | 2023-07-14T12:20:14.550017 | 2021-08-26T00:13:45 | 2021-08-26T00:13:45 | 359,521,517 | 1 | 20 | null | 2021-04-28T18:08:15 | 2021-04-19T16:12:03 | Python | UTF-8 | Python | false | false | 2,195 | py | from netmiko import ConnectHandler
import os

# Point ntc-templates' TextFSM parser at a local install of the template
# library (machine-specific Windows path).
os.environ["NET_TEXTFSM"] = "d:/python37/lib/site-packages/ntc_templates/templates"

# SECURITY NOTE(review): credentials and a public host IP are hard-coded in
# this demo script; move them to environment variables or a secrets store
# before any reuse.
linux = {
    'device_type': 'linux', #cisco_ios
    'host': '3.81.60.164',
    'username': 'kevin',
    'password': 'S!mpl312',
}

c = ConnectHandler(**linux) # use of kwargs optional, could just use regular parameters

# Raw command output: a single plain-text string.
raw = c.send_command("arp -a")
print(raw)

# use_textfsm=True parses the output into structured records (list of dicts).
r = c.send_command("arp -a", use_textfsm=True)
print(r)
print(r[0]["ip_address"])
for item in r:
    print(item)
    print(item["ip_address"])

"""
EXPECTED OUTPUT:
[{'rev_dns': '_gateway', 'ip_address': '172.30.1.1', 'mac_address': '0e:18:8d:7f:b8:65', 'hw_type': 'ether', 'interface': 'eth0'}]
"""
# C:\Users\kevin\ntc-templates
# from netmiko import ConnectHandler
# import paramiko
# private_key_path = "~/.ssh/clvrclvr.pem"
# linux = {
# 'device_type': 'linux',
# 'host': 'clvrclvr.com',
# 'username': 'kevin',
# 'password': 'S!mpl312',
# 'pkey' : paramiko.RSAKey.from_private_key_file(private_key_path)
# }
# c = ConnectHandler(**linux) # use of kwargs optional, could just use regular parameters
# r = c.send_command("arp -a")
#SHOW COMMAND OUTPUT
#show platform diag
"""
Chassis type: ASR1004
Slot: R0, ASR1000-RP1
Running state : ok, active
Internal state : online
Internal operational state : ok
Physical insert detect time : 00:00:45 (2w5d ago)
Software declared up time : 00:00:45 (2w5d ago)
CPLD version : 07062111
Firmware version : 12.2(33r)XNC
Slot: F0, ASR1000-ESP10
Running state : ok, active
Internal state : online
Internal operational state : ok
Physical insert detect time : 00:00:45 (2w5d ago)
Software declared up time : 00:03:15 (2w5d ago)
Hardware ready signal time : 00:00:46 (2w5d ago)
Packet ready signal time : 00:04:00 (2w5d ago)
CPLD version : 07091401
Firmware version : 12.2(33r)XNC
Slot: P0, ASR1004-PWR-AC
State : ok
Physical insert detect time : 00:03:08 (2w5d ago)
Slot: P1, ASR1004-PWR-AC
State : ok
Physical insert d
""" | [
"kevinelong@gmail.com"
] | kevinelong@gmail.com |
ee39e7c0980af8ab5743db76e6b42c88addd8bd4 | dead81f54b0aa5292f69bb5fef69e9910a137fc4 | /demo/entrypoint.py | d42cf12bc00a567fb61b3167a8116c7fb936cb17 | [
"MIT"
] | permissive | Nekmo/djangocms-bs3-theme | 0b7274b73b5072cbb8c737f13a94143363ae864d | 1155588414164d6e5d027131e9181856f8a80d5d | refs/heads/master | 2023-01-11T19:58:29.922023 | 2020-03-08T17:10:37 | 2020-03-08T17:10:37 | 56,414,025 | 0 | 0 | MIT | 2022-12-26T20:05:06 | 2016-04-17T01:47:51 | CSS | UTF-8 | Python | false | false | 1,387 | py | #!/usr/bin/env python
import sys
import os
import subprocess
# Marker file recording the commit of the last successful build.
COMMIT_FILE = '.last_build_commit'

# Default behaviour: build Django assets, and do so unconditionally.
os.environ.setdefault('BUILD_DJANGO', '1')
os.environ.setdefault('FORCE_BUILD', '1')


def execute_command(*args):
    """Run a command, raising CalledProcessError on a non-zero exit status."""
    subprocess.check_call(args)


def get_current_commit():
    """Return the current git HEAD commit hash as a string."""
    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('utf-8').strip()


def read_file():
    """Return the commit recorded by the last build, or '' when none exists."""
    if not os.path.lexists(COMMIT_FILE):
        return ''
    with open(COMMIT_FILE, 'r') as marker:
        return marker.read().strip('\n')


def write_file(data):
    """Persist *data* as the last-built commit; no-op when data is None."""
    if data is None:
        return
    with open(COMMIT_FILE, 'w') as marker:
        return marker.write(data)


def build_now():
    """Collect static files and apply database migrations."""
    execute_command('make', 'collectstatic')
    execute_command('make', 'migrate')


def build(force_build=False):
    """Build when forced or when HEAD differs from the last-built commit.

    On success the built commit is recorded; a failed build exits with 1.
    """
    current_commit = None if force_build else get_current_commit()
    if force_build or read_file() != current_commit:
        try:
            build_now()
        except subprocess.CalledProcessError:
            exit(1)
        else:
            write_file(current_commit)


def start(*parameters):
    """Exec gunicorn with the given command-line parameters."""
    subprocess.check_call(['gunicorn'] + list(parameters))


if __name__ == '__main__':
    if os.environ.get('BUILD_DJANGO') == '1':
        build(os.environ.get('FORCE_BUILD') == '1')
    start(*sys.argv[1:])
| [
"contacto@nekmo.com"
] | contacto@nekmo.com |
c3acf2f9644f455d0582bdf419bac21f96bab503 | eebacbc58a1c99fb6e32f8cd56cac6e18947d3e7 | /1.python_foundation/2.String_and_encode.py | 266ce0314234e4a8b2c1d08d9bd197a30e5bfb48 | [] | no_license | fzingithub/LearnPythonFromLiao | ad7f959d7e667a464f2b9a6b1cedfd0f08baaf8e | fcb0f2e7f905aca253b3986c4a1ceab6b82b7cae | refs/heads/master | 2020-03-29T19:37:32.831341 | 2018-09-27T10:39:11 | 2018-09-27T10:39:11 | 150,273,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 13 15:25:06 2018
@author: FZ
"""
#string and unicode
#ASCII Unicode 可变长 UTF-8
print ('包含中的string')
print (ord('中'))
print (chr(20013))
#转码
print ('ABC'.encode('ascii'))
print ('中文'.encode('UTF-8'))
#字节流编码
print (b'\xe4\xb8\xad\xe6\x96\x87'.decode('UTF-8'))
print (len('youareabetterman'))
#通配符 excerse
s1 = 72
s2 = 85
rate = (85-72)/72*100
print ('%.1f%%'% rate)
#小结:python使用的是 unicode编码,直接支持多语言
#string 与 byte转换时需要指定编码最常用的是 UTF-8 | [
"1194585271@qq.com"
] | 1194585271@qq.com |
2b5c14efee99ffcc5240e049f48d3ac73d1e0b14 | 762b4373122e5cc791eb81759590008bdfd1f034 | /core/models/others/capsnet_em.py | d64ae0859347d99465b89adabc666fd3340a2ac6 | [] | no_license | StephenTaylor1998/high-resolution-capsule | 50929527e84d57704e1295195c6a1b555367e565 | f999b01893bde98eb053d2778e8a1bad526d8293 | refs/heads/master | 2023-05-11T18:25:50.760820 | 2021-05-24T09:47:27 | 2021-05-24T09:47:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,711 | py | import torch
import torch.nn as nn
from core import models
from core.layers.others.base import weights_init, resnet20_backbone
from core.layers.others.layers_em import EmRouting2d
from core.models import resnet18_dwt_tiny_half, resnet18_tiny_half, resnet10_tiny_half
class Model(nn.Module):
    """EM-routing capsule network: CNN backbone -> primary caps -> EM ConvCaps -> class caps."""

    def __init__(self, num_classes, planes=16, num_caps=16, depth=3, backbone=resnet18_dwt_tiny_half, caps_size=16,
                 in_shape=(3, 32, 32)):
        # num_classes: number of output classes (one class capsule each).
        # planes: backbone channel multiplier -- assumes the backbone emits
        #         num_caps * planes feature channels (TODO confirm).
        # num_caps: capsule types per spatial location.
        # depth: total capsule layers; depth-1 intermediate EmRouting2d layers are built.
        # caps_size: pose size per capsule (16 presumably = flattened 4x4 pose matrix).
        # in_shape: (C, H, W) of the input; only the channel count is used here.
        super(Model, self).__init__()
        self.num_caps = num_caps
        self.depth = depth
        self.layers = backbone(backbone=True, in_channel=in_shape[0])
        self.conv_layers = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        # ========= ConvCaps Layers
        # The first routing layer downsamples (stride 2); later ones keep resolution.
        for d in range(1, depth):
            stride = 2 if d == 1 else 1
            self.conv_layers.append(EmRouting2d(num_caps, num_caps, caps_size, kernel_size=3, stride=stride, padding=1))
            # 4 * 4 * num_caps pose channels -- presumably matches caps_size=16;
            # verify before changing caps_size.
            self.norm_layers.append(nn.BatchNorm2d(4 * 4 * num_caps))
        # Spatial size of the map reaching the class-capsule layer
        # (assumes a 32x32 input with this backbone -- TODO confirm).
        final_shape = 4
        # EM
        # Initial activation (a) and pose heads computed from backbone features.
        self.conv_a = nn.Conv2d(num_caps * planes, num_caps, kernel_size=3, stride=1, padding=1, bias=False)
        self.conv_pose = nn.Conv2d(num_caps * planes, num_caps * caps_size, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn_a = nn.BatchNorm2d(num_caps)
        self.bn_pose = nn.BatchNorm2d(num_caps * caps_size)
        # Final EM routing maps num_caps capsules onto num_classes class capsules.
        self.fc = EmRouting2d(num_caps, num_classes, caps_size, kernel_size=final_shape, padding=0)
        self.apply(weights_init)

    def forward(self, x):
        """Return per-class activations, shape (batch, num_classes)."""
        out = self.layers(x)
        # EM
        a, pose = self.conv_a(out), self.conv_pose(out)
        # Activations are squashed into (0, 1); poses are only batch-normalized.
        a, pose = torch.sigmoid(self.bn_a(a)), self.bn_pose(pose)
        for m, bn in zip(self.conv_layers, self.norm_layers):
            a, pose = m(a, pose)
            pose = bn(pose)
        a, _ = self.fc(a, pose)
        # Average class-capsule activations over the remaining spatial positions.
        out = torch.mean(a, dim=[2, 3], keepdim=False)
        return out
def _build_capsnet_em(num_classes, args, depth):
    """Shared factory: resolve backbone/in_shape from CLI args, build Model.

    The three public constructors below differed only in the routing depth;
    the common body lives here so it is written once.
    """
    in_shape = (3, 32, 32) if args.in_shape is None else args.in_shape
    backbone = models.__dict__[args.backbone]
    return Model(num_classes, depth=depth, backbone=backbone, in_shape=in_shape)


def capsnet_em_depthx1(num_classes=10, args=None, **kwargs):
    """EM-routing capsule network with a single capsule layer (depth=1)."""
    return _build_capsnet_em(num_classes, args, depth=1)


def capsnet_em_depthx2(num_classes=10, args=None, **kwargs):
    """EM-routing capsule network with two capsule layers (depth=2)."""
    return _build_capsnet_em(num_classes, args, depth=2)


def capsnet_em_depthx3(num_classes=10, args=None, **kwargs):
    """EM-routing capsule network with three capsule layers (depth=3)."""
    return _build_capsnet_em(num_classes, args, depth=3)
| [
"2684109034@qq.com"
] | 2684109034@qq.com |
5b20d25002d847a60df58c8f76a76214777c80ff | 7530867a3f3d80600b1f728b65d778f7b4e3deb0 | /layers/linear.py | 7e903791440ba4262e4e1d8e443136de7d048a95 | [
"MIT"
] | permissive | rezer0dai/zer0nets | 1fba5895fcb0397ec481b9cdbfa686f7b4cd83e8 | 982fa69571478dc61c6110f3287fad94af6d4f2c | refs/heads/master | 2020-03-24T09:36:23.499160 | 2018-07-28T00:02:08 | 2018-07-28T00:02:08 | 142,632,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | import numpy as np
from feat_space import *
class Linear(FeatureSpace):
    """Identity feature space: the signal passes through unchanged."""

    def name(self):
        """Identifier string for this feature space."""
        return "linear"

    def signal(self, x):
        """Identity activation: return the input as-is."""
        return x

    def prime(self, _):
        """Derivative of the identity activation: the constant 1."""
        return 1.
| [
"aaa@bbb.ccc"
] | aaa@bbb.ccc |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.