blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a31b322b32555a927b3a63f5092900042142b843
|
27398b2a8ed409354d6a36c5e1d2089dad45b4ac
|
/backend/common/decapod_common/models/properties.py
|
2a7dbbf75e03a2cf644b94bf0f4bf491dda45988
|
[
"Apache-2.0"
] |
permissive
|
amar266/ceph-lcm
|
e0d6c1f825f5ac07d2926bfbe6871e760b904340
|
6b23ffd5b581d2a1743c0d430f135261b7459e38
|
refs/heads/master
| 2021-04-15T04:41:55.950583
| 2018-03-23T12:51:26
| 2018-03-23T12:51:26
| 126,484,605
| 0
| 0
| null | 2018-03-23T12:50:28
| 2018-03-23T12:50:27
| null |
UTF-8
|
Python
| false
| false
| 3,449
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains special property descriptors."""
import enum
import importlib
class Property:
    """Base class for the property descriptors in this module."""

    # Unique marker used to distinguish "attribute never set" from a stored
    # value of None (None is a legal stored value for model properties).
    SENTINEL = object()
class ChoicesProperty(Property):
    """Data descriptor that only accepts values from a fixed set of choices.

    ``choices`` may be any container, an Enum class, or a zero-argument
    callable returning a container (evaluated lazily on every assignment).
    The accepted value is stored on the instance under ``attr_name``.
    """

    def __init__(self, attr_name, choices):
        self.choices = choices
        self.attr_name = attr_name

    def __get__(self, instance, owner):
        value = getattr(instance, self.attr_name, self.SENTINEL)
        if value is self.SENTINEL:
            # Attribute was never assigned on this instance; include the
            # attribute name so the error is actually diagnosable.
            raise AttributeError(self.attr_name)
        return value

    def __set__(self, instance, value):
        choices = self.choices
        # Enum classes are callable too, but calling one would construct a
        # member instead of yielding the container, so they are excluded.
        if callable(choices) and type(choices) is not enum.EnumMeta:
            choices = choices()
        try:
            if value in choices:
                setattr(instance, self.attr_name, value)
                return
        except TypeError:
            # Unhashable/incomparable value: treat as "not a valid choice".
            pass
        # Fixed error message: the old text "Unknown error" described nothing;
        # the actual condition is a value outside the allowed choices.
        raise ValueError("Unknown value {0!r}".format(value))
class ModelProperty(Property):
    """Lazy-loading descriptor linking a stored model id to a model instance.

    The raw id lives in the instance ``__dict__`` under ``id_attribute``;
    the loaded model object is cached under ``id_attribute + "_instance"``.
    """

    @classmethod
    def get_value_id(cls, value):
        # Accept a model object, a raw DB document, None, or a plain id.
        if hasattr(value, "model_id"):
            return value.model_id
        if isinstance(value, dict):
            # Raw documents may carry the id as either "_id" or "id".
            return value.get("_id", value.get("id"))
        if value is None:
            return None
        return str(value)

    @classmethod
    def get_model(cls, klass, model_id):
        # NOTE(review): presumably klass is a model class exposing
        # find_by_model_id — defined elsewhere in the project; verify.
        return klass.find_by_model_id(model_id)

    def __init__(self, model_class_name, id_attribute):
        # model_class_name is a dotted path ("pkg.module.Class") that is
        # resolved lazily in get_class(), avoiding circular imports.
        self.model_class_name = model_class_name
        self.id_attribute = id_attribute
        self.instance_attribute = id_attribute + "_instance"

    def __get__(self, instance, owner):
        value = instance.__dict__.get(self.instance_attribute, self.SENTINEL)
        if value is not self.SENTINEL:
            # Cache hit (a cached None is a valid result).
            return value
        # Cache miss: load the model by the stored id and memoize it.
        model_id = instance.__dict__.get(self.id_attribute)
        model = self.get_model(self.get_class(), model_id)
        instance.__dict__[self.instance_attribute] = model
        return model

    def __set__(self, instance, value):
        # Store only the id and reset the cache to SENTINEL so the next
        # __get__ reloads the model from the database.
        value_id = self.get_value_id(value)
        instance.__dict__[self.id_attribute] = value_id
        instance.__dict__[self.instance_attribute] = self.SENTINEL

    def get_class(self):
        """Resolve and return the model class from its dotted path."""
        module, obj_name = self.model_class_name.rsplit(".", 1)
        module = importlib.import_module(module)
        klass = getattr(module, obj_name)
        return klass
class ModelListProperty(ModelProperty):
    """Like ModelProperty, but the attribute holds a list of model ids."""

    @classmethod
    def get_value_id(cls, value):
        # Map every element through ModelProperty.get_value_id. The explicit
        # two-argument super() is required: zero-argument super() does not
        # work inside a comprehension's implicit scope.
        return [super(ModelListProperty, cls).get_value_id(item)
                for item in value]

    @classmethod
    def get_model(cls, klass, model_id):
        # model_id is a list of ids here; fetch the latest revision of all
        # of them in a single query.
        query = {
            "model_id": {"$in": model_id},
            "is_latest": True
        }
        models = []
        for item in klass.list_raw(query):
            model = klass()
            model.update_from_db_document(item)
            models.append(model)
        return models
|
[
"sarkhipov@mirantis.com"
] |
sarkhipov@mirantis.com
|
215a011898e29aea78aa8531f6aadbd936358259
|
d68c9105c03bef9dce2e438b5b91c2bdd0d856e2
|
/[9095] 1, 2, 3 더하기.py
|
308b6ff62e407f5494c58d595b9839b3addcf2e6
|
[] |
no_license
|
newfull5/Baekjoon-Online-Judge
|
2a2dd1080af234551ecab6277968fedeb170a1f4
|
00d04f6c21080e3ad7c0fb06ca311f2324a591c0
|
refs/heads/master
| 2023-06-29T21:05:07.539911
| 2021-07-16T09:23:46
| 2021-07-16T09:23:46
| 267,557,726
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
def Reculsive(n):
    """Add to the global ``answer`` the number of ways to write ``n`` as an
    ordered sum of 1, 2 and 3 (Baekjoon #9095).

    Interface preserved: like the original, this increments the module-level
    ``answer`` instead of returning the count.  The original plain recursion
    re-explored every branch (exponential time); the DP helper below is O(n).
    """
    global answer
    answer += _count_ways(n)


def _count_ways(n):
    """Return the number of ordered compositions of ``n`` into parts 1-3."""
    ways = [1] + [0] * n  # ways[0] = 1: the empty sum
    for target in range(1, n + 1):
        for part in (1, 2, 3):
            if part <= target:
                ways[target] += ways[target - part]
    return ways[n]
# Read the number of test cases, then one n per line; print the count of
# ordered 1/2/3-sums for each n.  Reculsive() accumulates into this global.
for _ in range(int(input())):
    answer = 0
    Reculsive(int(input()))
    print(answer)
|
[
"noreply@github.com"
] |
newfull5.noreply@github.com
|
fcf3fe369d825fc8f70166e86d6154d98a1eccfa
|
23bc3e2bc6b2b9e3fd19f738d4767d09bec590b5
|
/CourseWork/Labs/lab3/vivek_pygame_base_template.py
|
e880efac680ed5ff5a5856816fdf28423d8e2bb4
|
[] |
no_license
|
vivekVells/GameDesignProgramming
|
4e683114bf487d2ea4c5c1c4a2b7a3375e8be8e7
|
bee0fbc4d0a8d0e4001d6c9c9b35fea6b74da1f9
|
refs/heads/master
| 2020-03-27T13:49:52.159394
| 2018-12-12T09:37:01
| 2018-12-12T09:37:01
| 146,630,596
| 0
| 0
| null | 2018-12-12T08:32:11
| 2018-08-29T16:49:28
|
Python
|
UTF-8
|
Python
| false
| false
| 2,516
|
py
|
"""
Show how to use a sprite backed by a graphic.
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/vRB_983kUMc
"""
import pygame
# Define some colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
pygame.init()
# Set the width and height of the screen [width, height]
size = (700, 500)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Vivek's 1st House via PyGame")
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
# --- Main event loop
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
done = True # Flag that we are done so we exit this loop
# --- Game logic should go here
# --- Drawing code should go here
# First, clear the screen to white. Don't put other drawing commands
# above this, or they will be erased with this command.
screen.fill(WHITE)
# rect(screen, GREEN, [x,y,breadth, length], 0)
# polygon(screen, BLACK, [[midx, midy], [leftx, lefty], [rightx, righty]], 5)
# drawing house
pygame.draw.rect(screen, RED, [100, 200, 200, 200], 0)
# drawing chimney
pygame.draw.rect(screen, BLACK, [125, 140, 20, 60], 0)
# drawing roof
pygame.draw.polygon(screen, WHITE, [[200, 100], [100, 200], [300, 200]], 0)
pygame.draw.polygon(screen, BLACK, [[200, 100], [100, 200], [300, 200]], 3)
# drawing window
pygame.draw.rect(screen, GREEN, [125, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [175, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [225, 250, 10, 30], 0)
pygame.draw.rect(screen, GREEN, [275, 250, 10, 30], 0)
# drawing the door
pygame.draw.rect(screen, BLACK, [190, 350, 20, 50], 0)
BLUE = (0, 0, 255)
BOARD_X = 50
BOARD_Y = 350
BOARD_LENGTH = 150
BOARD_WIDTH = 70
BOARD_COLOR_FILL = 0
pygame.draw.rect(screen, BLUE, [BOARD_X, BOARD_Y, BOARD_LENGTH, BOARD_WIDTH], BOARD_COLOR_FILL)
# --- Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# --- Limit to 60 frames per second
clock.tick(60)
# Close the window and quit.
# If you forget this line, the program will 'hang'
# on exit if running from IDLE.
pygame.quit()
|
[
"techengineervivek@gmail.com"
] |
techengineervivek@gmail.com
|
4bb3df61f7e8707d0f5b6dc0a372e300a836a1f0
|
d5e4d88e4124ab2387bac64e7d7b76ff37793bf6
|
/011/problem11.py
|
072127ab96c86257506ca23cee758a4aa9743be4
|
[] |
no_license
|
grawinkel/ProjectEuler
|
1ae5572eec92e4307183e8b30222ffa39ef4bbce
|
b470dd4219c769587769c9a70ec3bae5d3ca1166
|
refs/heads/master
| 2021-05-26T20:01:03.410567
| 2012-10-05T16:58:48
| 2012-10-05T16:58:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,421
|
py
|
# To change this template, choose Tools | Templates
# and open the template in the editor.
# Project Euler #11: greatest product of four adjacent numbers in a 20x20 grid.
__author__="meatz"
__date__ ="$01.08.2010 14:10:38$"
# Grid rows (lists of number strings), filled from data.txt in __main__.
m = []
# Best product found so far.  NOTE(review): shadows the builtin max().
max = 0
# Grid coordinates where the best 4-cell run starts.
maxa,maxb = 0,0
def nw(a, b):
    """Score the 4-cell run going north-west from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a - step][b - step])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def n(a, b):
    """Score the 4-cell run going north (upwards) from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a - step][b])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def sw(a, b):
    """Score the 4-cell run going south-west from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a + step][b - step])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def w(a, b):
    """Score the 4-cell run going west (leftwards) from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a][b - step])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def s(a, b):
    """Score the 4-cell run going south (downwards) from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a + step][b])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def se(a, b):
    """Score the 4-cell run going south-east from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a + step][b + step])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def ne(a, b):
    """Score the 4-cell run going north-east from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a - step][b + step])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def e(a, b):
    """Score the 4-cell run going east (rightwards) from (a, b)."""
    global max, maxa, maxb
    prod = 1
    for step in range(4):
        prod *= int(m[a][b + step])
    if prod > max:
        max = prod
        maxa = a
        maxb = b
def run(m):
    """Try every grid cell as the start of a 4-cell run in all 8 directions.

    Each direction is attempted only when all four cells fit inside the
    20x20 grid (the diagonal calls are nested under the west/east column
    guards in addition to their own row guards).
    """
    # NOTE: the parameter shadows the module-level `m`; the direction helpers
    # read the global, which is the same list object passed in from __main__.
    for a in range(20):
        for b in range(20):
            if (a-3>=0):
                n(a,b)
            if (a+3<=19):
                s(a,b)
            if (b-3>=0): #check the west
                w(a,b)
                if (a-3>=0):
                    nw(a,b)
                if (a+3<=19):
                    sw(a,b)
            if (b+3<20): #check the east
                e(a,b)
                if (a-3>=0):
                    ne(a,b)
                if (a+3<20):
                    se(a,b)
if __name__ == "__main__":
f = open("data.txt","r")
for x in f.readlines():
m.append(x.split(" "))
run(m)
print max
|
[
"matthias@grawinkel.com"
] |
matthias@grawinkel.com
|
b290f6c4c523dba303d7efb6b9edbfc26d01ce6b
|
4d0bbeb8ab52f7e450aff20056f7509e12751258
|
/lists/migrations/0003_list.py
|
da0266eb470c2bba6c9bd9b11f8ba74b47076401
|
[] |
no_license
|
chicocheco/tdd_book
|
f7c9246dcb4eb5327704c72f655bf6e187b28849
|
574b1082aa523c7434f50e0c4cbdf5777ddf50ef
|
refs/heads/master
| 2022-05-02T17:44:27.217329
| 2020-03-13T18:57:22
| 2020-03-13T18:57:22
| 197,633,503
| 0
| 0
| null | 2022-04-22T22:19:12
| 2019-07-18T17:56:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
# Generated by Django 2.2.3 on 2019-08-08 07:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the `List` model (id-only table)."""

    dependencies = [
        ('lists', '0002_item_text'),
    ]

    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                # Only the auto primary key; List rows carry no data of
                # their own yet (Items will reference them later).
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
|
[
"stanislav.matas@gmail.com"
] |
stanislav.matas@gmail.com
|
8e54379c9e0e2512323873740a307b5ac6552d0b
|
de79ece8981f0fd241bcea578e4a534a1213397e
|
/spirl/configs/few_shot_imitation_learning/kitchen/hierarchical_cl_gc_demo_slide_demo_trained_vae/conf.py
|
1f2fcbafcc3b7ab14bb8c70bf240ee9d69987572
|
[
"BSD-3-Clause"
] |
permissive
|
ahmeda14960/fist
|
3ee684cd7da0bb531d791321f1af09adad386ab4
|
baf2b0bfed12a9bc0db9a099abeefad1ef618d1c
|
refs/heads/master
| 2023-08-02T01:35:29.983633
| 2021-09-13T20:07:28
| 2021-09-13T20:07:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,905
|
py
|
import os
from spirl.models.closed_loop_spirl_mdl import GoalClSPiRLMdl
from spirl.components.logger import Logger
from spirl.utils.general_utils import AttrDict
from spirl.configs.default_data_configs.kitchen import data_spec
from spirl.components.evaluator import TopOfNSequenceEvaluator
from spirl.data.kitchen.src.kitchen_data_loader import KitchenStateSeqDataset
# Directory containing this config file.
current_dir = os.path.dirname(os.path.realpath(__file__))
# Few-shot demonstration data used for imitation fine-tuning.
fewshot_dataset = KitchenStateSeqDataset(
    data_path='data/kitchen/kitchen-demo-microwave_kettle_hinge_slide.hdf5',
    subseq_len=10,
)
# Task sequence the kitchen environment evaluates.
env = AttrDict(
    task_list = ['microwave', 'kettle', 'slide cabinet', 'hinge cabinet']
)
# Contrastive encoder hyper-parameters.
contra_model_cf = AttrDict(
    state_dimension=data_spec.state_dim,
    hidden_size=128,
    feature_size=32,
)
# Top-level training configuration consumed by the spirl trainer.
configuration = {
    'model': GoalClSPiRLMdl,
    'logger': Logger,
    'data_dir': '.',
    'epoch_cycles_train': 1,
    'evaluator': TopOfNSequenceEvaluator,
    'top_of_n_eval': 100,
    'top_comp_metric': 'mse',
    'batch_size': 128,
    'num_epochs': 50,
    'fewshot_data': fewshot_dataset,
    'fewshot_batch_size': 128,
    'contra_config': contra_model_cf,
    'contra_ckpt': './experiments/contrastive/kitchen/exact-mixed-all/exact_model.pt',
    'finetune_vae': True
}
configuration = AttrDict(configuration)
# Skill-prior (VAE) model hyper-parameters.
model_config = AttrDict(
    state_dim=data_spec.state_dim,
    action_dim=data_spec.n_actions,
    n_rollout_steps=10,
    kl_div_weight=5e-4,
    nz_enc=128,
    nz_mid=128,
    n_processing_layers=5,
    cond_decode=True,
    # checkpt_path=f'{os.environ["EXP_DIR"]}/skill_prior_learning/kitchen/hierarchical_cl_gc_no_slide'
)
# Dataset
data_config = AttrDict()
data_config.dataset_spec = data_spec
data_config.dataset_spec['dataset_path'] = './data/kitchen/kitchen-mixed-no-slide.hdf5'
data_config.dataset_spec.subseq_len = model_config.n_rollout_steps + 1  # flat last action from seq gets cropped
|
[
"kourosh_hakhamaneshi@berkeley.edu"
] |
kourosh_hakhamaneshi@berkeley.edu
|
ae48ce85c8caa8b2632e5bbc58f086388955ab75
|
df7f13ec34591fe1ce2d9aeebd5fd183e012711a
|
/hata/discord/application_command/application_command/tests/test__validate_version.py
|
0311466ec90e46c18abaa78702c11bd7846f90a8
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
HuyaneMatsu/hata
|
63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e
|
53f24fdb38459dc5a4fd04f11bdbfee8295b76a4
|
refs/heads/master
| 2023-08-20T15:58:09.343044
| 2023-08-20T13:09:03
| 2023-08-20T13:09:03
| 163,677,173
| 3
| 3
|
Apache-2.0
| 2019-12-18T03:46:12
| 2018-12-31T14:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
import vampytest
from ..fields import validate_version
def test__validate_version__0():
    """
    Tests whether `validate_version` works as intended.
    Case: passing.
    """
    version = 202302260011
    cases = [
        (version, version),
        (str(version), version),
    ]
    for input_value, expected_output in cases:
        vampytest.assert_eq(validate_version(input_value), expected_output)
def test__validate_version__1():
    """
    Tests whether `validate_version` works as intended.
    Case: `ValueError`.
    """
    invalid_values = ['-1', -1]
    for input_value in invalid_values:
        with vampytest.assert_raises(AssertionError, ValueError):
            validate_version(input_value)
def test__validate_version__2():
    """
    Tests whether `validate_version` works as intended.
    Case: `TypeError`.
    """
    badly_typed_values = [12.6]
    for input_value in badly_typed_values:
        with vampytest.assert_raises(TypeError):
            validate_version(input_value)
|
[
"re.ism.tm@gmail.com"
] |
re.ism.tm@gmail.com
|
c65f10f40c7746b6a0f8b226efa07085cf5a26f6
|
3634703ad8685c9bc5d73edf148b7b8722356c0e
|
/Algorithm/programmers/pg_2016년.py
|
872b394b834701e55c74ca2098cf27d1a25d7d18
|
[] |
no_license
|
chj3748/TIL
|
23d88f97ebc8b1e3a06bb93752dfd2d331d01fd8
|
40a4e524c28945c95f059b0dee598abb686abe04
|
refs/heads/master
| 2022-02-26T16:43:56.964719
| 2022-02-14T04:43:20
| 2022-02-14T04:43:20
| 235,233,054
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# math | programmers 2016년
# github.com/chj3748
import sys
def input():
    # Fast stdin line reader (common competitive-programming override of the
    # builtin input); strips the trailing newline.
    return sys.stdin.readline().rstrip()
def solution(a, b):
    """Return the weekday name ('SUN'..'SAT' style tags) of 2016-a-b.

    2016-01-01 was a Friday; 2016 is a leap year (Feb has 29 days).
    """
    days_in_month = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Index 0 is 'THU' so that an offset of b days from Dec 31 2015 (a
    # Thursday) lands on the right name.
    weeks = [ 'THU', 'FRI', 'SAT', 'SUN', 'MON', 'TUE', 'WED']
    day_of_year = sum(days_in_month[:a - 1]) + b
    return weeks[day_of_year % 7]
|
[
"redsmile123@naver.com"
] |
redsmile123@naver.com
|
b27b059c477b45152d67c266b8bde14dfdbcfe93
|
e122ab31559f7551e4bc4dff6dfa7f7dbbd10168
|
/jaqs/__init__.py
|
0be750ea380b5ec64652ff6b426589ec22e928c8
|
[
"Apache-2.0"
] |
permissive
|
WayneWan413/JAQS
|
ffb909d6d550451552697358735ec5dd74975b2d
|
e7362fc261f49dd7a4353c9a9a3f98d6ef9a78b4
|
refs/heads/master
| 2021-08-30T10:30:20.675837
| 2017-12-17T14:14:59
| 2017-12-17T14:14:59
| 113,726,696
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 294
|
py
|
# encoding: utf-8
"""
JAQS
~~~~
Open source quantitative research&trading framework.
copyright: (c) 2017 quantOS-org.
license: Apache 2.0, see LICENSE for details.
"""
import os
# Package version string.
__version__ = '0.6.6'
# Absolute path of the installed jaqs package directory.
SOURCE_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
[
"brillliantz@outlook.com"
] |
brillliantz@outlook.com
|
d3e1cb323db751ac2050493151ddde48bb868a90
|
566638e179b0add891e1d5c8900d35ae531af6dc
|
/alembic_simplelis/versions/6487bfd4c8aa_renamed_columns.py
|
6cd0406fe6bfe944447113a2432ef47fb6ff8af3
|
[] |
no_license
|
likit/querystud
|
9b023a45adfdbf6dc8a3a2f97fefb82b765c8690
|
1702c09ff6931b2cd94d0b55ef42f244c503a68a
|
refs/heads/master
| 2020-03-25T19:25:40.412824
| 2018-08-09T18:08:48
| 2018-08-09T18:08:48
| 144,082,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,273
|
py
|
"""renamed columns
Revision ID: 6487bfd4c8aa
Revises: 8c08809abb09
Create Date: 2018-08-09 15:54:28.683879
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '6487bfd4c8aa'
down_revision = '8c08809abb09'
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable report date/time columns; make recv date/time mandatory."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('labs', sa.Column('reportDate', sa.Date(), nullable=True))
    op.add_column('labs', sa.Column('reportTime', sa.Time(), nullable=True))
    # Tighten the receive columns: existing rows must already have values.
    op.alter_column('labs', 'recvDate',
               existing_type=sa.DATE(),
               nullable=False)
    op.alter_column('labs', 'recvTime',
               existing_type=postgresql.TIME(),
               nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Exact reverse of upgrade(): relax recv columns, drop report columns."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('labs', 'recvTime',
               existing_type=postgresql.TIME(),
               nullable=True)
    op.alter_column('labs', 'recvDate',
               existing_type=sa.DATE(),
               nullable=True)
    op.drop_column('labs', 'reportTime')
    op.drop_column('labs', 'reportDate')
    # ### end Alembic commands ###
|
[
"likit.pre@mahidol.edu"
] |
likit.pre@mahidol.edu
|
50bee84349089e1aa4828f68a88a6d8a89dfdf41
|
568d7d17d09adeeffe54a1864cd896b13988960c
|
/month01/day07/exercise05.py
|
3493424818067ec8a5a6e612d415a224bd930150
|
[
"Apache-2.0"
] |
permissive
|
Amiao-miao/all-codes
|
e2d1971dfd4cecaaa291ddf710999f2fc4d8995f
|
ec50036d42d40086cac5fddf6baf4de18ac91e55
|
refs/heads/main
| 2023-02-24T10:36:27.414153
| 2021-02-01T10:51:55
| 2021-02-01T10:51:55
| 334,908,634
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
# Nested travel info: city -> {"景区" (scenic spots), "美食" (local foods)}.
dict_travel_info = {
    "北京": {
        "景区": ["长城", "故宫"],
        "美食": ["烤鸭", "豆汁焦圈", "炸酱面"]
    },
    "四川": {
        "景区": ["九寨沟", "峨眉山"],
        "美食": ["火锅", "兔头"]
    }
}
# 1. Print the first scenic spot of Beijing
print(dict_travel_info["北京"]["景区"][0])
# Print the second food of Sichuan
print(dict_travel_info["四川"]["美食"][1])
# 2. Print all cities (one per line)
for key in dict_travel_info:
    print(key)
# 3. Print all Beijing foods (one per line)
for i in dict_travel_info["北京"]["美食"]:
    print(i)
# 4. Print every city's foods (one per line)
for value in dict_travel_info.values():
    for v in value["美食"]:
        print(v)
|
[
"895854566@qq.com"
] |
895854566@qq.com
|
f6d7fbdef5cdaedb0fe2f8536b75a1173aca58fe
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/containerservice/azure-mgmt-containerservice/generated_samples/maintenance_configurations_create_update.py
|
c0abfbc1bab5cf537fc774e2f3b60eed6869983b
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,900
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerservice import ContainerServiceClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-containerservice
# USAGE
python maintenance_configurations_create_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Credentials come from AZURE_CLIENT_ID / AZURE_TENANT_ID /
    # AZURE_CLIENT_SECRET environment variables (see module docstring).
    client = ContainerServiceClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid1",
    )
    # Create or update the "default" maintenance configuration for the
    # AKS cluster "clustername1" in resource group "rg1".
    response = client.maintenance_configurations.create_or_update(
        resource_group_name="rg1",
        resource_name="clustername1",
        config_name="default",
        parameters={
            "properties": {
                "notAllowedTime": [{"end": "2020-11-30T12:00:00Z", "start": "2020-11-26T03:00:00Z"}],
                "timeInWeek": [{"day": "Monday", "hourSlots": [1, 2]}],
            }
        },
    )
    print(response)


# x-ms-original-file: specification/containerservice/resource-manager/Microsoft.ContainerService/aks/stable/2023-07-01/examples/MaintenanceConfigurationsCreate_Update.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
d993401850d52d98db8b268955eeb445554951db
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_371/ch47_2020_10_04_13_40_50_371443.py
|
3cc3cadd86d947006768c00c032c11e851f56842
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
def estritamente_crescente(lista):
    """Return the greedy strictly-increasing subsequence of ``lista``.

    Keeps each element that is greater than every element kept so far;
    the running maximum starts at 0, so leading non-positive values are
    skipped (matching the original behaviour).
    """
    nova_lista = []
    maior_visto = 0
    for numero in lista:
        if numero > maior_visto:
            maior_visto = numero
            nova_lista.append(numero)
    return nova_lista
|
[
"you@example.com"
] |
you@example.com
|
e73ea00412857d8bc51a1b6f7dd676d32b152336
|
1c6a29a7dcd62470d594d5e42dbea9ff79cc47f5
|
/shade/_heat/utils.py
|
24cb0b07115e1da1eb535444aa86612c446b7d0a
|
[
"Apache-2.0"
] |
permissive
|
major/shade
|
a1691a3e3311f1b87f4a31c3a26929ddc2541b7a
|
0ced9b5a7568dd8e4a33b6627f636639bcbbd8a3
|
refs/heads/master
| 2023-06-07T17:15:47.089102
| 2020-06-01T22:59:14
| 2020-06-01T22:59:14
| 54,499,600
| 0
| 0
|
Apache-2.0
| 2023-06-01T21:26:36
| 2016-03-22T18:34:14
|
Python
|
UTF-8
|
Python
| false
| false
| 1,842
|
py
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
from six.moves.urllib import error
from six.moves.urllib import parse
from six.moves.urllib import request
from shade import exc
def base_url_for_url(url):
    """Return *url* with its last path component stripped off."""
    parent_dir = os.path.dirname(parse.urlparse(url).path)
    return parse.urljoin(url, parent_dir)
def normalise_file_path_to_url(path):
    """Turn a local filesystem path into a ``file:`` URL.

    Anything that already carries a URL scheme is returned untouched.
    """
    if parse.urlparse(path).scheme:
        return path
    return parse.urljoin('file:', request.pathname2url(os.path.abspath(path)))
def read_url_content(url):
    """Fetch the body of *url* as bytes, base64-encoded if not valid UTF-8."""
    try:
        # TODO(mordred) Use requests
        content = request.urlopen(url).read()
    except error.URLError:
        raise exc.OpenStackCloudException(
            'Could not fetch contents for %s' % url)
    if content:
        try:
            # Probe only: the decoded value is deliberately discarded; the
            # function always returns bytes.
            content.decode('utf-8')
        except ValueError:
            # UnicodeDecodeError is a ValueError subclass.
            # NOTE(review): base64.encodestring was removed in Python 3.9;
            # encodebytes is the replacement -- confirm supported runtimes.
            content = base64.encodestring(content)
    return content
def resource_nested_identifier(rsrc):
    """Return 'name/id' parsed from the first 'nested' link of *rsrc*.

    Returns None when the resource carries no nested-stack link.
    """
    for link in (rsrc.links or []):
        if link.get('rel') == 'nested':
            nested_href = link.get('href')
            return "/".join(nested_href.split("/")[-2:])
|
[
"mordred@inaugust.com"
] |
mordred@inaugust.com
|
633f67db56b3fc27c70671b9cff7a90c51faa754
|
96538cc3eee3d73d429f3476d0e895be95d695e3
|
/worker/db/redisdb.py
|
e7b69ffd0713371d1380120d397a1485debac7fe
|
[] |
no_license
|
FashtimeDotCom/distributed-spider
|
d9555670216e68d4ff031e466cbf3529d080a534
|
33292f098403fa73239e0c7353e4cc5918be981b
|
refs/heads/master
| 2020-03-22T11:43:14.796426
| 2018-07-06T10:51:48
| 2018-07-06T11:34:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,874
|
py
|
# -*- coding: utf-8 -*-
'''
Created on 2016-11-16 16:25
---------
@summary: 操作redis数据库
---------
@author: Boris
'''
import sys
sys.path.append('../')
import init
import redis
import utils.tools as tools
from utils.log import log
IP = tools.get_conf_value('config.conf', 'redis', 'ip')
PORT = int(tools.get_conf_value('config.conf', 'redis', 'port'))
DB = int(tools.get_conf_value('config.conf', 'redis', 'db'))
USER_PASS = tools.get_conf_value('config.conf', 'redis', 'user_pass')
class Singleton(object):
    """Mixin making every construction of a subclass return one shared instance.

    Bug fix: the original forwarded *args/**kwargs into object.__new__, which
    raises ``TypeError: object.__new__() takes exactly one argument`` on
    Python 3 whenever the subclass __init__ accepts arguments.  object.__new__
    must be called with the class only; __init__ still receives the arguments.

    NOTE(review): ``hasattr(cls, '_inst')`` is resolved through the MRO, so
    all subclasses of one Singleton root share a single instance — confirm
    that is the intended scope.
    """
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, '_inst'):
            cls._inst = super(Singleton, cls).__new__(cls)
        return cls._inst
class RedisDB():
    """Thin helper around redis-py: set / sorted-set / list operations.

    Connection parameters default to the values read from config.conf at
    module import time.
    """
    def __init__(self, ip = IP, port = PORT, db = DB, user_pass = USER_PASS):
        # super(RedisDB, self).__init__()
        # NOTE(review): this guard only has an effect when combined with the
        # Singleton class above; as written, every RedisDB() opens a fresh
        # connection because a new instance never has `_redis`.
        if not hasattr(self,'_redis'):
            try:
                self._redis = redis.Redis(host = ip, port = port, db = db, password = user_pass, decode_responses=True) # default redis port is 6379
                self._pipe = self._redis.pipeline(transaction=True) # pipeline batches several commands into a single request; by default a pipeline executes as one atomic transaction
            except Exception as e:
                raise
            else:
                log.debug('连接到redis数据库 ip:%s port:%s'%(ip, port))
    def sadd(self, table, values):
        '''
        @summary: store values in an unordered set (deduplicated)
        ---------
        @param table: set key
        @param values: a list or a single value
        ---------
        @result: 0 if the value already existed, 1 if newly inserted;
                 batch insert returns None
        '''
        if isinstance(values, list):
            self._pipe.multi()
            for value in values:
                self._pipe.sadd(table, value)
            self._pipe.execute()
        else:
            return self._redis.sadd(table, values)
    def zadd(self, table, values, prioritys = 0):
        '''
        @summary: store values in a sorted set, deduplicated (existing
                  values get their score updated)
        ---------
        @param table: key
        @param values: a list or a single value
        @param prioritys: score(s); double, a list or a single value.
                          Items are ordered by score, smaller = higher
                          priority. Defaults to 0.
        ---------
        @result: 0 if the value already existed, 1 if newly inserted;
                 batch insert returns None
        '''
        # NOTE(review): zadd(name, value, score) is the pre-3.0 redis-py
        # call signature -- confirm the pinned redis-py version.
        if isinstance(values, list):
            if not isinstance(prioritys, list):
                prioritys = [prioritys] * len(values)
            else:
                assert len(values) == len(prioritys), 'values值要与prioritys值一一对应'
            self._pipe.multi()
            for value, priority in zip(values, prioritys):
                self._pipe.zadd(table, value, priority)
            self._pipe.execute()
        else:
            return self._redis.zadd(table, values, prioritys)
    def zget(self, table, count = 0, is_pop = True):
        '''
        @summary: fetch items from the sorted set (lowest scores first)
        ---------
        @param table: key
        @param count: number of items (0 fetches a single item)
        @param is_pop: remove fetched items from the set, default True
        ---------
        @result: list of items
        '''
        start_pos = 0 # inclusive
        end_pos = 0 if count == 0 else count - 1 # inclusive
        self._pipe.multi() # start the transaction, see http://www.runoob.com/redis/redis-transactions.html
        self._pipe.zrange(table, start_pos, end_pos) # read
        if is_pop: self._pipe.zremrangebyrank(table, start_pos, end_pos) # delete
        # NOTE(review): with is_pop=False only one command is queued, so
        # this two-value unpack raises ValueError -- confirm intended usage.
        results, count = self._pipe.execute()
        return results
    def zget_count(self, table, priority_min = None, priority_max = None):
        '''
        @summary: number of items in the sorted set
        ---------
        @param table: key
        @param priority_min: lower score bound (inclusive)
        @param priority_max: upper score bound (inclusive)
        ---------
        @result: item count (full cardinality unless both bounds given)
        '''
        if priority_min != None and priority_max != None:
            return self._redis.zcount(table, priority_min, priority_max)
        else:
            return self._redis.zcard(table)
    def lpush(self, table, values):
        # Despite the name this uses RPUSH; together with lpop() below the
        # list behaves as a FIFO queue.
        if isinstance(values, list):
            self._pipe.multi()
            for value in values:
                self._pipe.rpush(table, value)
            self._pipe.execute()
        else:
            return self._redis.rpush(table, values)
    def lpop(self, table, count = 1):
        '''
        @summary: pop up to `count` items from the head of the list
        ---------
        @param table: key
        @param count: maximum number of items to pop
        ---------
        @result: list of popped items
        '''
        datas = []
        # Clamp count to the current list length.
        count = count if count <= self.lget_count(table) else self.lget_count(table)
        if count:
            if count > 1:
                # Pop several items atomically through the pipeline.
                self._pipe.multi()
                while count:
                    data = self._pipe.lpop(table)
                    count -= 1
                datas = self._pipe.execute()
            else:
                datas.append(self._redis.lpop(table))
        return datas
    def lget_count(self, table):
        # Length of the list stored at `table`.
        return self._redis.llen(table)
    def clear(self, table):
        # Delete the whole key.
        self._redis.delete(table)
if __name__ == '__main__':
    # Ad-hoc manual smoke test of the helpers above; requires a reachable
    # redis instance configured in config.conf.
    db = RedisDB()
    # data = {
    #     "url": "http://www.icm9.com/",
    #     "status": 0,
    #     "remark": {
    #         "spider_depth": 3,
    #         "website_name": "早间新闻",
    #         "website_position": 23,
    #         "website_domain": "icm9.com",
    #         "website_url": "http://www.icm9.com/"
    #     },
    #     "depth": 0,
    #     "_id": "5b15f33d53446530acf20539",
    #     "site_id": 1,
    #     "retry_times": 0
    # }
    # print(db.sadd('25:25', data))
    # print(db.zadd('26:26', [data]))
    # # print(db.sadd('1', 1))
    db.zadd('news_urls', '1', 1)
    db.zadd('news_urls', '2', 1)
    db.zadd('news_urls', '3', 2)
    # Count entries with score exactly 2 (expects 1).
    count = db.zget_count('news_urls', 2, 2)
    print(count)
    # print(type(data[0]))
    # db.clear('name')
    # import time
    # start = time.time()
    # # for i in range(10000):
    # #     db.zadd('test6', i)
    # db.zadd('test7', list(range(10000)), [1])
    # print(time.time() - start)
    # db.zadd('test3', '1', 5)
    # db.zadd('test3', '2', 6)
    # db.zadd('test3', '3', 4)
    # Pop the two lowest-score urls.
    data = db.zget('news_urls', 2)
    print(data)
|
[
"boris_liu@foxmail.com"
] |
boris_liu@foxmail.com
|
9792a5b3135bd29aa5e53b9ae8901a638fa9d8f1
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/quantization/multiplicative/quantizers/default_mult.py
|
0385ebd178ce814eee4883e29eda75e12c0747cf
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 1,696
|
py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
import numpy as np
from nntool.quantization.qtype_constraint import MatchAll
from nntool.quantization.quantizers.no_change_mixin import NoChangeMixin
from nntool.quantization.unified_quantization_handler import (in_qs_constraint,
needs_stats,
out_qs_constraint,
params_type)
from ..mult_quantization_handler import MultQuantizionHandler
LOG = logging.getLogger('nntool.' + __name__)
@params_type('__default__')
@in_qs_constraint(MatchAll({'dtype': set([np.int8, np.int16, np.uint8, np.uint16])}))
@out_qs_constraint(MatchAll({'dtype': set([np.int8, np.int16, np.uint8, np.uint16])}))
@needs_stats(False)
class NoChangeMult(MultQuantizionHandler, NoChangeMixin):
@classmethod
def _quantize(cls, params, in_qs, stats, **kwargs):
return cls._handle(params, in_qs, stats, 'scaled', **kwargs)
|
[
"yao.zhang@greenwaves-technologies.com"
] |
yao.zhang@greenwaves-technologies.com
|
8e28e993c80f61da18a42c1591380ee8d5027018
|
94d5ef47d3244950a0308c754e0aa55dca6f2a0e
|
/migrations/versions/e19ce0373a4f_made_degrees_and_personal_info_a_a_one_.py
|
1cdef1768726407a573115deba478c710260bcc0
|
[] |
no_license
|
MUMT-IT/mis2018
|
9cbc7191cdc1bcd7e0c2de1e0586d8bd7b26002e
|
69fabc0b16abfeba44173caa93d4f63fa79033fd
|
refs/heads/master
| 2023-08-31T16:00:51.717449
| 2023-08-31T11:30:13
| 2023-08-31T11:30:13
| 115,810,883
| 5
| 5
| null | 2023-09-14T10:08:35
| 2017-12-30T17:06:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,377
|
py
|
"""made degrees and personal info a a one-to-many relationship
Revision ID: e19ce0373a4f
Revises: 7d048ab06595
Create Date: 2021-03-04 09:41:21.032186
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e19ce0373a4f'
down_revision = '7d048ab06595'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('eduqa_degrees',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('eduqa_programs',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=False),
sa.Column('degree_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['degree_id'], ['eduqa_degrees.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('eduqa_curriculums',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('program_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['program_id'], ['eduqa_programs.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.add_column(u'staff_edu_degree', sa.Column('personal_info_id', sa.Integer(), nullable=True))
op.add_column(u'staff_edu_degree', sa.Column('received_date', sa.Date(), nullable=True))
op.create_foreign_key(None, 'staff_edu_degree', 'staff_personal_info', ['personal_info_id'], ['id'])
op.drop_constraint(u'staff_personal_info_highest_degree_id_fkey', 'staff_personal_info', type_='foreignkey')
op.drop_column(u'staff_personal_info', 'highest_degree_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(u'staff_personal_info', sa.Column('highest_degree_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key(u'staff_personal_info_highest_degree_id_fkey', 'staff_personal_info', 'staff_edu_degree', ['highest_degree_id'], ['id'])
op.drop_constraint(None, 'staff_edu_degree', type_='foreignkey')
op.drop_column(u'staff_edu_degree', 'received_date')
op.drop_column(u'staff_edu_degree', 'personal_info_id')
op.drop_table('eduqa_curriculums')
op.drop_table('eduqa_programs')
op.drop_table('eduqa_degrees')
# ### end Alembic commands ###
|
[
"likit.pre@mahidol.edu"
] |
likit.pre@mahidol.edu
|
0f97f6497d711f09d33f01461d992e7caa12c186
|
ed32eb1eb0a328a4ffe89e178fc4987470f333cd
|
/module/multi_process/multi_process_data_share_queue.py
|
5a0bf787d49a21a07716851ed7ecdbf5bd202769
|
[] |
no_license
|
xiaoyaojjian/py_learn
|
c6f5bdf31bcebf29dd914e81e6be9305a61265cc
|
95e494ea823d2074a05c1c2a49595002a1576093
|
refs/heads/master
| 2020-12-05T23:22:11.017066
| 2016-09-08T01:13:08
| 2016-09-08T01:13:08
| 67,654,055
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 350
|
py
|
"""
使用 multiprocessing 中的 Queue 队列, 实现进程间数据共享
"""
from multiprocessing import Process, Queue
def fun(q, n):
q.put(['hi, ', n])
if __name__ == '__main__':
q = Queue()
q.put('Ao')
for i in range(5):
p = Process(target=fun, args=(q, i))
p.start()
while True:
print(q.get())
|
[
"q2868765@qq.com"
] |
q2868765@qq.com
|
a811d153e337706d515599bbb07ff549b3e288e1
|
b0f45a16f34ff84e217ff20cc06f1e8280459504
|
/antgo/measures/matting_task.py
|
b4d89c343881b9e68b17ca02028d5a8540f7ccae
|
[] |
no_license
|
zhaoqike/antgo
|
c41dd4b8bc3e969f6008a6c17f0b44d0fe4a8eae
|
c8a62b2567f62db15f26c75dcc2191cb69f392ab
|
refs/heads/master
| 2021-07-18T17:37:58.652112
| 2017-09-12T01:19:15
| 2017-09-12T01:19:15
| 102,823,416
| 0
| 0
| null | 2017-09-12T08:04:20
| 2017-09-08T05:57:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,153
|
py
|
# encoding=utf-8
# @Time : 17-7-25
# @File : matting_task.py
# @Author :
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
import numpy as np
from antgo.task.task import *
from antgo.measures.base import *
from antgo.dataflow.common import *
from antgo.measures.error import *
class AntSADMatting(AntMeasure):
def __init__(self, task):
super(AntSADMatting, self).__init__(task, 'MATTING-SAD')
assert (task.task_type == 'MATTING')
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
sad = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
sad += np.sum(np.abs(predict - gt))
count += 1
val = sad / count
return {'statistic':{'name':self.name, 'value':[{'name':self.name, 'value': val, 'type': 'SCALAR'}]}}
def AntMSEMatting(AntMeasure):
def __init__(self, task):
super(AntMSEMatting, self).__init__(task, 'MATTING-MSE')
assert (task.task_type == 'MATTING')
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
res = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
res += mse(gt, predict)
count += 1
val = res / count
return {'statistic': {'name': self.name, 'value': [{'name': self.name, 'value': val, 'type': 'SCALAR'}]}}
def AntGradientMatting(AntMeasure):
def __init__(self, task):
# paper: Christoph Rhemann, etc. A Perceptually Motivated Online Benchmark for Image Matting
super(AntGradientMatting, self).__init__(task, 'MATTING-GRADIENT')
assert (task.task_type == 'MATTING')
# delta = 1.4, q = 2
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
res = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
predict_grad = scipy.ndimage.filters.gaussian_filter(predict, 1.4, order=1)
gt_grad = scipy.ndimage.filters.gaussian_filter(gt, 1.4, order=1)
res += np.sum(np.power(predict_grad - gt_grad, 2))
count += 1
val = res / count
return {'statistic': {'name': self.name, 'value': [{'name': self.name, 'value': val, 'type': 'SCALAR'}]}}
def AntConnectivityMatting(AntMeasure):
def __init__(self, task):
# paper: Christoph Rhemann, etc. A Perceptually Motivated Online Benchmark for Image Matting
super(AntConnectivityMatting, self).__init__(task, 'MATTING-CONNECTIVITY')
assert (task.task_type == 'MATTING')
# theta=0.15, p=1
self.is_support_rank = True
def eva(self, data, label):
if label is not None:
data = zip(data, label)
count = 0
res = 0.0
for predict, gt in data:
assert(len(predict.shape) == 2)
assert(len(gt.shape) == 2)
count += 1
val = 0.0
return {'statistic': {'name': self.name, 'value': [{'name': self.name, 'value': val, 'type': 'SCALAR'}]}}
|
[
"jian.fbehind@gmail.com"
] |
jian.fbehind@gmail.com
|
4b64ead8aaa5f3622333594515050ea8272d1336
|
c39e19e8fada4df5bf8999f93a470fc5db0b8ea7
|
/tensorflow/python/keras/distribute/keras_stateful_lstm_model_correctness_test.py
|
4802c8d07d7c1f2aa5807fb9066c48b3319404fb
|
[
"Apache-2.0"
] |
permissive
|
ivomarb/tensorflow
|
6bb05bc6dbaa8e59b43d00a8216bb0b8cb766080
|
df2fbb89588065fca2c6e5fcfba7d8c2b4378591
|
refs/heads/master
| 2020-06-26T05:03:06.321649
| 2019-07-29T20:30:03
| 2019-07-29T21:40:30
| 199,530,704
| 1
| 0
|
Apache-2.0
| 2019-07-29T21:45:39
| 2019-07-29T21:45:38
| null |
UTF-8
|
Python
| false
| false
| 4,359
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful tf.keras LSTM models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import test
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
def strategies_for_stateful_embedding_model():
"""Returns TPUStrategy with single core device assignment."""
return [
strategy_combinations.tpu_strategy_one_core,
strategy_combinations.tpu_strategy_one_step_one_core
]
def test_combinations_for_stateful_embedding_model():
return (combinations.combine(
distribution=strategies_for_stateful_embedding_model(),
mode='graph',
use_numpy=False,
use_validation_data=False,
run_distributed=[True, False]))
class DistributionStrategyStatefulLstmModelCorrectnessTest(
keras_correctness_test_base
.TestDistributionStrategyEmbeddingModelCorrectnessBase):
def get_model(self,
max_words=10,
initial_weights=None,
distribution=None,
run_distributed=None,
input_shapes=None):
del input_shapes
batch_size = keras_correctness_test_base._GLOBAL_BATCH_SIZE
with keras_correctness_test_base.MaybeDistributionScope(distribution):
word_ids = keras.layers.Input(
shape=(max_words,),
batch_size=batch_size,
dtype=np.int32,
name='words')
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
lstm_embed = keras.layers.LSTM(
units=4, return_sequences=False, stateful=True)(
word_embed)
preds = keras.layers.Dense(2, activation='softmax')(lstm_embed)
model = keras.Model(inputs=[word_ids], outputs=[preds])
if initial_weights:
model.set_weights(initial_weights)
optimizer_fn = gradient_descent_keras.SGD
model.compile(
optimizer=optimizer_fn(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
return model
# TODO(jhseu): Disabled to fix b/130808953. Need to investigate why it
# doesn't work and enable for DistributionStrategy more generally.
@combinations.generate(test_combinations_for_stateful_embedding_model())
def disabled_test_stateful_lstm_model_correctness(
self, distribution, use_numpy, use_validation_data, run_distributed):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
is_stateful_model=True,
run_distributed=run_distributed)
@combinations.generate(
combinations.times(
keras_correctness_test_base.test_combinations_with_tpu_strategies(),
combinations.combine(run_distributed=[True, False])))
def test_incorrectly_use_multiple_cores_for_stateful_lstm_model(
self, distribution, use_numpy, use_validation_data, run_distributed):
with self.assertRaisesRegexp(
ValueError,
'Single core must be used for computation on stateful models. Consider '
'adding `device_assignment` parameter to TPUStrategy'):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
is_stateful_model=True,
run_distributed=run_distributed)
if __name__ == '__main__':
test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
94e94f0a49146bdb6be636a8ec08afefab19692d
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Matlibplots/dataset/weighted_moving_average.py
|
ba1c72dd6c5721cf2c82d983bfe57dd402757fc0
|
[] |
no_license
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162
| 2019-11-10T11:08:23
| 2019-11-10T11:08:23
| 154,487,015
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,098
|
py
|
# Created by Bogdan Trif on 05-02-2018 , 11:25 AM.
import numpy as np
import matplotlib.pyplot as plt
#first generate some datapoint for a randomly sampled noisy sinewave
x = np.random.random(1000)*10
noise = np.random.normal(scale=0.3,size=len(x))
y = np.sin(x) + noise
#plot the data
plt.plot(x,y,'ro',alpha=0.3,ms=4,label='data')
plt.xlabel('Time')
plt.ylabel('Intensity')
def weighted_moving_average(x,y,step_size=0.05,width=1):
bin_centers = np.arange(np.min(x),np.max(x)-0.5*step_size,step_size)+0.5*step_size
bin_avg = np.zeros(len(bin_centers))
#We're going to weight with a Gaussian function
def gaussian(x,amp=1,mean=0,sigma=1):
return amp*np.exp(-(x-mean)**2/(2*sigma**2))
for index in range(0,len(bin_centers)):
bin_center = bin_centers[index]
weights = gaussian(x,mean=bin_center,sigma=width)
bin_avg[index] = np.average(y,weights=weights)
return (bin_centers,bin_avg)
#plot the moving average
bins, average = weighted_moving_average(x,y)
plt.plot(bins, average,label='moving average')
plt.grid(which='both')
plt.show()
|
[
"bogdan.evanzo@gmail.com"
] |
bogdan.evanzo@gmail.com
|
104faf0976a57398e08a2092df8011c01c40ff5a
|
2e5dbb3b851a3e96d715bc50c54b2dbe84b52a7d
|
/dl/lecture01/furniture/download_furniture.py
|
bf49967dc44eb0f6705f470a5f97d465ab403096
|
[] |
no_license
|
devforfu/fastai_courses
|
b3116ec93cef2174e661c1d1884a33d8510f08a5
|
82ee6e299c805f10e224c6a3473ac75ffbfdada4
|
refs/heads/master
| 2020-04-02T07:04:23.766567
| 2018-12-21T16:49:04
| 2018-12-21T16:49:04
| 154,180,455
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,327
|
py
|
import os
import json
import argparse
from io import BytesIO
from pathlib import Path
from dataclasses import dataclass, asdict
from functools import partial
import configparser
from multiprocessing import Pool, cpu_count
import requests
import numpy as np
import pandas as pd
from PIL import Image
from fastai.core import partition
from projects.logger import get_logger
PATH = Path.home()/'data'/'furniture'
IMAGES = PATH/'images'
TRAIN_IMAGES = IMAGES/'train'
VALID_IMAGES = IMAGES/'valid'
TEST_IMAGES = IMAGES/'test'
LABELS = PATH/'labels.csv'
HEADERS = {'User-Agent': 'Python3'}
RANDOM_STATE = 1
np.random.seed(RANDOM_STATE)
log = get_logger()
def main():
args = parse_args()
name = args.subset
path = IMAGES/name
os.makedirs(path, exist_ok=True)
json_file = PATH/f'{name}.json'
index_file = PATH/f'{name}_index.csv'
prepare_url_index(json_file, index_file, pct=args.pct)
log.info(f'Downloading {args.pct:2.2%} of {json_file}...')
index = pd.read_csv(index_file)
info = download(index, path, args.chunk_size, args.proxy)
info.to_pickle(IMAGES/f'{name}_info.pickle')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--subset',
default='train', choices=['train', 'validation', 'test'],
help='Subset to download'
)
parser.add_argument(
'--pct',
default=0.1, type=float,
help='Percent of images to download'
)
parser.add_argument(
'--chunk-size',
default=1000, type=int,
help='Number of images to download per multi-threaded pool run'
)
parser.add_argument(
'--proxy',
default=None,
help='proxy configuration (if required)'
)
args = parser.parse_args()
if args.proxy is not None:
conf = configparser.ConfigParser()
conf.read(args.proxy)
proxy = dict(conf['proxy'])
url = 'socks5://{username}:{password}@{host}:{port}'.format(**proxy)
args.proxy = {'http': url, 'https': url}
return args
def prepare_url_index(json_file, index_file, pct=0.1):
"""Saves meta-information about images into CSV file.
Args:
json_file: Path to JSON file with dataset information.
index_file: Path to CSV file to save image URL, label ID, and image ID
pct: Percentage of dataset to take.
"""
with json_file.open() as file:
content = json.load(file)
images = content['images']
if 'annotations' in content:
labels = content['annotations']
else:
labels = [
{'image_id': img['image_id'], 'label_id': 0}
for img in images]
urls = [img['url'][0] for img in images]
records = pd.DataFrame([
{'url': url, **lbl}
for url, lbl in zip(urls, labels)])
if pct is not None and pct < 1:
pct = max(0.0, min(pct, 1.0))
subsets = []
for key, group in records.groupby('label_id'):
size = int(len(group) * pct)
subsets.extend(group.sample(size, random_state=RANDOM_STATE).to_dict('records'))
records = pd.DataFrame(subsets)
records.to_csv(index_file, index=None)
@dataclass
class ImageInfo:
path: Path
label_id: int
image_id: int
url: str
failed: bool = False
def download(index, path, chunk_size: int=1000, proxy: dict=None):
"""Downloads images with URLs from index dataframe."""
n_cpu = cpu_count()
worker = partial(download_single, path, proxy)
queue = index.to_dict('records')
meta = []
with Pool(n_cpu) as pool:
chunks = partition(queue, chunk_size)
n_chunks = len(chunks)
for i, chunk in enumerate(chunks):
log.info('Downloading chunk %d of %d' % (i+1, n_chunks))
data = [x for x in pool.imap_unordered(worker, chunk) if not x.failed]
meta.extend([asdict(info) for info in data])
return pd.DataFrame(meta)
def download_single(folder, proxy, info):
url = info['url']
img_name = str(info['image_id']) + '.jpg'
path = folder/img_name
result = {
'label_id': info['label_id'],
'image_id': info['image_id'],
'path': path,
'url': url}
if path.exists():
return ImageInfo(**result)
error, msg = True, ''
try:
r = requests.get(
url, allow_redirects=True, timeout=60,
headers=HEADERS, proxies=proxy)
r.raise_for_status()
error = False
except requests.HTTPError:
msg = 'HTTP error'
except requests.ConnectionError:
msg = 'Connection error'
except requests.Timeout:
msg = 'Waiting response too long'
except Exception as e:
msg = str(e)[:80]
if error:
log.warning('%s: %s', msg, url)
return ImageInfo(failed=True, **result)
try:
pil_image = Image.open(BytesIO(r.content)).convert('RGB')
pil_image.save(path, format='JPEG', quality=90)
except Exception as e:
log.warning('Cannot create PIL Image: %s', str(e))
return ImageInfo(failed=True, **result)
if os.stat(path).st_size <= 0:
log.warning('Saved image file is emtpy: %s', path)
return ImageInfo(failed=True, **result)
return ImageInfo(**result)
if __name__ == '__main__':
main()
|
[
"developer.z@outlook.com"
] |
developer.z@outlook.com
|
9a7ac17a45a71f1de7afea23e28e8af49840222c
|
645aa520f2eff7e6001574e57c986aba129e4dd3
|
/tests/test_visualize_pathways.py
|
608a46c9346eadcb1ae18f44d046049486192b9e
|
[
"Apache-2.0"
] |
permissive
|
google/transitfeed
|
08c4ecfb6872b6c0dc409d9a35b32ef515e30253
|
104b5a5b339c62a94c1579d7209a41c7c0833e35
|
refs/heads/master
| 2023-09-05T03:08:17.640950
| 2022-05-23T16:23:53
| 2022-05-23T16:23:53
| 24,061,376
| 680
| 299
|
Apache-2.0
| 2022-09-28T09:02:50
| 2014-09-15T15:16:32
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
import os.path
import unittest
import visualize_pathways
def get_file_contents(filename):
with open(filename, 'rb') as f:
return f.read()
class TestVisualizePathways(unittest.TestCase):
def test_gtfs_to_graphviz(self):
testdata_dir = os.path.join(os.path.dirname(__file__),
'data/au-sydney-entrances')
golden_data = get_file_contents(
os.path.join(testdata_dir, 'au-sydney-entrances.dot'))
reader = visualize_pathways.GtfsReader(testdata_dir)
self.assertEqual(
str(visualize_pathways.gtfs_to_graphviz(reader)),
golden_data)
|
[
"noreply@github.com"
] |
google.noreply@github.com
|
24e60d5e6ab4bd5e2bb4c8fbc73fed26abb5cbe7
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part003815.py
|
5e5bdd4608ad234dd7dc490f41749f64838b0581
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher66552(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher66552._instance is None:
CommutativeMatcher66552._instance = CommutativeMatcher66552()
return CommutativeMatcher66552._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 66551
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
425ae4bad5ec2bf6ae6e55096f9b329ab59d9a73
|
022b22d343e2c3d89a865c2b5d684e82c692771e
|
/frontend_docker/project/main/views.py
|
753ec13fe01e0c17192f09af50c9bdade4d1cc2f
|
[
"MIT"
] |
permissive
|
jessequinn/hbsis
|
f4050f5f0850001bc3284ce2c94266ccb00a4c70
|
149b8c41c75732dcbcc23e667831fdb42cab786e
|
refs/heads/master
| 2022-12-18T01:13:27.354613
| 2019-02-08T10:27:35
| 2019-02-08T10:27:35
| 169,249,120
| 0
| 0
|
MIT
| 2022-12-08T01:35:31
| 2019-02-05T13:54:21
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 5,545
|
py
|
import datetime
import json
import pytz
import urllib.request
from flask import render_template, request, flash, Blueprint, redirect, url_for
from flask_login import login_required, current_user
from project import app, db
from project.models import WeatherRegistration
from .forms import WeatherRegistrationForm
main_blueprint = Blueprint(
'main', __name__,
template_folder='templates'
)
def datetimefilter(value, format="%A"):
'''
Datetime filter for Jinja. Formats date to US/Eastern from the UTC value.
:param value: input value
:param format: format of return date. default day of week.
:return: formatted date
'''
value = datetime.datetime.fromtimestamp(value)
tz = pytz.timezone('US/Eastern')
utc = pytz.timezone('UTC')
value = utc.localize(value, is_dst=None).astimezone(pytz.utc)
local_dt = value.astimezone(tz)
return local_dt.strftime(format)
app.jinja_env.filters['datetimefilter'] = datetimefilter
@main_blueprint.route('/', methods=['GET', 'POST'])
@login_required
def home():
'''
Main page after login. Contains a search form for city weather forecast.
:return: rendered template
'''
weatherRegistrations = db.session.query(WeatherRegistration).filter_by(user_id=current_user.id).all()
with urllib.request.urlopen('http://localhost:5050/countries') as url:
data = json.loads(url.read().decode())
error = None
form = WeatherRegistrationForm(request.form)
form.country.choices = [(c, c) for c in data['data']] # dyanmically produce countries
if request.method == 'POST':
if form.validate_on_submit():
if form.city.data != '':
with urllib.request.urlopen(
'http://localhost:5050/' + form.country.data.upper() + '/' + form.city.data.capitalize()) as url:
ids = json.loads(url.read().decode())
if not ids['data']:
error = 'No data exists for ' + form.city.data.capitalize() + '!'
return render_template('index.html', form=form, error=error, user=current_user, weatherRegistrations=weatherRegistrations)
else:
if any(ids['data'][0]['id'] == wr.city_id for wr in weatherRegistrations):
error = form.city.data.capitalize() + ' has already been registered.'
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
else:
new_weatherregistration = WeatherRegistration(form.city.data, ids['data'][0]['id'],
form.country.data, current_user.id)
db.session.add(new_weatherregistration)
failed = False
try:
db.session.commit()
except Exception as e:
db.session.rollback()
db.session.flush()
failed = True
print(e)
if failed:
error = 'Error with registration.'
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
else:
flash(form.city.data.capitalize() + ' was registered successfully.')
return redirect(url_for('main.home'))
else:
error = 'Enter a city name!'
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
@main_blueprint.route('/forecast<id>')
@login_required
def forecast(id):
'''
5 day forecast page.
:param id: city id
:return: rendered template
'''
with urllib.request.urlopen(
'http://api.openweathermap.org/data/2.5/forecast/daily?id=' + id + '&cnt=5&APPID=eb8b1a9405e659b2ffc78f0a520b1a46&units=metric') as url:
data = json.loads(url.read().decode())
return render_template('forecast.html', data=data)
@main_blueprint.route('/remove<id>')
@login_required
def remove(id):
'''
Function simply removes city from list of cities.
:param id: city id
:return: rendered template
'''
with urllib.request.urlopen('http://localhost:5050/countries') as url:
data = json.loads(url.read().decode())
form = WeatherRegistrationForm(request.form)
form.country.choices = [(c, c) for c in data['data']] # dyanmically produce countries
db.session.query(WeatherRegistration).filter_by(id=id).delete()
failed = False
try:
db.session.commit()
except Exception as e:
db.session.rollback()
db.session.flush()
failed = True
print(e)
if failed:
error = 'Could not remove registration.'
weatherRegistrations = db.session.query(WeatherRegistration).filter_by(user_id=current_user.id).all()
return render_template('index.html', form=form, error=error, user=current_user,
weatherRegistrations=weatherRegistrations)
else:
flash('Registration was removed successfully.')
return redirect(url_for('main.home'))
|
[
"me@jessequinn.info"
] |
me@jessequinn.info
|
bc27b8fa61132158c9004b0c3d96302cee57c123
|
c9fde4576216a22e8d5711bbe97adda1aafa2f08
|
/model-optimizer/mo/front/caffe/extractor.py
|
8f6115655b5973294584661bca6889d30733e4aa
|
[
"Apache-2.0"
] |
permissive
|
dliang0406/dldt
|
c703d6a837de3f996528fc8a9543f9530b23342c
|
d9b10abcebafe8b10ba81e09e433de7a366c072c
|
refs/heads/2018
| 2020-04-03T08:24:47.723353
| 2018-10-29T07:58:05
| 2018-10-29T07:58:05
| 155,132,108
| 3
| 1
|
Apache-2.0
| 2019-10-10T08:39:46
| 2018-10-29T01:03:54
|
C++
|
UTF-8
|
Python
| false
| false
| 5,994
|
py
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.caffe.extractors.batchnorm import batch_norm_ext
from mo.front.caffe.extractors.concat import concat_ext
from mo.front.caffe.extractors.convolution import convolution_ext
from mo.front.caffe.extractors.deconvolution import deconvolution_ext
from mo.front.caffe.extractors.eltwise import eltwise_ext
from mo.front.caffe.extractors.flatten import flatten_ext
from mo.front.caffe.extractors.inner_product import inner_product_ext
from mo.front.caffe.extractors.input import global_input_ext, input_ext
from mo.front.caffe.extractors.lrn import lrn_ext
from mo.front.caffe.extractors.native_caffe import native_caffe_node_extractor
from mo.front.caffe.extractors.permute import permute_ext
from mo.front.caffe.extractors.pooling import pooling_ext
from mo.front.caffe.extractors.power import power_ext
from mo.front.caffe.extractors.relu import relu_ext
from mo.front.caffe.extractors.reshape import reshape_ext
from mo.front.caffe.extractors.roipooling import roipooling_ext
from mo.front.caffe.extractors.scale import scale_ext
from mo.front.caffe.extractors.slice import slice_ext
from mo.front.caffe.extractors.softmax import softmax_ext
from mo.front.common.partial_infer.elemental import copy_shape_infer
from mo.front.common.register_custom_ops import extension_op_extractor
from mo.front.extractor import CaffePythonFrontExtractorOp, FrontExtractorOp
from mo.graph.graph import Node
from mo.ops.op import Op
from mo.utils.error import Error
from mo.utils.utils import refer_to_faq_msg
def node_pb_arg(pb_extractor):
return lambda node: pb_extractor(node.pb, node.model_pb)
"""
Keys are names that appear as layer names in .prototxt.
Full list is available here: http://caffe.berkeleyvision.org/tutorial/layers.html
"""
caffe_type_extractors = {
# Data Layers
'input': node_pb_arg(input_ext),
'globalinput': node_pb_arg(global_input_ext),
# Common Layers
'innerproduct': node_pb_arg(inner_product_ext),
'inner_product': node_pb_arg(inner_product_ext),
'dropout': node_pb_arg(lambda _, __: dict(op='Dropout', infer=copy_shape_infer)),
# Vision Layers
'convolution': node_pb_arg(convolution_ext),
'deconvolution': node_pb_arg(deconvolution_ext),
'pooling': node_pb_arg(pooling_ext),
# Normalization Layers
'batchnorm': node_pb_arg(batch_norm_ext),
'lrn': node_pb_arg(lrn_ext),
# Activation Layers
'power': node_pb_arg(power_ext),
'relu': node_pb_arg(relu_ext),
'scale': node_pb_arg(scale_ext),
# Utility Layers
'concat': node_pb_arg(concat_ext),
'eltwise': node_pb_arg(eltwise_ext),
'flatten': node_pb_arg(flatten_ext),
'reshape': node_pb_arg(reshape_ext),
'slice': node_pb_arg(slice_ext),
'softmax': node_pb_arg(softmax_ext),
# Custom, implemented in IE, SSD-specific
'permute': node_pb_arg(permute_ext),
# Custom, implemented in IE, Fast-RCNN-specific
'roipooling': node_pb_arg(roipooling_ext),
}
def common_caffe_fields(node: Node) -> dict:
if node.has_valid('op') and node.op == 'Identity':
return {}
pb = node.pb if node.pb else node
layer_type = pb.type
if isinstance(layer_type, int):
layer_type = pb.LayerType.DESCRIPTOR.values_by_number[layer_type].name
layer_type = str(layer_type)
return {
'kind': 'op',
'name': pb.name,
'type': layer_type,
'op': layer_type,
# generic code relies on op; it should be overridden by specific op extractor
'infer': None,
'precision': 'FP32' # TODO use real precision derived from the model
}
def caffe_extractor(node: Node, lowered_keys_map: dict) -> (bool, dict):
if node.has_valid('op') and node.op == 'Identity':
return True, {}
result = common_caffe_fields(node)
supported = False
name = None
layer_type = result['type'].lower()
if layer_type in lowered_keys_map:
layer_type = lowered_keys_map[layer_type]
assert layer_type in caffe_type_extractors
name = layer_type
if name: # it is either standard or registered via CustomLayersMapping.xml
attrs = caffe_type_extractors[name](node)
# intentionally as Python registry if not found returns None
if attrs is not None:
result.update(attrs)
supported = True
if not supported:
raise Error('Found custom layer "{}". Model Optimizer does not support this layer. '.format(node.id) +
'Please, implement extension. ' +
refer_to_faq_msg(45))
if 'infer' not in result or not result['infer']:
result.update(native_caffe_node_extractor(node))
phase_attr = check_phase(node)
result.update(phase_attr)
return supported, result
def check_phase(node: Node):
    """Return {'phase': value} from the node's protobuf include rules, or {}."""
    if not (node.has_valid('pb') and hasattr(node.pb, 'include')):
        return {}
    for include_rule in node.pb.include:
        if hasattr(include_rule, 'phase'):
            return {'phase': include_rule.phase}
    return {}
def register_caffe_python_extractor(op: Op, name: str = None):
    """Register *op* as the extractor for a Caffe Python layer.

    If *name* is omitted, the op's own 'op' attribute is used as the
    registry key. Raises Error when no name can be determined.
    """
    if not name and hasattr(op, 'op'):
        name = op.op
    if not name:
        raise Error("Can not register Op {}. Please, call function 'register_caffe_python_extractor'"
                    "with parameter 'name' .".format(op),
                    refer_to_faq_msg(87))
    # Late-bound lambda: extraction is deferred until a matching node is seen.
    CaffePythonFrontExtractorOp.registered_ops[name] = lambda node: extension_op_extractor(node, op)
|
[
"openvino_pushbot@intel.com"
] |
openvino_pushbot@intel.com
|
a0364d6e371684383b61216b8d9e5677beb97814
|
b394bb6bd3e8848688b525f55e82962f152c1bb3
|
/demos/upload/linear_systems/Complexity of Mat-Mat multiplication and LU.py
|
f4e79703fc521e02f24bfee84b294558fbd90ad9
|
[] |
no_license
|
lukeolson/cs450-f20-demos
|
02c2431d7696348cf9ca1ab67bdd5c44a97ac38b
|
040e7dfa15c68f7f426cf69655cb600926f9f626
|
refs/heads/master
| 2023-01-22T19:12:33.394521
| 2020-12-03T19:48:18
| 2020-12-03T19:48:18
| 288,542,898
| 5
| 10
| null | 2020-10-05T19:39:07
| 2020-08-18T19:13:52
| null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
#!/usr/bin/env python
# coding: utf-8

# # Relative cost of matrix operations
#
# Times dense matrix-matrix multiplication against LU factorization over a
# range of matrix sizes and plots wall time against size.

# In[1]:

import numpy as np
import scipy.linalg as spla
import scipy as sp
import matplotlib.pyplot as pt

from time import time

# Bug fix: the original called np.alterdot() here. That function (which
# swapped in a BLAS-backed dot) was deprecated in NumPy 1.10 and removed in
# 1.16, so the call raises AttributeError on any modern NumPy. The optimized
# dot is always enabled now, so the call is simply dropped.

# In[2]:

# Logarithmically spaced matrix sizes from 10 to ~1780.
n_values = (10**np.linspace(1, 3.25, 15)).astype(np.int32)
n_values

# In[3]:

def mat_mul(A):
    return A.dot(A)

for name, f in [
        ("mat_mul", mat_mul),
        ("lu", spla.lu_factor),
        ]:
    times = []
    print("----->", name)
    for n in n_values:
        print(n)
        A = np.random.randn(n, n)
        start_time = time()
        f(A)
        times.append(time() - start_time)
    pt.plot(n_values, times, label=name)

pt.grid()
pt.legend(loc="best")
pt.xlabel("Matrix size $n$")
pt.ylabel("Wall time [s]")

# * The faster algorithms make the slower ones look bad. But... it's all relative.
# * Is there a better way of plotting this?
# * Can we see the asymptotic cost ($O(n^3)$) of these algorithms from the plot?

# In[3]:
|
[
"luke.olson@gmail.com"
] |
luke.olson@gmail.com
|
90afc3a58e3e9c99e3416d2d843ca5e084f3e87a
|
a2b6bc9bdd2bdbe5871edb613065dd2397175cb3
|
/medium/Rotate List.py
|
8ad51c14cc944ec9af60a8ec92c7c8d4a1311263
|
[] |
no_license
|
Asunqingwen/LeetCode
|
ed8d2043a31f86e9e256123439388d7d223269be
|
b7c59c826bcd17cb1333571eb9f13f5c2b89b4ee
|
refs/heads/master
| 2022-09-26T01:46:59.790316
| 2022-09-01T08:20:37
| 2022-09-01T08:20:37
| 95,668,066
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,931
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/10/29 0029 10:26
# @Author : 没有蜡笔的小新
# @E-mail : sqw123az@sina.com
# @FileName: Rotate List.py
# @Software: PyCharm
# @Blog :https://blog.csdn.net/Asunqingwen
# @GitHub :https://github.com/Asunqingwen
"""
Given a linked list, rotate the list to the right by k places, where k is non-negative.
Example 1:
Input: 1->2->3->4->5->NULL, k = 2
Output: 4->5->1->2->3->NULL
Explanation:
rotate 1 steps to the right: 5->1->2->3->4->NULL
rotate 2 steps to the right: 4->5->1->2->3->NULL
Example 2:
Input: 0->1->2->NULL, k = 4
Output: 2->0->1->NULL
Explanation:
rotate 1 steps to the right: 2->0->1->NULL
rotate 2 steps to the right: 1->2->0->NULL
rotate 3 steps to the right: 0->1->2->NULL
rotate 4 steps to the right: 2->0->1->NULL
"""
import json
class ListNode:
    """Singly linked list node."""
    def __init__(self, x):
        self.val = x  # payload value
        self.next = None  # successor node, or None at the tail
def stringToIntegerList(input):
    """Parse a JSON array string such as "[1, 2, 3]" into a Python list."""
    parsed = json.loads(input)
    return parsed
def stringToListNode(input):
    """Build a linked list from a comma-separated string of integers."""
    sentinel = ListNode(0)
    tail = sentinel
    for token in input.split(','):
        tail.next = ListNode(int(token))
        tail = tail.next
    return sentinel.next
def listNodeToString(node):
    """Render a linked list as "[v1, v2, ...]" ("[]" when empty)."""
    values = []
    while node:
        values.append(str(node.val))
        node = node.next
    if not values:
        return "[]"
    return "[" + ", ".join(values) + "]"
def rotateRight(head: ListNode, k: int) -> ListNode:
    """Rotate the list right by k places and return the new head."""
    # First pass: count the nodes.
    p1 = head
    length = 0
    while p1:
        length += 1
        p1 = p1.next
    if length <= 1 or k == 0:
        return head
    k %= length  # rotating by a full length is a no-op
    # Two pointers k nodes apart; walk both (length - k) steps, temporarily
    # closing the list into a ring when a pointer runs off the tail, so p1
    # lands on the node that becomes the new head.
    p1, p2 = head, head
    for i in range(k):
        p2 = p2.next
    for i in range(length - k):
        if not p1.next:
            p1.next = head  # close the ring
        if not p2.next:
            p2.next = head
        p1 = p1.next
        p2 = p2.next
    head = p1
    # Walk (length - 1) steps from the new head to the new tail and cut
    # the ring open there.
    for i in range(length - 1):
        p1 = p1.next
    p1.next = None
    return head
if __name__ == '__main__':
    # Smoke test: rotating a 2-node list by 0 leaves it unchanged ("[1, 2]").
    input = "1,2"
    k = 0
    head = stringToListNode(input)
    result = rotateRight(head, k)
    result = listNodeToString(result)
    print(result)
|
[
"sqw123az@sina.com"
] |
sqw123az@sina.com
|
f3815f70c32f2896f4449435b1bacfddcf8375c9
|
39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43
|
/poet/trunk/pythonLibs/Django-1.3/tests/modeltests/proxy_models/models.py
|
30f14bc931b97f11cff5cbcf7a99bf9d15829bc7
|
[
"BSD-3-Clause"
] |
permissive
|
AgileAdaptiveTools/POETTools
|
85158f043e73b430c1d19a172b75e028a15c2018
|
60244865dd850a3e7346f9c6c3daf74ea1b02448
|
refs/heads/master
| 2021-01-18T14:46:08.025574
| 2013-01-28T19:18:11
| 2013-01-28T19:18:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,229
|
py
|
"""
By specifying the 'proxy' Meta attribute, model subclasses can specify that
they will take data directly from the table of their base class table rather
than using a new table of their own. This allows them to act as simple proxies,
providing a modified interface to the data from the base class.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
# A couple of managers for testing managing overriding in proxy model cases.
class PersonManager(models.Manager):
    # Default manager for Person: hides rows named "fred".
    def get_query_set(self):
        return super(PersonManager, self).get_query_set().exclude(name="fred")

class SubManager(models.Manager):
    # Manager installed on proxy subclasses to test overriding: hides "wilma".
    def get_query_set(self):
        return super(SubManager, self).get_query_set().exclude(name="wilma")
class Person(models.Model):
    """
    A simple concrete base class.
    """
    name = models.CharField(max_length=50)
    # Default manager excludes name="fred" (see PersonManager above).
    objects = PersonManager()
    def __unicode__(self):
        return self.name

class Abstract(models.Model):
    """
    A simple abstract base class, to be used for error checking.
    """
    data = models.CharField(max_length=10)
    class Meta:
        abstract = True
class MyPerson(Person):
    """
    A proxy subclass, this should not get a new table. Overrides the default
    manager.
    """
    class Meta:
        proxy = True
        ordering = ["name"]
    # First manager declared becomes the default; excludes "wilma".
    objects = SubManager()
    other = PersonManager()
    def has_special_name(self):
        return self.name.lower() == "special"

class ManagerMixin(models.Model):
    # Abstract mixin contributing an extra manager to concrete subclasses.
    excluder = SubManager()
    class Meta:
        abstract = True

class OtherPerson(Person, ManagerMixin):
    """
    A class with the default manager from Person, plus an secondary manager.
    """
    class Meta:
        proxy = True
        ordering = ["name"]

class StatusPerson(MyPerson):
    """
    A non-proxy subclass of a proxy, it should get a new table.
    """
    status = models.CharField(max_length=80)
# We can even have proxies of proxies (and subclass of those).
class MyPersonProxy(MyPerson):
    # Proxy of a proxy: still backed by the original Person table.
    class Meta:
        proxy = True

class LowerStatusPerson(MyPersonProxy):
    # Concrete subclass of a proxy-of-proxy: gets its own table for `status`.
    status = models.CharField(max_length=80)

class User(models.Model):
    name = models.CharField(max_length=100)
    def __unicode__(self):
        return self.name

class UserProxy(User):
    class Meta:
        proxy = True

class UserProxyProxy(UserProxy):
    class Meta:
        proxy = True
# We can still use `select_related()` to include related models in our querysets.
class Country(models.Model):
    name = models.CharField(max_length=50)

class State(models.Model):
    name = models.CharField(max_length=50)
    country = models.ForeignKey(Country)
    def __unicode__(self):
        return self.name

class StateProxy(State):
    # Proxy used to exercise select_related() through a proxy class.
    class Meta:
        proxy = True
# Proxy models still works with filters (on related fields)
# and select_related, even when mixed with model inheritance
class BaseUser(models.Model):
    name = models.CharField(max_length=255)

class TrackerUser(BaseUser):
    # Multi-table inheritance: adds its own table with `status`.
    status = models.CharField(max_length=50)

class ProxyTrackerUser(TrackerUser):
    class Meta:
        proxy = True

class Issue(models.Model):
    summary = models.CharField(max_length=255)
    assignee = models.ForeignKey(TrackerUser)
    def __unicode__(self):
        return ':'.join((self.__class__.__name__,self.summary,))

class Bug(Issue):
    version = models.CharField(max_length=50)
    reporter = models.ForeignKey(BaseUser)

class ProxyBug(Bug):
    """
    Proxy of an inherited class
    """
    class Meta:
        proxy = True

class ProxyProxyBug(ProxyBug):
    """
    A proxy of proxy model with related field
    """
    class Meta:
        proxy = True

class Improvement(Issue):
    """
    A model that has relation to a proxy model
    or to a proxy of proxy model
    """
    version = models.CharField(max_length=50)
    reporter = models.ForeignKey(ProxyTrackerUser)
    associated_bug = models.ForeignKey(ProxyProxyBug)

class ProxyImprovement(Improvement):
    class Meta:
        proxy = True
|
[
"ssaltzman@mitre.org"
] |
ssaltzman@mitre.org
|
fd8aa3ea52ca26a7bff1f7c7d6f9d22f8f4d59b7
|
c038d06c31de0919d70c04f517f7490146ff80df
|
/train_nn.py
|
b8a8c4cc52d03dcfcdb4e339de6e584971a4eca1
|
[] |
no_license
|
KWAN-YWAN/gtd-analytics
|
235df79f9b95b1734928cd2a9b4d54c5bf3f88e8
|
10fd7fa2965bb0efcc2396d86d3998afbc0fe7c8
|
refs/heads/master
| 2020-03-28T18:13:39.976015
| 2018-07-11T21:56:29
| 2018-07-11T21:56:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,579
|
py
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
# Plotter library
import seaborn as sns
import matplotlib.pyplot as plt
# Own libraries
import data_preparation as prep
from nn import Nn
from sklearn import preprocessing
def filter_data(df):
    """Keep only kidnapping-related incidents with usable outcome data.

    A row qualifies when any of its three attack-type slots holds a
    kidnapping-style code, or when it is explicitly flagged as a hostage
    event. Rows lacking an outcome/release count, or with inconsistent
    counts, are dropped.
    """
    # Codes 5 and 6 -- presumably hostage-barricade and kidnapping; confirm
    # against the GTD codebook.
    kidnap_cats = [5, 6]
    is_kidnap = (
        df.attacktype1.isin(kidnap_cats)
        | df.attacktype2.isin(kidnap_cats)
        | df.attacktype3.isin(kidnap_cats)
        # Bug fix: '==' binds looser than '|', so the original expression
        # compared the whole OR-chain to 1 instead of testing ishostkid == 1.
        | (df.ishostkid == 1)
    )
    df = df[is_kidnap]
    # Drop attacktype columns. They aren't needed anymore.
    df = df.drop(['attacktype1', 'attacktype2', 'attacktype3', 'ishostkid'], axis=1)
    # Filter broken data: outcome class must be present.
    df = df[df.hostkidoutcome.notnull()]
    # nreleased must be present and not the -99 "unknown" marker.
    df = df[df.nreleased.notnull()]
    df = df[df.nreleased != -99]
    # Released victims cannot exceed the number kidnapped.
    df = df[df.nhostkid >= df.nreleased]
    return df
def augmentate_data(df):
    """Add derived columns and reduce the frame to numeric columns only."""
    # Add an ID group for gname to the DataFrame
    df['gname_id'], _ = prep.str_to_index_arr(df['gname'])
    # Add a normalisation for how many of the hostage victims survived
    df['nreleased_p'] = np.divide(df.nreleased, df.nhostkid)
    # Add a column of died hostages
    df['ndied'] = np.subtract(df.nhostkid, df.nreleased)
    # Drop all string columns and keep only numeric ones.
    df = df._get_numeric_data()
    return df
def handle_NaN_in_data(df):
    """Replace every NaN with its column mean via sklearn's Imputer.

    NOTE(review): sklearn.preprocessing.Imputer was deprecated in 0.20 and
    removed in 0.22 (successor: sklearn.impute.SimpleImputer); this function
    fails on modern scikit-learn -- confirm the pinned version.
    """
    from sklearn.preprocessing import Imputer
    fill_NaN = Imputer(missing_values='NaN', strategy='mean', axis=0)
    imputed_df = pd.DataFrame(fill_NaN.fit_transform(df))
    # fit_transform returns a bare ndarray; restore column labels and index.
    imputed_df.columns = df.columns
    imputed_df.index = df.index
    df = imputed_df
    return df
def set_NaN_to_value(df, value):
    """Return a copy of *df* with every NaN replaced by *value*."""
    replaced = df.replace(np.nan, value)
    return replaced
def set_unknown_to_NaN(df, unknown_values):
    """Return a copy of *df* with every listed sentinel replaced by NaN.

    Bug fix: DataFrame.replace is not in-place -- the original discarded its
    return value and handed back the unmodified frame; the result is now
    rebound on each iteration.
    """
    for unknown_value in unknown_values:
        df = df.replace(unknown_value, np.nan)
    return df
def visualize_data(df, path='', suffix=''):
    """Write exploratory plots (PNG + PDF) for *df* under *path*.

    Produces a year-vs-hostages regression plot, an outcome-vs-release-rate
    violin plot, and a masked correlation heatmap. *suffix* is appended to
    every file name before the extension.
    """
    # First: a plot about number of kidnapped persons
    sns.set(style="darkgrid", color_codes=True)
    g1 = sns.jointplot(
        'iyear',
        'nhostkid',
        data=df,
        kind="reg",
        color='r',
        size=7,
        xlim=[1970, 2016]
    )
    g1.set_axis_labels('Years', 'Number of kidnapped victims')
    g1.savefig(path + 'interaction-iyear_nhostkid' + suffix + '.png')
    g1.savefig(path + 'interaction-iyear_nhostkid' + suffix + '.pdf')
    # Outcomes vs percentage of released victims, split by ransom demand.
    g2 = sns.violinplot(
        x='hostkidoutcome',
        y='nreleased_p',
        data=df,
        hue='ransom'
    )
    g2.figure.savefig(path + 'interaction-hostkidoutcome_nreleased_p' + suffix + '.png')
    g2.figure.savefig(path + 'interaction-hostkidoutcome_nreleased_p' + suffix + '.pdf')
    ### Correlation
    corr = df.corr()
    # Generate a mask for the upper triangle (it mirrors the lower one).
    mask = np.zeros_like(corr, dtype=np.bool)
    mask[np.triu_indices_from(mask)] = True
    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(11, 9))
    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    # Draw the heatmap with the mask and correct aspect ratio
    g3 = sns.heatmap(
        corr,
        mask=mask,
        cmap=cmap,
        vmax=.3,
        center=0,
        square=True,
        linewidths=.5,
        cbar_kws={"shrink": .5}
    )
    g3.figure.savefig(path + 'correlation_full' + suffix + '.png')
    g3.figure.savefig(path + 'correlation_full' + suffix + '.pdf')
def train(X, Y):
    """Fit a decision-tree classifier on (X, Y) and return the fitted model.

    Bug fix: the original fitted on the module-level globals X_train/Y_train
    instead of its parameters (NameError outside the __main__ block) and
    discarded the fitted model; it now uses X/Y and returns the classifier.
    """
    from sklearn.tree import DecisionTreeClassifier
    model = DecisionTreeClassifier()
    model.fit(X, Y)
    return model
def train_svm(X, y, C=1.0):
    '''
    Trains the SVM with X as input and y as output data
    Input:
        - X: Input vector with features
        - y: Output vector with one label column
        - C: SVM regularisation parameter
    '''
    from sklearn.svm import SVC
    classifier = SVC(kernel='linear', C=C, decision_function_shape='ovr')
    classifier.fit(X, y)
    return classifier
def predict_svm(model, X, y):
    """Return the model's predictions for X (y is unused, kept for API symmetry)."""
    predictions = model.predict(X)
    return predictions
if __name__ == "__main__":
    #####
    # The purpose of our classifier is to predict the hostkidoutcome category and a percentage of released persons.
    # Y: hostkidoutcome, npreleased
    # X: extended, iyear, gname_id, nhostkid, ndays, ransom, ransompaid, ishostkid
    #####
    ### Data filtering
    # Read data and exclude cols
    # @Snippet: To exclude: lambda x: x not in ["eventid","imonth","iday", "attacktype2","claims2","claimmode2","claimmode3","gname2"]
    df = prep.read_data('globalterrorismdb_0617dist.csv',
                        usecols=['nreleased', 'attacktype1', 'attacktype2', 'attacktype3', 'extended', 'iyear', 'gname',
                                 'nhostkid', 'nhours', 'ndays', 'ransom', 'ransompaid', 'ransompaidus', 'ishostkid',
                                 'hostkidoutcome'])
    df = filter_data(df)
    df = augmentate_data(df)
    # We also have sometimes -99 or -9 as values when things were unknown. We have to replace them as well with NaNs
    df = set_unknown_to_NaN(df, [-9, -99])
    # We have a whole number of columns which contains NaNs for missing data. To overcome those, we simply use the sklearn Imputer to fill the NaNs with the mean values
    df = set_NaN_to_value(df, -1)
    head = df.head()
    print(df.head())
    # Plot data
    visualize_data(df, path="plots/")
    print('Resulting columns for training: \n{}\n'.format(df.columns))
    # Normalize to 0-1
    x = df.values
    x_normed = x / x.max(axis=0)
    # NOTE(review): columns=[head] wraps a DataFrame in a list; this looks
    # like it was meant to be the column labels (e.g. head.columns) -- verify.
    df = pd.DataFrame(columns=[head], data=x_normed)
    print(df)
    ### Separate set into train, validation, test by assigning each to the preferred class randomly.
    # NOTE(review): independent sampling with replace=True lets rows appear
    # in several splits (train/validation/test overlap) -- confirm intended.
    train = df.sample(frac=0.6, replace=True)
    validation = df.sample(frac=0.2, replace=True)
    test = df.sample(frac=0.2, replace=True)
    labels = ['hostkidoutcome', 'nreleased_p']
    X_train, Y_train, Y_train_columns = prep.separate_labels(train, labels)
    X_validation, Y_validation, Y_validation_columns = prep.separate_labels(validation, labels)
    X_test, Y_test, Y_test_columns = prep.separate_labels(test, labels)
    nn = Nn()
    nn.create_model()
    nn.load_model_from_json()
    # nn.train(x=X_train.values,
    #          y=Y_train.values,
    #          validation_data=(X_validation.values, Y_validation.values))
    # nn.persist_model()
    score = nn.evaluate(x=X_test, y=Y_test)
    print("Achieved Score:", score)
|
[
"vinh-ngu@hotmail.com"
] |
vinh-ngu@hotmail.com
|
96a31bd87d182e38af66c9502dda52cbddd18184
|
9405aa570ede31a9b11ce07c0da69a2c73ab0570
|
/aliyun-python-sdk-ons/aliyunsdkons/request/v20190214/OnsMessagePageQueryByTopicRequest.py
|
18afe3920e90c8d8b1f8495a693430691275ac77
|
[
"Apache-2.0"
] |
permissive
|
liumihust/aliyun-openapi-python-sdk
|
7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b
|
c7b5dd4befae4b9c59181654289f9272531207ef
|
refs/heads/master
| 2020-09-25T12:10:14.245354
| 2019-12-04T14:43:27
| 2019-12-04T14:43:27
| 226,002,339
| 1
| 0
|
NOASSERTION
| 2019-12-05T02:50:35
| 2019-12-05T02:50:34
| null |
UTF-8
|
Python
| false
| false
| 2,300
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class OnsMessagePageQueryByTopicRequest(RpcRequest):
    """RPC request for the Ons 'OnsMessagePageQueryByTopic' API (2019-02-14).

    Pages through a topic's messages; each generated getter/setter pair
    below maps 1:1 onto a query parameter of the same name.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Ons', '2019-02-14', 'OnsMessagePageQueryByTopic','ons')

    def get_PreventCache(self):
        return self.get_query_params().get('PreventCache')

    def set_PreventCache(self,PreventCache):
        self.add_query_param('PreventCache',PreventCache)

    def get_InstanceId(self):
        return self.get_query_params().get('InstanceId')

    def set_InstanceId(self,InstanceId):
        self.add_query_param('InstanceId',InstanceId)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self,PageSize):
        self.add_query_param('PageSize',PageSize)

    def get_Topic(self):
        return self.get_query_params().get('Topic')

    def set_Topic(self,Topic):
        self.add_query_param('Topic',Topic)

    def get_EndTime(self):
        return self.get_query_params().get('EndTime')

    def set_EndTime(self,EndTime):
        self.add_query_param('EndTime',EndTime)

    def get_BeginTime(self):
        return self.get_query_params().get('BeginTime')

    def set_BeginTime(self,BeginTime):
        self.add_query_param('BeginTime',BeginTime)

    def get_CurrentPage(self):
        return self.get_query_params().get('CurrentPage')

    def set_CurrentPage(self,CurrentPage):
        self.add_query_param('CurrentPage',CurrentPage)

    def get_TaskId(self):
        return self.get_query_params().get('TaskId')

    def set_TaskId(self,TaskId):
        self.add_query_param('TaskId',TaskId)
|
[
"haowei.yao@alibaba-inc.com"
] |
haowei.yao@alibaba-inc.com
|
8fbc50489eff50ed424d41fd9e73da22a933f129
|
4b17b361017740a4113ba358460293e55c9bee49
|
/LAB02/04-CloudAlbum-XRAY/cloudalbum/model/models.py
|
f21c3b69306956a5492bd0f320264da3289f4e9d
|
[
"MIT"
] |
permissive
|
liks79/aws-chalice-migration-workshop
|
aa01fa5a585a548c8408ba7448d731deefbbbd18
|
5115117504a3e2b897dc8444be58de0e4e12586a
|
refs/heads/master
| 2022-12-25T09:50:44.821495
| 2018-11-20T03:05:52
| 2018-11-20T03:05:52
| 139,544,736
| 6
| 8
|
MIT
| 2022-12-08T02:17:36
| 2018-07-03T07:26:11
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 3,505
|
py
|
"""
model.models.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CloudAlbum is a sample application for TechSummit 2018 workshop.
:copyright: © 2018 by Sungshik Jou.
:license: BSD, see LICENSE for more details.
"""
from sqlalchemy import Float, DateTime, ForeignKey, Integer, String
from flask_login import UserMixin
from flask_sqlalchemy import SQLAlchemy
from cloudalbum import login
db = SQLAlchemy()
class User(UserMixin, db.Model):
    """
    Database Model class for User table
    """
    __tablename__ = 'User'
    id = db.Column(Integer, primary_key=True)
    username = db.Column(String(50), unique=False)
    email = db.Column(String(50), unique=True)  # login identifier
    # NOTE(review): stores whatever the caller passes -- hashing not visible here.
    password = db.Column(String(100), unique=False)
    # Cascade so a user's photos are deleted together with the user.
    photos = db.relationship('Photo',
                             backref='user',
                             cascade='all, delete, delete-orphan')
    def __init__(self, name, email, password):
        self.username = name
        self.email = email
        self.password = password
    def __repr__(self):
        return '<%r %r %r>' % (self.__tablename__, self.username, self.email)
class Photo(db.Model):
    """
    Database Model class for Photo table
    """
    __tablename__ = 'Photo'
    id = db.Column(Integer, primary_key=True)
    user_id = db.Column(Integer, ForeignKey(User.id))  # owning user
    tags = db.Column(String(400), unique=False)
    desc = db.Column(String(400), unique=False)
    filename_orig = db.Column(String(400), unique=False)  # name as uploaded
    filename = db.Column(String(400), unique=False)  # stored name
    filesize = db.Column(Integer, unique=False)
    # EXIF-derived geolocation / capture metadata.
    geotag_lat = db.Column(Float, unique=False)
    geotag_lng = db.Column(Float, unique=False)
    upload_date = db.Column(DateTime, unique=False)
    taken_date = db.Column(DateTime, unique=False)
    make = db.Column(String(400), unique=False)
    model = db.Column(String(400), unique=False)
    width = db.Column(String(400), unique=False)
    height = db.Column(String(400), unique=False)
    city = db.Column(String(400), unique=False)
    nation = db.Column(String(400), unique=False)
    address = db.Column(String(400), unique=False)
    def __init__(self, user_id, tags, desc, filename_orig, filename, filesize, geotag_lat, geotag_lng, upload_date,
                 taken_date, make, model, width, height, city, nation, address):
        """Initialize"""
        self.user_id = user_id
        self.tags = tags
        self.desc = desc
        self.filename_orig = filename_orig
        self.filename = filename
        self.filesize = filesize
        self.geotag_lat = geotag_lat
        self.geotag_lng = geotag_lng
        self.upload_date = upload_date
        self.taken_date = taken_date
        self.make = make
        self.model = model
        self.width = width
        self.height = height
        self.city = city
        self.nation = nation
        self.address = address
    def __repr__(self):
        """print information"""
        return '<%r %r %r>' % (self.__tablename__, self.user_id, self.upload_date)
@login.user_loader
def load_user(id):
    """
    User information loader for authenticated user
    :param id: user id
    :return: user record from User table
    """
    # NOTE(review): the commented block below was a session-timeout check;
    # it is dead code and currently no timeout is enforced.
    # user = User.query.get(int(id))
    #
    # minutes = conf['SESSION_TIMEOUT']
    #
    # if user.last_seen < (datetime.utcnow() - datetime.timedelta(minutes=minutes)):
    #     # Session has timed out
    #     return None
    #
    # return User.query.get(user)
    return User.query.get(int(id))
|
[
"liks79@gmail.com"
] |
liks79@gmail.com
|
2e12d79fa9ad4afdc9d45903736aa325321b8bdf
|
780b01976dad99c7c2ed948b8473aa4e2d0404ba
|
/scripts/alphas_archive/zw_contfut/alpha_ichimokucloud_long_bullish_feb02_.py
|
ac7c5e278698c535bbbd3bc0ba40df1577ab83ca
|
[] |
no_license
|
trendmanagement/tmqrexo_alexveden
|
a8ad699c2c3df4ce283346d287aff4364059a351
|
4d92e2ee2bc97ea2fcf075382d4a5f80ce3d72e4
|
refs/heads/master
| 2021-03-16T08:38:00.518593
| 2019-01-23T08:30:18
| 2019-01-23T08:30:18
| 56,336,692
| 1
| 1
| null | 2019-01-22T14:21:03
| 2016-04-15T17:05:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
#
#
# Automatically generated file
# Created at: 2017-02-09 10:10:05.686710
#
from strategies.strategy_ichimokucloud import StrategyIchimokuCloud
from backtester.strategy import OptParam
from backtester.swarms.rebalancing import SwarmRebalance
from backtester.costs import CostsManagerEXOFixed
from backtester.swarms.rankingclasses import RankerBestWithCorrel
from backtester.strategy import OptParamArray
# Strategy identity: Ichimoku Cloud on the ZW continuous future, long/bullish.
STRATEGY_NAME = StrategyIchimokuCloud.name
STRATEGY_SUFFIX = "_Bullish_Feb02_"

# Backtester configuration consumed by the swarm framework.
# NOTE(review): parameter semantics below are inferred from names only --
# confirm against the backtester package documentation.
STRATEGY_CONTEXT = {
    'swarm': {
        'members_count': 1,
        'ranking_class': RankerBestWithCorrel(window_size=-1, correl_threshold=-0.5),
        'rebalance_time_function': SwarmRebalance.every_friday,
    },
    'strategy': {
        'exo_name': 'ZW_ContFut',
        'class': StrategyIchimokuCloud,
        # OptParam(name, default, min, max, step) -- optimization grid.
        'opt_params': [
            OptParamArray('Direction', [1]),  # long-only
            OptParam('conversion_line_period', 9, 2, 22, 5),
            OptParam('base_line_period', 26, 13, 13, 2),
            OptParam('leading_spans_lookahead_period', 26, 26, 26, 13),
            OptParam('leading_span_b_period', 52, 2, 106, 30),
            OptParamArray('RulesIndex', [14, 6, 13]),
            OptParam('MedianPeriod', 5, 14, 26, 12),
        ],
    },
    'costs': {
        'context': {
            'costs_options': 3.0,
            'costs_futures': 3.0,
        },
        'manager': CostsManagerEXOFixed,
    },
}
|
[
"i@alexveden.com"
] |
i@alexveden.com
|
d2f0d4f6c0fbf446e71d0d8932ea484f5254a496
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/protein_lm/evaluation.py
|
65526ef1936021cd9841bd15b70a9acb1961835f
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| null |
UTF-8
|
Python
| false
| false
| 4,563
|
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Baselines and evaluation metrics for Jax language models."""
import itertools
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as np
from protein_lm import utils
class EmpiricalBaseline():
    """ Empirical baseline as described in the ProGen paper.

    References:
      [ProGen](https://www.biorxiv.org/content/10.1101/2020.03.07.982272v1)
    """

    def __init__(self, domain, train_ds, alpha=1.):
        """Creates an instance of this class.

        # TODO(gandreea): It's unclear how to handle the length (EOS token). The
        #   fact that the uniform baseline is reported as (perplexity=25,
        #   accuracy=0.04) suggests that the EOS prediction step is not included.

        Args:
          domain: An instance of domains.Domain.
          train_ds: A tf.data.Dataset containing the data to be used for computing
            the empirical distribution.
          alpha: A float indicating the Laplace smoothing constant.
        """
        self._vocab_size = domain.vocab_size
        # Exclude the BOS/EOS markers from the modeled token set.
        self._token_indices = [
            idx for idx in range(len(domain.vocab.tokens))
            if idx != domain.vocab.bos and idx != domain.vocab.eos]
        self._mask_token = domain.vocab.bos
        # Accumulate per-token counts over the whole training set.
        self._empirical_dist = np.zeros((len(self._token_indices),))
        for batch in train_ds:
            batch = np.atleast_2d(batch)
            batch_one_hot = np.eye(self._vocab_size)[batch]
            batch_one_hot = np.take(batch_one_hot, self._token_indices, axis=-1)
            self._empirical_dist += np.sum(np.sum(batch_one_hot, axis=0), axis=0)
        self._empirical_dist += alpha  # Laplace smoothing.
        self._empirical_dist /= np.sum(self._empirical_dist)

    def evaluate_batch(self, batch):
        """Computes all metrics on the given batch."""
        labels = np.atleast_2d(batch)
        # Same empirical log-probabilities at every position.
        logits = np.log(self._empirical_dist)
        logits = np.tile(logits, list(labels.shape) + [1])
        # Positions holding the mask token are excluded from the metrics.
        weights = np.where(labels != self._mask_token, 1, 0)
        metrics = utils.compute_metrics(logits, labels, weights)
        for key, value in metrics.items():
            metrics[key] = jnp.atleast_1d(value)
        return metrics
def combine_metrics(step_metrics):
    """Given a list of metric dicts, combine to a single summary metrics dict.

    Args:
      step_metrics: A dict with (metric name, metric value) items. Contains summed
        metrics and the corresponding denominator (the number of next-token
        prediction instances). Each metric value have at least one dimension.

    Returns:
      A dict with (metric name, metric value) items containing combined metrics.
    """
    metrics_all = common_utils.get_metrics(step_metrics)
    lr = None
    # Learning rate is averaged, not summed -- pull it out before reduction.
    if 'learning_rate' in metrics_all:
        lr = metrics_all.pop('learning_rate').mean()
    metrics_sums = jax.tree_map(jnp.sum, metrics_all)
    # Normalize every summed metric by the number of prediction instances.
    denominator = metrics_sums.pop('denominator')
    summary = jax.tree_map(lambda x: x / denominator, metrics_sums)  # pylint: disable=cell-var-from-loop
    if lr is not None:
        summary['learning_rate'] = lr
    # Calculate (clipped) perplexity after averaging log-perplexities:
    if 'loss' in summary:
        summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
    return summary
def evaluate(model, eval_ds, num_eval_steps=None):
    """Evaluates model on eval_ds for num_eval_steps.

    Args:
      model: A model to use for evaluation. Must have an evaluate_batch() method.
      eval_ds: A tensorflow dataset containing the data to be used for evaluation.
      num_eval_steps: If given, evaluate for this many steps, otherwise use the
        entire dataset.

    Returns:
      A dictionary with (metric name, metric value) items.
    """
    # An endless counter means "run until the dataset is exhausted".
    if num_eval_steps is None:
        step_counter = itertools.repeat(1)
    else:
        step_counter = range(num_eval_steps)
    per_batch_metrics = [
        model.evaluate_batch(np.asarray(batch))
        for _, batch in zip(step_counter, iter(eval_ds))
    ]
    return combine_metrics(per_batch_metrics)
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
fe3182900da8d8bb4dbc2094bba70c61c293ed2a
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/fyyJRDHcTe9REs4Ni_2.py
|
fa8d5a9f92273ccb98b5f5ce47ca0d2a51943ab1
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,033
|
py
|
"""
Create a function that takes three arguments (first dictionary, second
dictionary, key) in order to:
1. Return the boolean `True` if both dictionaries have the same values for the same keys.
2. If the dictionaries don't match, return the string `"Not the same"`, or the string `"One's empty"` if only one of the dictionaries contains the given key.
### Examples
dict_first = { "sky": "temple", "horde": "orcs", "people": 12, "story": "fine", "sun": "bright" }
dict_second = { "people": 12, "sun": "star", "book": "bad" }
check(dict_first, dict_second, "horde") ➞ "One's empty"
check(dict_first, dict_second, "people") ➞ True
check(dict_first, dict_second, "sun") ➞ "Not the same"
### Notes
* Dictionaries are an unordered data type.
* Double quotes may be helpful.
* `KeyError` can occur when trying to access a dictionary key that doesn't exist.
"""
def check(d1, d2, k):
    """Compare d1[k] with d2[k].

    Returns True when they are equal, "Not the same" when they differ,
    and "One's empty" when either dict lacks the key.
    """
    try:
        values_match = d1[k] == d2[k]
    except KeyError:
        return "One's empty"
    return True if values_match else "Not the same"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
74b45ad1bf32de78395b3bc6704098a560d4dad1
|
ce083128fa87ca86c65059893aa8882d088461f5
|
/python/pytest-labs/mock_lab_1.py
|
205cbdd5b5cb3ab89040e1c723b30e8800b68fc2
|
[] |
no_license
|
marcosptf/fedora
|
581a446e7f81d8ae9a260eafb92814bc486ee077
|
359db63ff1fa79696b7bc803bcfa0042bff8ab44
|
refs/heads/master
| 2023-04-06T14:53:40.378260
| 2023-03-26T00:47:52
| 2023-03-26T00:47:52
| 26,059,824
| 6
| 5
| null | 2022-12-08T00:43:21
| 2014-11-01T18:48:56
| null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
#!/usr/bin/env python2
#encoding: UTF-8
import mock
class Target(object):
    """Toy collaborator used to demonstrate mocking.

    Bug fix: apply() was declared without `self`, so on an instance the
    receiver was silently bound to `value`. The explicit `self` restores
    normal instance-method semantics.
    """
    def apply(self, value, are_you_sure):
        """Return *value* when *are_you_sure* is truthy, else None."""
        if are_you_sure:
            return value
        else:
            return None
def method(target, value):
    # Delegates to the collaborator. Note apply() is called with a single
    # argument -- only a Mock (which accepts any signature) satisfies this.
    return target.apply(value)
#pytest using mock.Mock() instance
def test_method():
    # The mock records the call; no real Target is exercised here.
    target = mock.Mock()
    method(target, "value")
    target.apply.assert_called_with("value")
|
[
"marcosptf@yahoo.com.br"
] |
marcosptf@yahoo.com.br
|
be7503afcbfae63f32619f134faf846ec33a132d
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/meetup/17857a6adde04647acc54269ab6ef4be.py
|
84c22eae0a99b1a54a20f6aa508a6a593c16744c
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
from collections import defaultdict
from datetime import date, timedelta
def meetup_day(year, month, day_name, date_type):
    """Return the date of e.g. the '1st' 'Monday' of the given month.

    date_type is one of '1st', '2nd', '3rd', '4th', 'last', or 'teenth'.
    """
    weekday_names = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
                     'Friday', 'Saturday', 'Sunday')
    wanted = weekday_names.index(day_name)
    # Collect the month-days on which the requested weekday falls.
    current = date(year=year, month=month, day=1)
    matching_days = []
    while current.month == month:
        if current.weekday() == wanted:
            matching_days.append(current.day)
        current += timedelta(days=1)
    if date_type == 'teenth':
        # Exactly one occurrence of each weekday lands in the 13..19 range.
        chosen = next(d for d in matching_days if 13 <= d <= 19)
    elif date_type == 'last':
        chosen = matching_days[-1]
    else:
        # '1st'/'2nd'/'3rd'/'4th' -> ordinal index into the occurrences.
        chosen = matching_days[int(date_type[0]) - 1]
    return date(year=year, month=month, day=chosen)
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
47d8e02e074f1b33228838b15e10ea23d3c6ee86
|
4fca17a3dbc3e74ba7e46bd7869eb6d138e4c422
|
/_0681_Next_Closest_Time.py
|
6c11bbad0971087cef6f5ea28aee8b0e18f175eb
|
[] |
no_license
|
mingweihe/leetcode
|
a2cfee0e004627b817a3c0321bb9c74128f8c1a7
|
edff905f63ab95cdd40447b27a9c449c9cefec37
|
refs/heads/master
| 2021-06-19T07:46:46.897952
| 2021-05-02T05:13:17
| 2021-05-02T05:13:17
| 205,740,338
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
class Solution(object):
    def nextClosestTime(self, time):
        """Return the next closest time reusing only the digits of `time`.

        Enumerates every HHMM combination of the available digits and keeps
        the smallest strictly-later valid time; when no later time exists,
        wraps around to the smallest buildable time.

        :type time: str
        :rtype: str
        """
        digits = set(time) - {':'}
        base = int(time.replace(':', ''))
        best_gap = float('inf')
        # Wrap-around fallback: the hour digit is at most '2', so doubling
        # the smallest digit always yields a valid hour and minute, and it
        # is the earliest time buildable from these digits.
        lowest = min(time) * 2
        best = lowest + ':' + lowest
        for h1 in digits:
            for h2 in digits:
                for m1 in digits:
                    for m2 in digits:
                        hour, minute = int(h1 + h2), int(m1 + m2)
                        if hour > 23 or minute > 59:
                            continue
                        stamp = hour * 100 + minute
                        if stamp <= base or stamp - base >= best_gap:
                            continue
                        best_gap = stamp - base
                        best = h1 + h2 + ':' + m1 + m2
        return best
|
[
"hemingwei2017@gmail.com"
] |
hemingwei2017@gmail.com
|
c5b50c84a27561cd42e497c41900c80a6f77b56c
|
30de452d89eacf48f61ceddfaff86aa62d505507
|
/traits/observation/_i_notifier.py
|
2b28ed8a676667350285c54a1f6916280271f97f
|
[
"BSD-3-Clause",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
odidev/traits
|
92224376b5444a7a5f805b474f0aa53ac1ca7cd2
|
52f4d00248cec5dbf0826de4e846b4ad83cf072e
|
refs/heads/master
| 2023-06-18T16:53:43.850534
| 2021-07-14T05:48:46
| 2021-07-14T05:48:46
| 388,075,888
| 0
| 0
|
NOASSERTION
| 2021-07-21T10:31:06
| 2021-07-21T10:11:23
| null |
UTF-8
|
Python
| false
| false
| 1,620
|
py
|
# (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import abc
class INotifier(abc.ABC):
    """ Interface for all notifiers.

    An instance of notifier must be a callable, i.e. ``__call__`` must be
    implemented and cannot be None. The signature of that callable should be
    compatible with the observables the notifier will be given to. This
    interface does not define what that signature should be.
    """

    def __call__(self, *args, **kwargs):
        """ Called by an observable.

        The signature is not restricted by the interface; concrete
        notifiers must accept whatever the observable passes.
        """
        raise NotImplementedError("__call__ must be implemented.")

    def add_to(self, observable):
        """ Add this notifier to the observable.

        Parameters
        ----------
        observable : IObservable
            The observable that should start invoking this notifier.
        """
        raise NotImplementedError("add_to must be implemented.")

    def remove_from(self, observable):
        """ Remove this notifier or a notifier equivalent to this one
        from the observable.

        Parameters
        ----------
        observable : IObservable
            The observable to remove this notifier from.

        Raises
        ------
        NotifierNotFound
            If the notifier cannot be found.
        """
        raise NotImplementedError("remove_from must be implemented.")
|
[
"noreply@github.com"
] |
odidev.noreply@github.com
|
10e9fdf6a6b34922bef66358b5ff457a52a28977
|
1d0e36f710ed936d9bec3d88b69edd8a26b62823
|
/examples/ble_uart_echo_client.py
|
ce36ac80691af1a7338da4730e0fbc2c9a45008c
|
[
"MIT"
] |
permissive
|
dglaude/Adafruit_CircuitPython_BLE
|
c0336787bbf739ddad9d078eab4edc2a80530bd4
|
e8f72b053af8cfcde0c07040a5f2feecd4ca585b
|
refs/heads/master
| 2020-12-12T17:54:52.632742
| 2020-01-09T03:09:22
| 2020-01-14T23:18:46
| 234,190,805
| 0
| 0
|
MIT
| 2020-01-15T23:05:42
| 2020-01-15T23:05:41
| null |
UTF-8
|
Python
| false
| false
| 1,086
|
py
|
"""
Used with ble_uart_echo_test.py. Transmits "echo" to the UARTService and receives it back.
"""
import time
from adafruit_ble import BLERadio
from adafruit_ble.advertising.standard import ProvideServicesAdvertisement
from adafruit_ble.services.nordic import UARTService
# Radio object used both for scanning and for holding connections.
ble = BLERadio()
while True:
    # Inner loop: while at least one connected peer exposes the UART
    # service, send "echo" to every such peer roughly once a second.
    while ble.connected and any(UARTService in connection for connection in ble.connections):
        for connection in ble.connections:
            if UARTService not in connection:
                continue
            print("echo")
            uart = connection[UARTService]
            uart.write(b"echo")
            # Returns b'' if nothing was read.
            one_byte = uart.read(4)
            if one_byte:
                print(one_byte)
                print()
        time.sleep(1)
    # No usable connection left: scan for a peripheral advertising UART
    # and connect to the first one found.
    print("disconnected, scanning")
    for advertisement in ble.start_scan(ProvideServicesAdvertisement, timeout=1):
        if UARTService not in advertisement.services:
            continue
        ble.connect(advertisement)
        print("connected")
        break
    ble.stop_scan()
|
[
"scott@tannewt.org"
] |
scott@tannewt.org
|
5759a3994045c73ec308fd6d0a0209db7f485d10
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/380/usersdata/328/74024/submittedfiles/testes.py
|
d9f0fad052a8e893b10ab6e61accf13576dfb507
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
a=5
print('%.2f'%a)
a=30
b=5
c=10
if a<b<c:
print('comando 1')
else:
if a<c<b:
print('comando 2')
else:
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
9bf5f51bb15906ebe54e8158ffa8d0e1abcdfd05
|
2f6817fc8f6ddb48f5f88c913d8e40b672fc3dbf
|
/MLP/quiz11-3.py
|
331fe654ca3d83ef9ffd1dcf2ebdbcc9879d2b59
|
[] |
no_license
|
cutz-j/TodayILearned
|
320b5774de68a0f4f68fda28a6a8b980097d6ada
|
429b24e063283a0d752ccdfbff455abd30ba3859
|
refs/heads/master
| 2020-03-23T17:34:51.389065
| 2018-11-24T08:49:41
| 2018-11-24T08:49:41
| 141,865,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,157
|
py
|
## Q13: K-means ##
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist, pdist
## 파일 전처리 ##
def file_open(file_name):
    """Read a comma-separated transactions file into a numpy array.

    Each line of the file is one purchase: a comma-separated list of
    product names. Returns ``np.array`` of the per-line lists (for the
    grocery data this is a ragged/object array, shape (9835,)).

    Fix: the original opened the file without ever closing it; a ``with``
    block now guarantees the handle is released.
    """
    with open(file_name, 'r') as handle:
        all_data = [line.strip('\n').split(',') for line in handle]
    return np.array(all_data)
# Load the raw transactions once at import time (path is machine-specific).
all_data = file_open("d:/data/prac/groceries.csv")
def numbering(all_data):
    ## number each product while inserting it into the dict ##
    # Assigns a sequential integer id to every distinct product, then
    # replaces each product name in `all_data` with its id, in place.
    # Side effect: sets the module-level `all_item_num` to the number of
    # distinct products seen.
    global all_item_num
    k = 0
    all_dict = {}
    for buy in all_data:
        for product in buy:
            if product in all_dict:
                continue
            else:
                all_dict[product] = k
                k += 1
    all_item_num = k
    # Second pass: rewrite names as ids (note: reuses `k` as loop index).
    for i in all_data:
        for k in range(len(i)):
            i[k] = all_dict[i[k]]
    return all_data, all_dict
all_transaction = len(all_data) # total number of transactions: 9835
all_item_num = 0 # number of distinct items; numbering() sets it (169)
all_data, all_dict = numbering(all_data) # replace product names with ids
## one-hot ##
def one_hot(data):
    ## one-hot-encode each shopper into a vector ## --> X: shape(9835, 169)
    # NOTE(review): the `data` parameter is unused — the function reads the
    # module-level `all_data`, `all_transaction` and `all_item_num` instead.
    one_hot = np.zeros([all_transaction, all_item_num], dtype=np.int32)
    for i in range(len(all_data)):
        for j in all_data[i]:
            one_hot[i,j] = 1
    return one_hot
x_one_hot = one_hot(all_data) # one-hot
## split ##
# Hold out the last 35 shoppers as a "test" set.
x_train, x_test = x_one_hot[:9800, :], x_one_hot[9800:, :]
## Kmeans ##
# n_cluster = 10, max_iter=3000 #
k_means = KMeans(n_clusters=10, max_iter=3000, random_state=77)
k_means.fit(x_train)
# Assign each held-out shopper to one of the 10 learned clusters.
k_cluster = k_means.predict(x_test)
# Silhouette score of the training assignments (higher = tighter clusters).
ss = silhouette_score(x_train, k_means.labels_, metric='euclidean')
print("테스트 데이터 35명의 클러스터: \n", k_cluster)
print("\nsilhouette_score: ", ss)
|
[
"cutz309@gmail.com"
] |
cutz309@gmail.com
|
90e081344e37878f7f20b3dfb85f48791ce8604c
|
1fe4f9eb9b1d756ad17e1ff6585e8ee7af23903c
|
/saleor/store/migrations/0003_specialpage.py
|
5f0354beda8b80442f5c4eb27b7a679dbb897729
|
[
"BSD-3-Clause"
] |
permissive
|
Chaoslecion123/Diver
|
ab762e7e6c8d235fdb89f6c958488cd9b7667fdf
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
refs/heads/master
| 2022-02-23T10:43:03.946299
| 2019-10-19T23:39:47
| 2019-10-19T23:39:47
| 216,283,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# Generated by Django 2.2 on 2019-04-17 02:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the SpecialPage model, which
    # binds a CMS Page to a well-known site slot (about, faq, legal, ...)
    # for a given SiteSettings, unique per (site_settings, type).
    dependencies = [
        ('site', '0022_auto_20190413_2016'),
        ('page', '0007_auto_20190225_0252'),
        ('store', '0002_socialnetwork'),
    ]
    operations = [
        migrations.CreateModel(
            name='SpecialPage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('about', 'About'), ('faq', 'FAQ'), ('legal', 'Terms and Conditions'), ('privacy', 'Privacy and Cookies'), ('accessibility', 'Accessibility')], max_length=32)),
                ('page', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='site_settings', related_query_name='site_setting', to='page.Page')),
                ('site_settings', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='special_pages', related_query_name='special_page', to='site.SiteSettings')),
            ],
            options={
                'unique_together': {('site_settings', 'type')},
            },
        ),
    ]
|
[
"chaoslecion71@gmail.com"
] |
chaoslecion71@gmail.com
|
1e1eaa7a694586422bdc9da3b230971d98ace025
|
8da91c26d423bacbeee1163ac7e969904c7e4338
|
/pyvisdk/do/host_wwn_conflict_event.py
|
faa99ccf0a169abcf46f9c22e5db93ed38d7722e
|
[] |
no_license
|
pexip/os-python-infi-pyvisdk
|
5d8f3a3858cdd61fb76485574e74ae525cdc7e25
|
1aadea0afbc306d09f6ecb9af0e683dbbf961d20
|
refs/heads/master
| 2023-08-28T02:40:28.789786
| 2020-07-16T04:00:53
| 2020-07-16T04:00:53
| 10,032,240
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def HostWwnConflictEvent(vim, *args, **kwargs):
    '''This event records a conflict of host WWNs (World Wide Name).

    Builds a HostWwnConflictEvent object via the vim client factory and
    populates it from positional arguments (in `required` + `optional`
    order) followed by keyword arguments.

    :param vim: vim service connection providing ``client.factory``.
    :raises IndexError: if fewer than 5 total arguments are supplied.
    :raises InvalidArgumentError: for keyword names outside the known set.
    '''
    obj = vim.client.factory.create('{urn:vim25}HostWwnConflictEvent')

    # Validation: the 5 required properties must all be provided.
    # Fix: the message previously claimed "at least 6" while the check was
    # for 5, and it reported len(args) only while testing args + kwargs.
    if (len(args) + len(kwargs)) < 5:
        raise IndexError('Expected at least 5 arguments got: %d' %
                         (len(args) + len(kwargs)))

    required = [ 'wwn', 'chainId', 'createdTime', 'key', 'userName' ]
    optional = [ 'conflictedHosts', 'conflictedVms', 'changeTag', 'computeResource',
        'datacenter', 'ds', 'dvs', 'fullFormattedMessage', 'host', 'net', 'vm',
        'dynamicProperty', 'dynamicType' ]

    # Positional arguments fill properties in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
[
"jmb@pexip.com"
] |
jmb@pexip.com
|
4a47fffa44259b959487191994bc1233b3491c11
|
4f75cc33b4d65d5e4b054fc35b831a388a46c896
|
/.history/app_20210903124401.py
|
2579fc4075f93959be51f7bd4b7d23610331e820
|
[] |
no_license
|
Lr-2002/newpage
|
c3fe2acc451e24f6408996ea1271c61c321de702
|
c589ad974e7100aa9b1c2ccc095a959ff68069b6
|
refs/heads/main
| 2023-09-03T06:13:53.428236
| 2021-11-23T10:41:21
| 2021-11-23T10:41:21
| 402,606,000
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 869
|
py
|
from flask import Flask, render_template, url_for
# Fix: the original line was a syntactically invalid, bare
# "from flask_sqlalchemy import" statement; import the extension's
# standard entry point so the module is loadable (it is unused below).
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)

# Demo data rendered by the index template.
name = 'Grey Li'
movies = [
    {'title': 'My Neighbor Totoro', 'year': '1988'},
    {'title': 'Dead Poets Society', 'year': '1989'},
    {'title': 'A Perfect World', 'year': '1993'},
    {'title': 'Leon', 'year': '1994'},
    {'title': 'Mahjong', 'year': '1996'},
    {'title': 'Swallowtail Butterfly', 'year': '1996'},
    {'title': 'King of Comedy', 'year': '1999'},
    {'title': 'Devils on the Doorstep', 'year': '1999'},
    {'title': 'WALL-E', 'year': '2008'},
    {'title': 'The Pork of Music', 'year': '2012'},
]


@app.route('/')
def hello():
    """Render the index page with the demo name and movie list."""
    return render_template('index.html', name=name, movies=movies)
|
[
"2629651228@qq.com"
] |
2629651228@qq.com
|
f4e6f2a11be9b1c9b26e680848c56ec23e147339
|
e78154abbb8bacf5afccda9da371684cbeabad36
|
/popego/popserver/build/lib/popserver/tests/agents/test_lastfm.py
|
96e4e5360546f9480b42ef1450462b3d3a5daae1
|
[
"BSD-3-Clause"
] |
permissive
|
enterstudio/popego
|
1a196fabc374c0f45764e5c74bd7752236424040
|
2d09e793d9d2f297139edb325b8a70ddda9b2705
|
refs/heads/master
| 2021-04-09T16:39:40.781634
| 2016-10-14T16:53:47
| 2016-10-14T16:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,723
|
py
|
# -*- coding: utf-8 -*-
__docformat__='restructuredtext'
from popserver.tests.nodb_model import *
from popserver.tests import *
from fixture import DataTestCase
from popserver.tests import popfixtures
from popserver.agents.lastfm_agent import LastFmAgent
from popserver.agents.lastfm_client import LastFMClient
import popserver.agents
import types
import unittest
class TestLastFmClient(unittest.TestCase):
    """Tests for LastFMClient against canned XML fixtures (no network).

    `_getFeed` is monkey-patched with `mock_lastfm_getFeed` (defined below)
    so every feed URL resolves to a fixture file; patching uses the
    Python 2 unbound-method form of types.MethodType.
    """
    def setUp(self):
        # Remember the real fetcher and swap in the URL->fixture mock.
        self._restoreMethod = popserver.agents.lastfm_client.LastFMClient._getFeed
        LastFMClient._getFeed = types.MethodType(mock_lastfm_getFeed, None, LastFMClient)
        self.client = LastFMClient()
    def tearDown(self):
        # Restore the original fetcher so other tests see the real one.
        LastFMClient._getFeed = types.MethodType(self._restoreMethod, None, LastFMClient)
    def testRecentTracks(self):
        t = self.client.getRecentTracksForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert type(t[0]) == type({})
        assert t[0]['artist'] == 'Willie Bobo'
    def testTopTracks(self):
        t = self.client.getTopTracksForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert type(t[0]) == type({})
        assert t[0]['artist'] == 'Brian Wilson'
        assert t[0]['name'] == 'Our Prayer Gee'
    def testTopArtists(self):
        t = self.client.getTopArtistsForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert type(t[0]) == type({})
        assert t[0]['name'] == 'The Beatles'
    def testUserTagsForTrack(self):
        t = self.client.getUserTagsForTrack('maristaran', 'Brian Wilson', 'Our Prayer Gee')
        assert type(t) == type([])
        assert len(t) == 1
        assert t == ['bombastic']
    def testTopArtistsForUser(self):
        t = self.client.getTopArtistsForUser('maristaran')
        assert type(t) == type([])
        assert len(t) == 1
        assert t[0]['name'] == 'The Beatles'
    def testTopTagsForTrack(self):
        # The fixture for this track deliberately contains no tags.
        t = self.client.getTopTagsForTrack('Willie Bobo', 'Funky Sneakers')
        assert type(t) == type([])
        assert len(t) == 0
    def testGetArtistData(self):
        t = self.client.getArtistData('Brian Wilson')
        assert type(t) == type({})
        assert t['name'] == 'Brian Wilson'
# TODO: tests para el agente
# class TestLastFmAgent(TestModel, DataTestCase):
# fixture = dbfixture
# datasets = [popfixtures.UserData, popfixtures.ServiceTypeData, popfixtures.ServiceData, popfixtures.AccountData]
# def setUp(self):
# TestModel.setUp(self)
# DataTestCase.setUp(self)
# self._restoreMethod = popserver.agents.lastfm_client.LastFMClient._getFeed
# LastFMClient._getFeed = types.MethodType(mock_lastfm_getFeed, None, LastFMClient)
# self.agent = LastFmAgent()
# self.user = self.data.UserData.dartagnan
# self.lastfm_svc = self.data.ServiceData.lastfm
# self.account = Account.get_by(user_id=self.user.id, service_id=self.lastfm_svc.id)
# def tearDown(self):
# dbsession.clear()
# DataTestCase.tearDown(self)
# TestModel.tearDown(self)
# LastFMClient._getFeed = types.MethodType(self._restoreMethod, None, LastFMClient)
# def test_getUserGraph(self):
# r = self.agent.getUserGraph(self.account)
# assert len(r) == 3 # grupos: top artists, top tracks y recently_listened
# assert map(type, r) == [popserver.model.ItemGroup, popserver.model.ItemGroup, popserver.model.ItemGroup]
# assert map(lambda g: type(g.items[0]), r) == [popserver.model.UserItem, popserver.model.UserItem,popserver.model.UserItem]
# assert map(lambda g: len(g.items), r) == [1, 1, 1]
# top_artists = r[0]
# assert type(top_artists.items[0].item) == popserver.model.Artist
# assert top_artists.items[0].item.title == 'The Beatles'
# top_tracks = r[1]
# assert type(top_tracks.items[0].item) == popserver.model.Song
# assert top_tracks.items[0].item.title == 'Our Prayer Gee'
# assert top_tracks.items[0].item.artist.title == 'Brian Wilson'
# recently_listened = r[2]
# assert type(recently_listened.items[0].item) == popserver.model.Song
# assert recently_listened.items[0].item.title == 'Funky Sneakers'
# assert recently_listened.items[0].item.artist.title == 'Willie Bobo'
# assert True
def mock_lastfm_getFeed(self, url):
    # Test double for LastFMClient._getFeed: maps known Last.fm feed URLs
    # to canned XML fixture files under tests/samples/lastfm/.
    samples = {
        'http://ws.audioscrobbler.com/1.0/user/maristaran/recenttracks.xml' : 'recenttracks.xml',
        'http://ws.audioscrobbler.com/1.0/artist/Willie%2BBobo/similar.xml' : 'willie-bobo-similar.xml',
        'http://ws.audioscrobbler.com/1.0/track/Willie%2BBobo/Funky%2BSneakers/toptags.xml' : 'funky-sneakers-toptags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/tracktags.xml?artist=Willie+Bobo&track=Funky+Sneakers' : 'funky-sneakers-tracktags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/toptracks.xml' : 'toptracks.xml',
        'http://ws.audioscrobbler.com/1.0/artist/Brian%2BWilson/similar.xml' : 'brian-wilson-similar.xml',
        'http://ws.audioscrobbler.com/1.0/track/Brian%2BWilson/Our%2BPrayer%2BGee/toptags.xml' : 'our-prayer-gee-toptags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/tracktags.xml?artist=Brian+Wilson&track=Our+Prayer+Gee' : 'maristaran-our-prayer-gee-toptags.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/topartists.xml' : 'topartists.xml',
        'http://ws.audioscrobbler.com/1.0/artist/The%2BBeatles/similar.xml' : 'beatles-similar.xml',
        'http://ws.audioscrobbler.com/1.0/user/maristaran/artisttags.xml?artist=The+Beatles' : 'maristaran-beatles-tags.xml'
    }
    import xml.dom.minidom
    if samples[url] == 404:
        # A fixture marked 404 simulates an HTTP failure (Python 2 urllib2).
        import urllib2
        raise urllib2.HTTPError
    else:
        return xml.dom.minidom.parse(popserver.tests.__path__[0] + '/samples/lastfm/' + samples[url])
# class TestLastfmAgent(DataTestCase, TestModel):
# fixture = dbfixture
# datasets = [popfixtures.UserData, popfixtures.ServiceTypeData, popfixtures.ServiceData, popfixtures.AccountData]
# def setUp(self):
# TestModel.setUp(self)
# DataTestCase.setUp(self)
# self.user = User.get_by(username='darty')
# self.lastfm_svc = Service.get_by(name='Last.FM')
# self.account = Account.get_by(user=self.user, service=self.lastfm_svc)
# self.agent = self.lastfm_svc.getAgent()
# def tearDown(self):
# DataTestCase.tearDown(self)
# TestModel.tearDown(self)
# LastFmAgent._getFeed = orig_getFeed
|
[
"santisiri@gmail.com"
] |
santisiri@gmail.com
|
6c0d1cac4f7d4207631446b5ea39072ab40066dd
|
18319a52cce2b3f3a3607a18f45cbd5933ad8e31
|
/venv/Lib/site-packages/bottle_sqlite.py
|
f568b58a6389cfdd11c1b2072cadb07f55fc79a2
|
[] |
no_license
|
AmithRajMP/Web-Tech-Assignment--2-
|
8d9e56ef6bd302661654e32182964b9fe5644801
|
ec7f410868f8936199bec19f01fce4ad6e081e79
|
refs/heads/master
| 2020-03-18T01:54:46.662732
| 2018-09-16T11:06:04
| 2018-09-16T11:06:04
| 134,165,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,833
|
py
|
'''
Bottle-sqlite is a plugin that integrates SQLite3 with your Bottle
application. It automatically connects to a database at the beginning of a
request, passes the database handle to the route callback and closes the
connection afterwards.
To automatically detect routes that need a database connection, the plugin
searches for route callbacks that require a `db` keyword argument
(configurable) and skips routes that do not. This removes any overhead for
routes that don't need a database connection.
Usage Example::
import bottle
from bottle.ext import sqlite
app = bottle.Bottle()
plugin = sqlite.Plugin(dbfile='/tmp/test.db')
app.install(plugin)
@app.route('/show/:item')
def show(item, db):
row = db.execute('SELECT * from items where name=?', item).fetchone()
if row:
return template('showitem', page=row)
return HTTPError(404, "Page not found")
'''
__author__ = "Marcel Hellkamp"
__version__ = '0.1.3'
__license__ = 'MIT'
### CUT HERE (see setup.py)
import sqlite3
import inspect
import bottle
# PluginError is defined to bottle >= 0.10
# Backfill it onto older bottle releases so this module can raise it
# uniformly below.
if not hasattr(bottle, 'PluginError'):
    class PluginError(bottle.BottleException):
        pass
    bottle.PluginError = PluginError
class SQLitePlugin(object):
    ''' This plugin passes an sqlite3 database handle to route callbacks
    that accept a `db` keyword argument. If a callback does not expect
    such a parameter, no connection is made. You can override the database
    settings on a per-route basis. '''

    name = 'sqlite'
    api = 2

    # NOTE: a dead Python-2 ``unicode``/``str`` compatibility shim
    # (try/except NameError around ``str``) was removed here; on Python 3
    # the builtin ``str`` is always defined, so it could never trigger.

    def __init__(self, dbfile=':memory:', autocommit=True, dictrows=True,
                 keyword='db', text_factory=str):
        """Store the connection defaults.

        :param dbfile: path of the sqlite database (default: in-memory).
        :param autocommit: commit automatically after the route returns.
        :param dictrows: enable name-based row access via sqlite3.Row.
        :param keyword: callback argument name that receives the handle.
        :param text_factory: factory applied to TEXT values (default str).
        """
        self.dbfile = dbfile
        self.autocommit = autocommit
        self.dictrows = dictrows
        self.keyword = keyword
        self.text_factory = text_factory

    def setup(self, app):
        ''' Make sure that other installed plugins don't affect the same
            keyword argument.'''
        for other in app.plugins:
            if not isinstance(other, SQLitePlugin):
                continue
            if other.keyword == self.keyword:
                raise PluginError("Found another sqlite plugin with "
                                  "conflicting settings (non-unique keyword).")
            elif other.name == self.name:
                # Same plugin installed twice: disambiguate our name.
                self.name += '_%s' % self.keyword

    def apply(self, callback, route):
        """Wrap `callback` so it receives an open database handle."""
        # hack to support bottle v0.9.x
        if bottle.__version__.startswith('0.9'):
            config = route['config']
            _callback = route['callback']
        else:
            config = route.config
            _callback = route.callback

        # Override global configuration with route-specific values.
        if "sqlite" in config:
            # support for configuration before `ConfigDict` namespaces
            g = lambda key, default: config.get('sqlite', {}).get(key, default)
        else:
            g = lambda key, default: config.get('sqlite.' + key, default)

        dbfile = g('dbfile', self.dbfile)
        autocommit = g('autocommit', self.autocommit)
        dictrows = g('dictrows', self.dictrows)
        keyword = g('keyword', self.keyword)
        # BUG FIX: this previously looked up g('keyword', ...), so a
        # route-level 'text_factory' setting was silently ignored.
        text_factory = g('text_factory', self.text_factory)

        # Test if the original callback accepts a 'db' keyword.
        # Ignore it if it does not need a database handle.
        # (getfullargspec replaces getargspec, removed in Python 3.11.)
        argspec = inspect.getfullargspec(_callback)
        if keyword not in argspec.args:
            return callback

        def wrapper(*args, **kwargs):
            # Connect to the database and configure the connection.
            db = sqlite3.connect(dbfile)
            db.text_factory = text_factory
            if dictrows:
                # This enables column access by name: row['column_name']
                db.row_factory = sqlite3.Row

            # Add the connection handle as a keyword argument.
            kwargs[keyword] = db

            try:
                rv = callback(*args, **kwargs)
                if autocommit:
                    db.commit()
            except sqlite3.IntegrityError as e:
                db.rollback()
                raise bottle.HTTPError(500, "Database Error", e)
            except bottle.HTTPError as e:
                raise
            except bottle.HTTPResponse as e:
                # bottle uses HTTPResponse for non-error early exits;
                # still commit before letting it propagate.
                if autocommit:
                    db.commit()
                raise
            finally:
                db.close()
            return rv

        # Replace the route callback with the wrapped one.
        return wrapper


Plugin = SQLitePlugin
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
f06e381bb59634a56edccfa28b2c65ece1dd503f
|
0cc075bb2b1c30c257dc5eafa1e309ee9f5bfadc
|
/tests/__init__.py
|
ebd6f8d11eadd944b39f2ea50e1f67c0fadf85b2
|
[] |
no_license
|
gcaaa31928/BusTrackerTaipeiAppiumTesting
|
d2f49e39db41b9507d8f05338defed7c72f3e456
|
1363ca72e10369affd4397223d150556b9172995
|
refs/heads/master
| 2021-01-21T14:48:32.558734
| 2016-06-21T19:24:39
| 2016-06-21T19:24:39
| 57,986,263
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# Import the individual test modules so that importing the package loads
# the whole suite (original order preserved).
from . import test_basic
from . import test_app
from . import test_nearby_page
from . import test_nearby_page_stations
from . import test_nearby_page_info  # fixed: was "from .import", missing space
|
[
"gcaaa31928@gmail.com"
] |
gcaaa31928@gmail.com
|
b78dfb29942edd235434f9bf380f779d177759f5
|
06ad345f69a2f91b5c5a730bb4943f04cba93a44
|
/Pygame/08_functions_and_graphics.py
|
17721ec3ef30096ed81712cd5443420abe7f5daf
|
[] |
no_license
|
whyj107/Python
|
61bd58d202350616a322b7b75086976f354bda9b
|
3c3649ca8c6ac6908ac14d6af201d508cc4fbf30
|
refs/heads/master
| 2021-04-13T23:49:13.470657
| 2021-03-27T10:08:39
| 2021-03-27T10:08:39
| 249,195,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,593
|
py
|
"""
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/_XdrKSDmzqA
"""
# Import a library of functions called 'pygame'
import pygame
def draw_snowman(screen, x, y):
    """ --- Function for a snowman ---
    Define a function that will draw a snowman at a certain location.

    (x, y) is the top-left reference point; three white ellipses stack
    downward from a small head to a large base.
    """
    pygame.draw.ellipse(screen, WHITE, [35 + x, 0 + y, 25, 25])
    pygame.draw.ellipse(screen, WHITE, [23 + x, 20 + y, 50, 50])
    pygame.draw.ellipse(screen, WHITE, [0 + x, 65 + y, 100, 100])
# Initialize the game engine
pygame.init()
# Define the colors we will use in RGB format
BLACK = [0, 0, 0]
WHITE = [255, 255, 255]
# Set the height and width of the screen
size = [400, 500]
screen = pygame.display.set_mode(size)
# Loop until the user clicks the close button.
done = False
# Clock regulates the frame rate of the main loop below.
clock = pygame.time.Clock()
while not done:
    # Process the event queue; a window-close request ends the loop.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
    # Clear the screen and set the screen background
    screen.fill(BLACK)
    # Snowman in upper left
    draw_snowman(screen, 10, 10)
    # Snowman in upper right
    draw_snowman(screen, 300, 10)
    # Snowman in lower left
    draw_snowman(screen, 10, 300)
    # Go ahead and update the screen with what we've drawn.
    # This MUST happen after all the other drawing commands.
    pygame.display.flip()
    # This limits the while loop to a max of 60 times per second.
    # Leave this out and we will use all CPU we can.
    clock.tick(60)
|
[
"60024292+whyj107@users.noreply.github.com"
] |
60024292+whyj107@users.noreply.github.com
|
0b90c23e619a3ae46e07bc5db9c5a8cdcf249e3e
|
4a76ac7ad1aaeec44729ab6d5b121b1cae0d910c
|
/Week 9/pathSum3.py
|
4ff31f3c3811e17e09b8c3e224c844bfd576a84c
|
[] |
no_license
|
kalmad99/CompetitiveProgramming
|
2d825e839faa9e13ef43dbb45498bd3eef6723ab
|
6cbb1f12f7670d0016fa2af8f2dd597d9123070d
|
refs/heads/main
| 2023-03-25T20:18:23.389396
| 2021-03-24T21:36:52
| 2021-03-24T21:36:52
| 325,816,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,225
|
py
|
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        # Node payload plus optional child links.
        self.val = val
        self.left = left
        self.right = right

class Solution:
    def pathSum(self, root: TreeNode, sum: int) -> int:
        """Count downward paths whose node values add up to `sum`.

        Uses running prefix sums: a path ending at the current node hits
        the target exactly when (running - sum) was already seen as a
        prefix on the way down from the root (LeetCode 437).
        """
        if not root:
            return 0
        target = sum
        prefix_counts = {0: 1}   # the empty prefix
        found = [0]              # mutable cell shared with the closure

        def walk(node, running):
            running += node.val
            # One matching path per earlier prefix of value running-target.
            found[0] += prefix_counts.get(running - target, 0)
            prefix_counts[running] = prefix_counts.get(running, 0) + 1
            if node.left:
                walk(node.left, running)
            if node.right:
                walk(node.right, running)
            # Backtrack: this prefix leaves the current root-to-node path.
            if prefix_counts[running] == 1:
                del prefix_counts[running]
            else:
                prefix_counts[running] -= 1

        walk(root, 0)
        return found[0]
|
[
"kalemesfin12@gmail.com"
] |
kalemesfin12@gmail.com
|
edc85322d46ee344788712c6051ad5c5a397f1bf
|
03f9b8bdea312636afb4df3737b55cb0cc4b21ff
|
/AddTwoNumbers.py
|
7895bccd673b9da3b411d758be1b46902a6bbfaa
|
[] |
no_license
|
ellinx/LC-python
|
f29dd17bbe15407ba0d06ad68386efdc9a343b56
|
9190d3d178f1733aa226973757ee7e045b7bab00
|
refs/heads/master
| 2021-06-01T15:21:24.379811
| 2020-10-29T04:37:07
| 2020-10-29T04:37:07
| 132,704,788
| 1
| 1
| null | 2019-05-15T03:26:11
| 2018-05-09T05:13:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,404
|
py
|
"""
You are given two non-empty linked lists representing two non-negative integers.
The digits are stored in reverse order and each of their nodes contain a single digit.
Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Example:
Input: (2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 0 -> 8
Explanation: 342 + 465 = 807.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # Single merged loop: keep consuming digits from whichever list
        # still has nodes, propagating the carry, until both lists and
        # the carry are exhausted (the original used three loops).
        sentinel = ListNode(0)
        tail = sentinel
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return sentinel.next
|
[
"ellin.xll@gmail.com"
] |
ellin.xll@gmail.com
|
a8b91d81f59792d3e00a09a34b0fe6942174e581
|
a0a8fbda5657169ad180ae7008a505caff500182
|
/courses/admin.py
|
8e17ab02c432207b3c3e296d96d3896576f94f59
|
[] |
no_license
|
WellingtonIdeao/educa-learning
|
4e32d88c741dd7e2fbe45a4194c0ee1970697ca0
|
393c4849e59ee7533e7048d75b42aea2e3e64121
|
refs/heads/main
| 2023-06-26T04:29:50.741232
| 2021-07-24T18:13:28
| 2021-07-24T18:13:28
| 386,041,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
from django.contrib import admin
from .models import Subject, Course, Module
# use memcache admin index site
admin.site.index_template = 'memcache_status/admin_index.html'
@admin.register(Subject)
class SubjectAdmin(admin.ModelAdmin):
    # Subject listing; slug is auto-filled from the title as it is typed.
    list_display = ('title', 'slug')
    prepopulated_fields = {'slug': ('title',)}
class ModuleInline(admin.StackedInline):
    # Lets a course's modules be edited inline on the Course admin page.
    model = Module
@admin.register(Course)
class CourseAdmin(admin.ModelAdmin):
    # Course admin: filterable by creation date and subject, full-text
    # search on title/overview, slug auto-filled, modules edited inline.
    list_display = ('title', 'subject', 'created')
    list_filter = ('created', 'subject')
    search_fields = ('title', 'overview')
    prepopulated_fields = {'slug': ('title',)}
    inlines = [ModuleInline]
|
[
"wellington.ideao@gmail.com"
] |
wellington.ideao@gmail.com
|
8b34bc1e8dd03cd34cb5b8d1adc629cdc9f628c6
|
b7b2f80ab5e1ee0ea028576e3014b62b8d3a8d7e
|
/pyserv/pyserv-010/pycli_enc.py
|
4a49b1d65c35c16af0fadd3f6430ecdc0b665737
|
[] |
no_license
|
pglen/pgpygtk
|
4d1405478a714f003984cf3e3db04ff1f767470b
|
33f58010e304f1a312f2356de453ecedb7aa21ef
|
refs/heads/master
| 2021-01-22T01:18:52.238415
| 2019-01-01T01:37:24
| 2019-01-01T01:37:24
| 102,215,955
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
#!/usr/bin/env python
# ------------------------------------------------------------------------
# Test client for the pyserv project. Encrypt test.
import os, sys, getopt, signal, select, socket, time, struct
import random, stat
import pyserv.pydata, pyserv.pyservsup, pyserv.pycrypt
from pyserv.pyclisup import *
# ------------------------------------------------------------------------
# Globals
version = 1.0
# ------------------------------------------------------------------------
# Functions from command line
def phelp():
    # Print command-line usage and exit (Python 2 print statements).
    print
    print "Usage: " + os.path.basename(sys.argv[0]) + " [options]"
    print
    print "Options: -d level - Debug level 0-10"
    print " -p port - Port to use (default: 9999)"
    print " -v - Verbose"
    print " -q - Quiet"
    print " -h - Help"
    print
    sys.exit(0)
def pversion():
    # Print the program name and version, then exit.
    print os.path.basename(sys.argv[0]), "Version", version
    sys.exit(0)
# option, var_name, initial_val, function
# Each row: getopt spec (':' means the flag takes a value), the Config
# attribute it populates, its default, and an optional action callback.
optarr = \
    ["d:", "pgdebug", 0, None], \
    ["p:", "port", 9999, None], \
    ["v", "verbose", 0, None], \
    ["q", "quiet", 0, None], \
    ["t", "test", "x", None], \
    ["V", None, None, pversion], \
    ["h", None, None, phelp] \

conf = Config(optarr)
# ------------------------------------------------------------------------
if __name__ == '__main__':
    # Parse options; any leftover positional argument is the server IP.
    args = conf.comline(sys.argv[1:])
    if len(args) == 0:
        ip = '127.0.0.1'
    else:
        ip = args[0]
    s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    init_handler(s1)
    try:
        s1.connect((ip, conf.port))
    except:
        print "Cannot connect to:", ip + ":" + str(conf.port), sys.exc_info()[1]
        sys.exit(1)
    # Scripted session: authenticate, then exercise the encryption-key
    # exchange ('ekey') before and after clearing the key.
    client(s1, "ver", conf.verbose)
    client(s1, "user peter", conf.verbose)
    client(s1, "pass 1234", conf.verbose)
    xkey = "" #"1234"
    #client(s1, "ekey " + xkey, conf.verbose)
    client(s1, "ver ", conf.verbose, xkey)
    client(s1, "ekey ", conf.verbose, xkey)
    xkey = ""
    client(s1, "ver ", conf.verbose, xkey)
    client(s1, "quit", conf.verbose, xkey)
    s1.close();
    sys.exit(0)
|
[
"peterglen99@gmail.com"
] |
peterglen99@gmail.com
|
43a7a2f7b8f4cd2cf1ef4e1d2914694c7c2d63f4
|
6a95b330e1beec08b917ff45eccfd6be3fd4629f
|
/kubernetes/test/test_v1beta2_replica_set_spec.py
|
90125f05d99575fb7f67ab126e67908b79902a85
|
[
"Apache-2.0"
] |
permissive
|
TokkoLabs/client-python
|
f4a83d6540e64861b59e322c951380a670578d7f
|
f1ad9c6889105d8510472606c98f8d3807f82020
|
refs/heads/master
| 2023-07-14T01:36:46.152341
| 2017-12-21T21:32:11
| 2017-12-21T21:32:11
| 115,042,671
| 0
| 0
|
Apache-2.0
| 2021-08-06T03:29:17
| 2017-12-21T20:05:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,003
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta2_replica_set_spec import V1beta2ReplicaSetSpec
class TestV1beta2ReplicaSetSpec(unittest.TestCase):
""" V1beta2ReplicaSetSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta2ReplicaSetSpec(self):
"""
Test V1beta2ReplicaSetSpec
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta2_replica_set_spec.V1beta2ReplicaSetSpec()
pass
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
d998e32f40906238b53714101a60abb3a69745ed
|
550116c99a6b4572945324dddb7bd70d66f9a8e2
|
/src/Lists_Basics_More_Exercises/04_Battle_Ships.py
|
11666b8448ab02aa5f19bb88e2f831a38ac655d8
|
[] |
no_license
|
ivelinakaraivanova/SoftUniPythonFundamentals
|
810cc92796b335f31bae662fa255de66418fb1fd
|
31bb4bbe817a65400bc444dbc5b692bd2ef94432
|
refs/heads/main
| 2022-12-28T08:53:33.587999
| 2020-10-18T16:15:09
| 2020-10-18T16:15:09
| 305,124,614
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 384
|
py
|
n = int(input())
field = []
for i in range(n):
split_row = list(map(int, input().split(" ")))
field.append(split_row)
attacked = input().split(" ")
ships = 0
for item in attacked:
row = int(item.split("-")[0])
col = int(item.split("-")[1])
if field[row][col] > 0:
field[row][col] -= 1
if field[row][col] == 0:
ships += 1
print(ships)
|
[
"73067985+ivelinakaraivanova@users.noreply.github.com"
] |
73067985+ivelinakaraivanova@users.noreply.github.com
|
9e33d666bbf4ced1f73f62d0f4f02a6ca92f334f
|
938c55df0653b377318cd434f0fedb97036cfe7d
|
/day26/flask_web/app.py
|
a21e33c290a456cd124540302ff1aaf6fe620363
|
[] |
no_license
|
elliajen/pyworks
|
6f754d0caaa4d110549f7704ade72f0002e63adb
|
a24a7c02f338fa8d7cfdab5a0d8bc005532dfa99
|
refs/heads/master
| 2023-08-26T17:27:11.893396
| 2021-10-22T04:28:43
| 2021-10-22T04:28:43
| 402,286,435
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
# 웹서버(local 내 컴퓨터) 가동
from flask import Flask
app = Flask(__name__) #flask 클래스에서 app 객체 생성
@app.route('/') #라우트(경로 설정)
def index():
return "<h1>Hello~ Flask!</h1>" #인텍스페이지에서 문자 출력
@app.route('/login')
def login():
return "<h2>로그인 페이지입니다.</h2>"
@app.route('/member')
def member():
return "<h2>회원가입 페이지입니다.</h2>"
app.run()
|
[
"dmstndhk123@naver.com"
] |
dmstndhk123@naver.com
|
d7c4ef7b34329b7553264d71940737cdf5fc1cdd
|
29bd55d171733586f24f42151d44f4312b6a610e
|
/keras/keras102_lamda.py
|
91b649ecbf888df8c63544cd18c8276d7f322857
|
[] |
no_license
|
votus777/AI_study
|
66ab1da2b8e760d0c52b0ed2b2f74158e14f435b
|
f4e38d95690c8ee84d87c02dc20a1ea59c495f04
|
refs/heads/master
| 2022-12-04T15:52:14.855624
| 2020-08-20T06:12:52
| 2020-08-20T06:12:52
| 262,975,960
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 196
|
py
|
gradient = lambda x : 2*x - 4
def gradient2(x) :
temp = 2*x - 4
return temp
# 두 개의 다른 표현식이다.
x = 3
print(gradient(x))
print(gradient2(x))
|
[
"votus777@users.noreply.github.com"
] |
votus777@users.noreply.github.com
|
039ccf93cec028a95a10e965719e2644dea90629
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part004501.py
|
ed45a8b978dd61582172fdc173ae6454577b7843
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher96697(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.4.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.4.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher96697._instance is None:
CommutativeMatcher96697._instance = CommutativeMatcher96697()
return CommutativeMatcher96697._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 96696
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
3e87a4ddd10506d80c44ca4532849331eefd3f4b
|
1bebf42f17c558488fce0bea5c58b55fd049c286
|
/django_table_filtering/wsgi.py
|
6962142610ad722b2c01ceb12560021ec1f4c8a0
|
[
"Unlicense"
] |
permissive
|
ckbelvedere/django_table_filtering
|
60ab100fde25e9fbe4e38f7e7b5b4257f45d08fa
|
d82b89a871779b6a000945da14455c9acb8c47bb
|
refs/heads/master
| 2021-01-02T22:30:01.663587
| 2016-11-22T11:22:44
| 2016-11-22T11:22:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
"""
WSGI config for django_table_filtering project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_table_filtering.settings")
application = get_wsgi_application()
|
[
"spapas@gmail.com"
] |
spapas@gmail.com
|
51e8fae298d3258f1bc488df509e29b0599f907b
|
5fe72bb13baf3649058ebe11aa86ad4fc56c69ed
|
/hard-gists/299905/snippet.py
|
463be82ad5d7f9e9ed4f498a8c2cafdb1dd3dfbf
|
[
"Apache-2.0"
] |
permissive
|
dockerizeme/dockerizeme
|
8825fed45ff0ce8fb1dbe34959237e8048900a29
|
408f3fa3d36542d8fc1236ba1cac804de6f14b0c
|
refs/heads/master
| 2022-12-10T09:30:51.029846
| 2020-09-02T13:34:49
| 2020-09-02T13:34:49
| 144,501,661
| 24
| 20
|
Apache-2.0
| 2022-11-21T12:34:29
| 2018-08-12T21:21:04
|
Python
|
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
"Memcached cache backend"
from django.core.cache.backends import memcached
from django.utils.encoding import smart_unicode, smart_str
MIN_COMPRESS_LEN = 150000
class CacheClass(memcached.CacheClass):
def add(self, key, value, timeout=None, min_compress_len=MIN_COMPRESS_LEN):
if isinstance(value, unicode):
value = value.encode('utf-8')
if timeout is None:
timeout = self.default_timeout
return self._cache.add(smart_str(key), value, timeout, min_compress_len)
def set(self, key, value, timeout=None, min_compress_len=MIN_COMPRESS_LEN):
if isinstance(value, unicode):
value = value.encode('utf-8')
if timeout is None:
timeout = self.default_timeout
self._cache.set(smart_str(key), value, timeout)
def set_many(self, data, timeout=None, min_compress_len=MIN_COMPRESS_LEN):
safe_data = {}
for key, value in data.items():
if isinstance(value, unicode):
value = value.encode('utf-8')
safe_data[smart_str(key)] = value
if timeout is None:
timeout = self.default_timeout
self._cache.set_multi(safe_data, timeout, min_compress_len=min_compress_len)
|
[
"42325807+dockerizeme@users.noreply.github.com"
] |
42325807+dockerizeme@users.noreply.github.com
|
d23bdd30e8190210edd9107c2e3d8da1127f0046
|
24c5c46f1d281fc15de7f6b72a5148ae85f89fb4
|
/SRC/unittest/case_interface_bak.py
|
b566f32234c2a6425ea57ff17e5e87a1788e972a
|
[] |
no_license
|
enterpriseih/easyTest
|
22d87c7ffe40fb10a07f7c5cdd505f63dd45adc0
|
43b8d294e898f25055c78313cfece2753352c250
|
refs/heads/master
| 2023-08-23T22:55:14.798341
| 2020-02-11T09:13:43
| 2020-02-11T09:13:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,127
|
py
|
# coding=utf-8
import functools
import json
import unittest as UT
import requests
from SRC import settings
from SRC.common.const import RunStatus, RequestMethod, RequestDataType, RunResult
from SRC.common.decorator import assert_dec, codeException_dec
from SRC.common.exceptions import JsonLoadsException
from SRC.common.fileHelper import isNoneOrEmpty
from SRC.common.loga import putSystemLog
from SRC.common.param import Param
from SRC.common.utils import impClass, isAbsoluteUrl
from SRC.interface_info import projectClass
'''
接口测试用例
'''
class TestCase(UT.TestCase):
def __init__(self, jsonParam):
super(TestCase, self).__init__('runTest')
self.__param = Param(jsonParam['paramsDict'])
self.logger = jsonParam['logger']
self.scriptId = jsonParam['scriptId']
self.projectObj = None # 包含产品信息的对象
self.requestData = None # 请求参数
self.response = None # 响应结果
@property
def param(self):
'''
参数化驱动属性名称
:return:
'''
return self.__param
def setUp(self):
putSystemLog('开始运行脚本%s' % (str(self.__class__)), self.logger)
try:
self.initProjectObj() # 根据不同项目动态初始化对象
self.initRequestData() # 初始化请求参数数据
except JsonLoadsException as e:
putSystemLog(e, self.logger, True, RunStatus.RUNNING, RunResult.ERROR, True, '异常')
except Exception as e:
putSystemLog('[ERROR-2007-0]:测试用例初始化数据引发的异常.请检查参数是否配置正确%s' % e, self.logger, True, RunStatus.RUNNING,
RunResult.ERROR,
True, '异常')
raise
def initRequestData(self):
dataType = self.param.dataType # 请求类型
if dataType == RequestDataType.JSON: # 请求类型为json
self.requestData = self.getFullRequestJsonData()
def initProjectObj(self):
project = impClass(projectClass[self.param.projectClass]) # 动态获取对象
self.projectObj = project(self.param.data.replace("'", "\"")) # 初始化一个项目对象
self.setAbsoluteUrl() # 设置url
def setAbsoluteUrl(self):
if not isAbsoluteUrl(self.param.url):
self.param.url = self.projectObj.getFullUrl(self.param.url) # 获取完整的url
def getFullRequestJsonData(self):
return self.projectObj.getFullRequestData()
@codeException_dec('3')
def runTest(self):
url = self.param.url
method = self.param.method
data = self.requestData
putSystemLog('测试项目简称:%s' % (self.param.projectClass), self.logger, True, RunStatus.RUNNING, RunResult.PASS,
False, '测试项目简称')
putSystemLog('待测试接口:%s' % (url), self.logger, True, RunStatus.RUNNING, RunResult.PASS, False, '待测接口')
putSystemLog('请求方式:%s' % (method), self.logger, True, RunStatus.RUNNING, RunResult.PASS, False, '请求方式')
putSystemLog('请求数据类型:%s' % (self.param.dataType), self.logger, True, RunStatus.RUNNING, RunResult.PASS, False,
'请求数据类型')
putSystemLog('返回数据类型:%s' % (self.param.expectType), self.logger, True, RunStatus.RUNNING, RunResult.PASS, False,
'返回数据类型')
putSystemLog('请求参数:%s' % (json.dumps(self.requestData,indent=4)), self.logger, True, RunStatus.RUNNING, RunResult.PASS, False,
'请求参数')
if method == RequestMethod.GET:
self.response = requests.get(url, params=data)
elif method == RequestMethod.POST:
self.response = requests.post(url, data=data)
def compareResult(self):
param = self.param
r = self.response
expectType = param.expectType
putSystemLog('响应值:%s' % (r.status_code), self.logger, True, RunStatus.RUNNING, RunResult.PASS, True, '响应值')
if expectType == RequestDataType.JSON:
if isNoneOrEmpty(self.param.expect):
pass
else:
compareResult = self.compare()
putSystemLog('Json对比结果:%s,%s' % compareResult[0],compareResult[1], self.logger, True, RunStatus.RUNNING, RunResult.PASS, True,'Json对比结果')
elif expectType == RequestDataType.STRING:
putSystemLog(r.text, self.logger)
def tearDown(self):
self.compareResult()
putSystemLog('脚本运行完毕...', self.logger)
def compare(self):
'''
继承该方法实现对比的重写
:return:
'''
return self.__compareJson(keyCompare=True, formatCompare=True, valueCompare=True, equal=False)
def __compareJson(self, keyCompare=True, formatCompare=True, valueCompare=False, equal=False):
try:
expectJson = json.loads(self.param.expect.strip().replace("'", "\""))
resultJson = json.loads(self.response.text.strip().replace("'", "\""))
compareResult = (False, '')
if keyCompare:
compareResult = self.projectObj.compareKey(expectJson, resultJson, equal)
if not compareResult[0]:
return compareResult
if formatCompare:
compareResult = self.projectObj.compareFormat(expectJson, resultJson, equal)
if not compareResult[0]:
return compareResult
if valueCompare:
compareResult = self.projectObj.compareAllValue(expectJson, resultJson, equal)
if not compareResult[0]:
return compareResult
return compareResult
except:
raise
|
[
"yaolihui0506"
] |
yaolihui0506
|
df7fd05f0a9a837110aabf93f811e9f0522e6c47
|
2bdb128188c40c670fd0a26ca8a447cae58a8848
|
/tests/commands/test_command_sharer.py
|
670f1b14dde21a3390b02c5eb845a98bb1b06c84
|
[] |
no_license
|
cl33per/chat_thief
|
3de834cbb722a36f37c5e033e3f8c7672c325ec4
|
afb7660eacb3b5f476367eb388b4e6981a49e54a
|
refs/heads/master
| 2022-11-14T13:07:49.213617
| 2020-07-01T16:27:45
| 2020-07-01T16:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,401
|
py
|
import pytest
from chat_thief.commands.command_sharer import CommandSharer
from chat_thief.models.user import User
from chat_thief.models.command import Command
from tests.support.database_setup import DatabaseConfig
class TestCommandSharer(DatabaseConfig):
def test_share(self):
user = User("fake_user")
user.update_cool_points(1)
command = Command("damn")
friend = User("bizmarkie")
command.allow_user(user.name)
assert user.name in command.users()
assert friend.name not in command.users()
subject = CommandSharer(
user=user.name, command=command.name, friend=friend.name,
)
subject.share()
assert user.name in command.users()
assert friend.name in command.users()
assert user.cool_points() == 0
assert command.cost() == 3
def test_broke_boi_share(self):
user = User("fake_user")
command = Command("damn")
friend = User("bizmarkie")
command.allow_user(user.name)
assert user.name in command.users()
assert friend.name not in command.users()
subject = CommandSharer(
user=user.name, command=command.name, friend=friend.name,
)
subject.share()
assert user.name in command.users()
assert friend.name not in command.users()
assert command.cost() == 1
|
[
"davidmichaelbe@gmail.com"
] |
davidmichaelbe@gmail.com
|
9625b08110ec98c0f1151eaf9b340266f63fcddd
|
579e19a7b861e0549874b6e263c24aa418f3fdfc
|
/samples/QueryChangeNotification.py
|
992136bbfffae0d1ea7199297095a6391b650545
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
33bunny/python-cx_Oracle
|
3f724df9a19ffd3f68e168fe7c57e4e057c8a780
|
26c3a4c2439642f9598bc9b1ca0d6ddf73890428
|
refs/heads/master
| 2021-05-07T15:17:18.873197
| 2017-11-06T20:41:59
| 2017-11-06T20:41:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,702
|
py
|
#------------------------------------------------------------------------------
# Copyright 2016, 2017, Oracle and/or its affiliates. All rights reserved.
#
# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved.
#
# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta,
# Canada. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# QueryChangeNotification.py
# This script demonstrates using query change notification in Python, a
# feature that is available in Oracle 11g. Once this script is running, use
# another session to insert, update or delete rows from the table
# cx_Oracle.TestTempTable and you will see the notification of that change.
#
# This script requires cx_Oracle 5.3 and higher.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import SampleEnv
import threading
import time
registered = True
def callback(message):
global registered
print("Message type:", message.type)
if message.type == cx_Oracle.EVENT_DEREG:
print("Deregistration has taken place...")
registered = False
return
print("Message database name:", message.dbname)
print("Message queries:")
for query in message.queries:
print("--> Query ID:", query.id)
print("--> Query Operation:", query.operation)
for table in query.tables:
print("--> --> Table Name:", table.name)
print("--> --> Table Operation:", table.operation)
if table.rows is not None:
print("--> --> Table Rows:")
for row in table.rows:
print("--> --> --> Row RowId:", row.rowid)
print("--> --> --> Row Operation:", row.operation)
print("-" * 60)
print("=" * 60)
connection = cx_Oracle.Connection(SampleEnv.MAIN_CONNECT_STRING, events = True)
sub = connection.subscribe(callback = callback, timeout = 1800,
qos = cx_Oracle.SUBSCR_QOS_QUERY | cx_Oracle.SUBSCR_QOS_ROWIDS)
print("Subscription:", sub)
print("--> Connection:", sub.connection)
print("--> Callback:", sub.callback)
print("--> Namespace:", sub.namespace)
print("--> Protocol:", sub.protocol)
print("--> Timeout:", sub.timeout)
print("--> Operations:", sub.operations)
print("--> Rowids?:", bool(sub.qos & cx_Oracle.SUBSCR_QOS_ROWIDS))
queryId = sub.registerquery("select * from TestTempTable")
print("Registered query:", queryId)
while registered:
print("Waiting for notifications....")
time.sleep(5)
|
[
"anthony.tuininga@oracle.com"
] |
anthony.tuininga@oracle.com
|
229cda51cb090287639fac9f73866fc0fc07e7f9
|
76742bf1c7dee6a01a0a41402fe734eeb0da3d74
|
/tango_with_django_project/populate_rango.py
|
e7ae46306107f30b3103b2bbed482e688ef88477
|
[] |
no_license
|
Zacharilius/tangoProject
|
e5490c80af3caaabe2cf132a40387db2574713dc
|
305fa20e344f8ad24514dff959be3e4e3632645e
|
refs/heads/master
| 2021-01-22T23:26:51.921743
| 2015-03-17T17:52:22
| 2015-03-17T17:52:22
| 29,359,319
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
python_cat = add_cat('Python', 128, 64)
add_page(cat=python_cat,
title = "Official Python Tutorial",
url = "http://docs.python.org/2/tutorial/")
add_page(cat = python_cat,
title = "How to think like a computer scientist",
url = "http://www.greenteapress.com/thinkpython/")
add_page(cat = python_cat,
title = "Learn Python in 10 minutes",
url = "http://www.korokithakis.net/tutorials/python/")
django_cat = add_cat("Django", 64, 32)
add_page(cat = django_cat,
title = "Official Django Tutorial",
url = "http://bottlepy.org/docs/dev/")
add_page(cat = django_cat,
title = "Django Rocks",
url = "http://www.djangorocks.com/")
add_page(cat = django_cat,
title = "How to Tango with Django",
url = "http://www.tangowithdjango.com/")
frame_cat = add_cat("Other Frameworks", 32, 16)
add_page(cat = frame_cat,
title = "Bottle",
url = "http://bottlepy.org/docs/dev")
add_page(cat = frame_cat,
title = "Flask",
url = "http://flask.pocoo.org")
for c in Category.objects.all():
for p in Page.objects.filter(category = c):
print "- {0} - {1}".format(str(c), str(p))
def add_page(cat, title, url, views = 0):
p = Page.objects.get_or_create(category=cat, title = title)[0]
p.url = url
p.views = views
p.save()
return p
def add_cat(name, views=0, likes=0):
c = Category.objects.get_or_create(name=name)[0]
c.views = views
c.likes = likes
c.save()
return c
if __name__ == '__main__':
print "Starting Rango population script..."
populate()
|
[
"zabensley@gmail.com"
] |
zabensley@gmail.com
|
40f6ac93d8d20662057715e8f2cbaa97ea5555a2
|
850d778687e3692ab2a38d4d2227391d92c21e6b
|
/atcoder.jp/code-formula-2014-qualb/code_formula_2014_qualB_c/Main.py
|
64274ab66ae68c3b570b2c37dd74d7c974e4130a
|
[] |
no_license
|
Valkyrja3607/AtCoder
|
77e2e5e66c0e8e12bb902c35f679119c6576fad7
|
9218a50b1eb83e4498845d15d9dda41fab90ed73
|
refs/heads/master
| 2023-07-15T20:38:52.911301
| 2018-05-30T17:56:22
| 2018-05-30T17:56:22
| 294,980,006
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 902
|
py
|
a=input()
b=input()
n=len(a)
import collections
l1=collections.Counter(list(a))
l2=collections.Counter(list(b))
if l1!=l2:
print("NO")
exit()
cn=0
s1=[]
s2=[]
l=[]
for i,j in zip(a,b):
if i!=j:
s1.append(i)
s2.append(j)
cn+=1
else:
l.append(i)
c=collections.Counter(l)
ll=c.most_common()
stock=[list(tup) for tup in ll]
if cn>6:
print("NO")
else:
while len(s1)<6:
if len(s1)==n:
break
stock[0][1]-=1
s1.append(stock[0][0])
s2.append(stock[0][0])
if stock[0][1]==0:
del stock[0]
import itertools
cn=len(s1)
swaps=list(itertools.combinations(list(range(cn)),2))
for p in itertools.product(swaps,repeat=3):
s=s1.copy()
for i,j in p:
s[i],s[j]=s[j],s[i]
if s==s2:
print("YES")
exit()
print("NO")
|
[
"purinjolly@gmail.com"
] |
purinjolly@gmail.com
|
fb21e4714b35a21708dd10bbf7e8713cdec95421
|
ae71e532468e861e3a9fcb90f613eddca267ace6
|
/routes/class_reports.py
|
72d6d93574c1858ca22af86f2c905041302f7431
|
[
"CC-BY-4.0"
] |
permissive
|
soon14/proms-4.0
|
0b4ed398125e529c13dc8f0d9b0c14e0348ae5c6
|
6c3a1fd62c9394761664e100fc1dde50fd79dc11
|
refs/heads/master
| 2020-09-23T20:33:56.716317
| 2019-06-09T04:01:29
| 2019-06-09T04:01:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,412
|
py
|
from .class_incoming import IncomingClass
import io
import uuid
from rdflib import Graph, Namespace, URIRef, Literal, RDF, XSD
from . import api_functions
import modules.rulesets.reports as report_rulesets
import settings
from modules.ldapi import LDAPI
from datetime import datetime
class IncomingReport(IncomingClass):
def __init__(self, request):
IncomingClass.__init__(self, request)
self.type = None
self._generate_named_graph_uri()
def valid(self):
"""Validates an incoming Report using direct tests (can it be parsed?) and appropriate RuleSets"""
# try to parse the Report data
try:
#print([item[1] for item in LDAPI.MIMETYPES_PARSERS if item[0] == self.request.mimetype][0])
self.graph = Graph().parse(
#io.StringIO(self.request.data),
data=self.request.data.decode(encoding="utf-8"),
format=[item[1] for item in LDAPI.MIMETYPES_PARSERS if item[0] == self.request.mimetype][0]
)
except Exception as e:
self.error_messages = ['The serialised data cannot be parsed. Is it valid RDF?',
'Parser says: ' + str(e)]
return False
# try to determine Report type
result = self.graph.query('''
PREFIX proms: <http://promsns.org/def/proms#>
SELECT DISTINCT ?type WHERE {
?r a ?type .
FILTER (?type = proms:BasicReport || ?type = proms:ExternalReport || ?type = proms:InternalReport)
}
''')
if len(result) != 1:
self.error_messages = [
'Could not determine Report type. Must be one of proms:BasicReport, proms:ExternalReport or '
'proms:InternalReport'
]
return False
else:
for row in result:
self.type = str(row[0])
# choose RuleSet based on Report type
if self.type == 'http://promsns.org/def/proms#BasicReport':
conformant_report = report_rulesets.BasicReport(self.graph)
elif self.type == 'http://promsns.org/def/proms#ExternalReport':
conformant_report = report_rulesets.ExternalReport(self.graph)
else: # self.report_type == 'InternalReport':
conformant_report = report_rulesets.InternalReport(self.graph)
if not conformant_report.passed:
self.error_messages = conformant_report.fail_reasons
return False
# if the Report has been parsed, we have found the Report type and it's passed it's relevant RuleSet, it's valid
return True
def determine_uri(self):
"""Determines the URI for this Report"""
# TODO: replace these two SPARQL queries with one, use the inverse of the "placeholder" find
# if this Report has a placeholder URI, generate a new one
q = '''
SELECT ?uri
WHERE {
{ ?uri a <http://promsns.org/def/proms#BasicReport> . }
UNION
{ ?uri a <http://promsns.org/def/proms#ExternalReport> . }
UNION
{ ?uri a <http://promsns.org/def/proms#InternalReport> . }
FILTER regex(str(?uri), "placeholder")
}
'''
uri = None
for r in self.graph.query(q):
uri = r['uri']
if uri is not None:
self._generate_new_uri(uri)
else:
# since it has an existing URI, not a placeholder one, use the existing one
q = '''
SELECT ?uri
WHERE {
{ ?uri a <http://promsns.org/def/proms#BasicReport> . }
UNION
{ ?uri a <http://promsns.org/def/proms#ExternalReport> . }
UNION
{ ?uri a <http://promsns.org/def/proms#InternalReport> . }
}
'''
for r in self.graph.query(q):
self.uri = r['uri']
return True
def _generate_new_uri(self, old_uri):
# ask PROMS Server for a new Report URI
new_uri = settings.REPORT_BASE_URI + str(uuid.uuid4())
self.uri = new_uri
# add that new URI to the in-memory graph
api_functions.replace_uri(self.graph, old_uri, new_uri)
def _generate_named_graph_uri(self):
self.named_graph_uri = settings.REPORT_NAMED_GRAPH_BASE_URI + str(uuid.uuid4())
def generate_named_graph_metadata(self):
PROV = Namespace('http://www.w3.org/ns/prov#')
self.graph.bind('prov', PROV)
PROMS = Namespace('http://promsns.org/def/proms#')
self.graph.bind('proms', PROMS)
DCT = Namespace('http://purl.org/dc/terms/')
self.graph.bind('dct', DCT)
self.graph.add((
URIRef(self.named_graph_uri),
RDF.type,
PROMS.ReportNamedGraph
))
# ... the date this Report was sent to this PROMS Server
self.graph.add((
URIRef(self.named_graph_uri),
DCT.dateSubmitted,
Literal(datetime.now().isoformat(), datatype=XSD.dateTime)
))
# ... who contributed this Report
self.graph.add((
URIRef(self.named_graph_uri),
DCT.contributor,
URIRef(self.request.remote_addr)
))
|
[
"m13001282105@163.com"
] |
m13001282105@163.com
|
9535709abf88b3bbdab94e25bd9aad683cadde85
|
63b0fed007d152fe5e96640b844081c07ca20a11
|
/ARC/ARC122/a.py
|
9e9961e0f2d5cbd1eda85e6f6dbb01fe302c0653
|
[] |
no_license
|
Nikkuniku/AtcoderProgramming
|
8ff54541c8e65d0c93ce42f3a98aec061adf2f05
|
fbaf7b40084c52e35c803b6b03346f2a06fb5367
|
refs/heads/master
| 2023-08-21T10:20:43.520468
| 2023-08-12T09:53:07
| 2023-08-12T09:53:07
| 254,373,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
n=int(input())
a=list(map(int,input().split()))
dp0=[0]
dp1=[0]
mod=10**9+7
x=0
y=1
for i in range(n):
p=a[i]
q=dp0[-1]
r=dp1[-1]
dp0.append(q+r+y*p)
dp1.append(q-x*p)
x,y=y,x+y
ans=(dp0[n]+dp1[n])%mod
print(ans)
|
[
"ymdysk911@gmail.com"
] |
ymdysk911@gmail.com
|
953361aa2a76f53cdaddda8221489ba1aab88156
|
9b6f36f544af5a2c1c042b18dda920c78fd11331
|
/omsBackend/apps/zbmanager/serializers.py
|
dcfe3133e33a0c61dbd9aaed1ab434d86e103d18
|
[] |
no_license
|
Nikita-stels/MyOms
|
a946f08b4ba7abfa8392e98c579320b501a7ca2a
|
fdaf9d5a2a29b5386c1a86fcf89a2c0d5527687a
|
refs/heads/master
| 2022-09-17T20:40:45.228067
| 2020-01-08T14:41:04
| 2020-01-08T14:41:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
# -*- coding: utf-8 -*-
# author: huashaw
from rest_framework import serializers
class ZbHostSerializer(serializers.Serializer):
hostid = serializers.IntegerField()
host = serializers.CharField()
status = serializers.CharField()
groups = serializers.JSONField()
parentTemplates = serializers.JSONField()
interfaces = serializers.JSONField()
class ZbHostGroupSerializer(serializers.Serializer):
groupid = serializers.IntegerField()
name = serializers.CharField()
hosts = serializers.JSONField()
class ZbTemplateSerializer(serializers.Serializer):
templateid = serializers.IntegerField()
host = serializers.CharField()
|
[
"1069195546@qq.com"
] |
1069195546@qq.com
|
9d3d87b1db818f478f4aa85b0c257eee39b0700b
|
c609730a43596a2d3303f072fc97d9cf681fac7b
|
/cagey/usedcar/main_haoche99.py
|
386a9e6ee701ee754cd28189f895ff6701bf3b18
|
[] |
no_license
|
sinnettluo/ChenProject
|
5403311c0c7b78c484145e16d692abff00d2a110
|
0e33ecf1683afb22f1deb4bd54294c41aed8a46b
|
refs/heads/master
| 2023-03-22T23:48:08.430178
| 2020-09-02T15:05:02
| 2020-09-02T15:05:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 191
|
py
|
from scrapy.cmdline import execute
import sys
import os
website = "haoche99"
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
execute(["scrapy", "crawl", website])
|
[
"1316446041@qq.com"
] |
1316446041@qq.com
|
d3550d7689399933bc52ca671f322510fc34bf23
|
d94c5849e6308901f9af8a4edf8c8369d46576d1
|
/BOJ/14499_주사위 굴리기.py
|
22ad001dcfef81e9fc7a3e7aee0a5e29963d830e
|
[] |
no_license
|
AhnDogeon/algorithm_study
|
b4c961b934b5e27afccdf2713a2ccb0174d9a698
|
b8de39fff92cc98281ba7e94df82bcc9b1503243
|
refs/heads/master
| 2022-06-05T11:33:27.392131
| 2022-05-23T06:37:50
| 2022-05-23T06:37:50
| 188,783,176
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,095
|
py
|
import sys
from copy import deepcopy
sys.stdin = open('14499_주사위 굴리기.txt', 'r')
N, M, x, y, K = map(int, input().split())
board = []
for _ in range(N):
board_list = list(map(int, input().split()))
board.append(board_list)
move = list(map(int, input().split()))
# print(move)
#
# print('===========디버깅=====================')
# for i in range(N):
# for j in range(M):
# print(board[i][j], end=' ')
# print()
# print('=====================================')
up = 0
middle = 0
left = 0
right = 0
down = 0
bottom = 0
def RIGHT(a, b):
global board, up, middle, left, right, down, bottom
copy_up = deepcopy(up)
copy_middle = deepcopy(middle)
copy_left = deepcopy(left)
copy_right = deepcopy(right)
copy_down = deepcopy(down)
copy_bottom = deepcopy(bottom)
if board[a][b] == 0:
up = copy_up
middle = copy_left
left = copy_bottom
right = copy_middle
down = copy_down
bottom = copy_right
board[a][b] = bottom
else:
up = copy_up
middle = copy_left
left = copy_bottom
right = copy_middle
down = copy_down
bottom = board[a][b]
board[a][b] = 0
print(middle)
def LEFT(a, b):
global board, up, middle, left, right, down, bottom
copy_up = deepcopy(up)
copy_middle = deepcopy(middle)
copy_left = deepcopy(left)
copy_right = deepcopy(right)
copy_down = deepcopy(down)
copy_bottom = deepcopy(bottom)
if board[a][b] == 0:
up = copy_up
middle = copy_right
left = copy_middle
right = copy_bottom
down = copy_down
bottom = copy_left
board[a][b] = bottom
else:
up = copy_up
middle = copy_right
left = copy_middle
right = copy_bottom
down = copy_down
bottom = board[a][b]
board[a][b] = 0
print(middle)
def UP(a, b):
global board, up, middle, left, right, down, bottom
copy_up = deepcopy(up)
copy_middle = deepcopy(middle)
copy_left = deepcopy(left)
copy_right = deepcopy(right)
copy_down = deepcopy(down)
copy_bottom = deepcopy(bottom)
if board[a][b] == 0:
up = copy_middle
middle = copy_down
left = copy_left
right = copy_right
down = copy_bottom
bottom = copy_up
board[a][b] = bottom
else:
up = copy_middle
middle = copy_down
left = copy_left
right = copy_right
down = copy_bottom
bottom = board[a][b]
board[a][b] = 0
print(middle)
def DOWN(a, b):
    """Roll the dice one cell south onto board cell (a, b)."""
    global board, up, middle, left, right, down, bottom
    # Rotate the six faces for a southward roll in a single tuple assignment.
    up, middle, left, right, down, bottom = bottom, up, left, right, middle, down
    if board[a][b] == 0:
        # Blank cell: copy the dice's bottom face onto the board.
        board[a][b] = bottom
    else:
        # Marked cell: copy the cell onto the bottom face and clear the cell.
        bottom = board[a][b]
        board[a][b] = 0
    # Report the face tracked as `middle` after every roll.
    print(middle)
# Apply each move command: 1 = east, 2 = west, 3 = north, 4 = south.
# Moves that would leave the board are silently ignored (no roll, no print).
for i in move:
    if i == 1:
        dx, dy = x, y + 1
        if 0 <= dx < N and 0 <= dy < M:
            RIGHT(dx, dy)
            x, y = dx, dy
    elif i == 2:
        dx, dy = x, y - 1
        if 0 <= dx < N and 0 <= dy < M:
            LEFT(dx, dy)
            x, y = dx, dy
    elif i == 3:
        dx, dy = x - 1, y
        if 0 <= dx < N and 0 <= dy < M:
            UP(dx, dy)
            x, y = dx, dy
    elif i == 4:
        dx, dy = x + 1, y
        if 0 <= dx < N and 0 <= dy < M:
            DOWN(dx, dy)
            x, y = dx, dy
#
# print('===========debugging=====================')
# for i in range(N):
#     for j in range(M):
#         print(board[i][j], end=' ')
#     print()
# print('=====================================')
#
|
[
"qltiqlti@gmail.com"
] |
qltiqlti@gmail.com
|
0d5757a1a9ed5bcbb7dbb9f0d6480b75d12b5efe
|
4d1f1e188a4db8e909430b55bddf0d8113a28fcf
|
/reinforcement_learning/0x00-q_learning/2-epsilon_greedy.py
|
5b5895e3aafe8d93a6fc7131ffb272cf3044f4a9
|
[] |
no_license
|
paurbano/holbertonschool-machine_learning
|
b0184a71733a1f51633ba7c7f4d3a82b8d50e94f
|
ff1af62484620b599cc3813068770db03b37036d
|
refs/heads/master
| 2023-07-02T16:20:13.668083
| 2023-06-18T06:25:26
| 2023-06-18T06:25:26
| 279,967,511
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/env python3
'''Epsilon Greedy
https://github.com/simoninithomas/Deep_reinforcement_learning_Course/blob/
master/Q%20learning/FrozenLake/Q%20Learning%20with%20FrozenLake.ipynb
'''
import numpy as np
def epsilon_greedy(Q, state, epsilon):
    '''Pick the next action using an epsilon-greedy policy.

    Args:
        Q: numpy.ndarray containing the q-table, shape (states, actions)
        state: the current state index
        epsilon: exploration probability used for the draw
    Returns: the next action index
    '''
    # One uniform draw decides exploit vs explore.
    roll = np.random.uniform(0, 1)
    if roll > epsilon:
        # Exploitation: take the action with the largest Q value.
        return np.argmax(Q[state, :])
    # Exploration: pick any action uniformly at random.
    return np.random.randint(0, int(Q.shape[1]))
|
[
"paurbano@gmail.com"
] |
paurbano@gmail.com
|
0eed1e43e88e22d5e74f9010387e7ad031989714
|
472baa2414822520f7cb8d491d4bf5608f765ad8
|
/zqxt4396/tools/views.py
|
3d5f7f76bda31af965d9c812557cadea6c386f1e
|
[] |
no_license
|
Umi101108/django-projects
|
cdcf0c9bb8bd272e04a4b7a702f09adb16c28404
|
50edfdc3511e1de5b4a5a3e92fe9ddad932b5396
|
refs/heads/master
| 2021-01-12T08:20:48.113696
| 2017-06-11T14:45:20
| 2017-06-11T14:45:20
| 76,545,822
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 272
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    """Render the site landing page."""
    template = 'index.html'
    return render(request, template)
def add(request):
    """Return the sum of the integer GET parameters ``a`` and ``b`` as plain text."""
    total = int(request.GET['a']) + int(request.GET['b'])
    return HttpResponse(str(total))
|
[
"408465808@qq.com"
] |
408465808@qq.com
|
a5680836916c2ce43cd2b4b36b019cde8f18cee4
|
1adf769cf9234f9b6c619f808d2723b99451d679
|
/rusentrel/classic/mi/pcnn.py
|
825d23c100525d15bf520d848194da8230315155
|
[
"MIT"
] |
permissive
|
DAVMARROS/attitude-extraction-with-attention-and-ds
|
4e85fa154ead0cd9499aaedf5d752ac565f37b92
|
fb8e9d0d9488363738a88c4c447c7a8cb3e2ec1d
|
refs/heads/master
| 2023-02-09T04:56:24.090380
| 2020-12-30T10:09:34
| 2020-12-30T10:09:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,447
|
py
|
#!/usr/bin/python
import sys
sys.path.append('../../../')
from io_utils import RuSentRelBasedExperimentsIOUtils
from arekit.contrib.experiments.callback import CustomCallback
from arekit.contrib.networks.multi.configurations.max_pooling import MaxPoolingOverSentencesConfig
from arekit.contrib.networks.multi.architectures.max_pooling import MaxPoolingOverSentences
from arekit.common.evaluation.evaluators.two_class import TwoClassEvaluator
from arekit.contrib.networks.context.configurations.cnn import CNNConfig
from arekit.contrib.networks.context.architectures.pcnn import PiecewiseCNN
from arekit.contrib.experiments.multi.model import MultiInstanceTensorflowModel
from arekit.contrib.experiments.nn_io.rusentrel import RuSentRelBasedNeuralNetworkIO
from arekit.contrib.experiments.engine import run_testing
from rusentrel.mi_names import MaxPoolingModelNames
from rusentrel.classic.ctx.pcnn import ctx_pcnn_custom_config
from rusentrel.classic.common import \
classic_common_callback_modification_func, \
classic_mi_common_config_settings
def mi_pcnn_custom_config(config):
    # Reuse the context-level PCNN settings on the nested context config,
    # then propagate/freeze them into the multi-instance wrapper config.
    ctx_pcnn_custom_config(config.ContextConfig)
    config.fix_context_parameters()
def run_testing_pcnn(name_prefix=u'',
                     cv_count=1,
                     model_names_classtype=MaxPoolingModelNames,
                     network_classtype=MaxPoolingOverSentences,
                     config_classtype=MaxPoolingOverSentencesConfig,
                     custom_config_func=mi_pcnn_custom_config,
                     custom_callback_func=classic_common_callback_modification_func):
    """Run a PCNN experiment wrapped in a multi-instance
    max-pooling-over-sentences network on the RuSentRel collection.

    cv_count=1 means a single train/test split (no cross-validation).
    The class-type parameters allow callers to swap in alternative
    network/config/name classes while keeping this wiring.
    """
    run_testing(full_model_name=name_prefix + model_names_classtype().PCNN,
                create_network=lambda: network_classtype(context_network=PiecewiseCNN()),
                create_config=lambda: config_classtype(context_config=CNNConfig()),
                create_nn_io=RuSentRelBasedNeuralNetworkIO,
                cv_count=cv_count,
                create_model=MultiInstanceTensorflowModel,
                evaluator_class=TwoClassEvaluator,
                create_callback=CustomCallback,
                experiments_io=RuSentRelBasedExperimentsIOUtils(),
                common_callback_modification_func=custom_callback_func,
                custom_config_modification_func=custom_config_func,
                common_config_modification_func=classic_mi_common_config_settings)
if __name__ == "__main__":
    # Allow launching this experiment directly as a script.
    run_testing_pcnn()
|
[
"kolyarus@yandex.ru"
] |
kolyarus@yandex.ru
|
ae9f47dcd6973ca4c8e603f1503be4d5ca8b26ce
|
a9063fd669162d4ce0e1d6cd2e35974274851547
|
/test/test_role_members_add.py
|
ed565058c42a11f8a5eb9894159405db3ff757a7
|
[] |
no_license
|
rootalley/py-zoom-api
|
9d29a8c750e110f7bd9b65ff7301af27e8518a3d
|
bfebf3aa7b714dcac78be7c0affb9050bbce8641
|
refs/heads/master
| 2022-11-07T14:09:59.134600
| 2020-06-20T18:13:50
| 2020-06-20T18:13:50
| 273,760,906
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
# coding: utf-8
"""
Zoom API
The Zoom API allows developers to safely and securely access information from Zoom. You can use this API to build private services or public applications on the [Zoom App Marketplace](http://marketplace.zoom.us). To learn how to get your credentials and create private/public applications, read our [Authorization Guide](https://marketplace.zoom.us/docs/guides/authorization/credentials). All endpoints are available via `https` and are located at `api.zoom.us/v2/`. For instance you can list all users on an account via `https://api.zoom.us/v2/users/`. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: developersupport@zoom.us
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from models.role_members_add import RoleMembersAdd # noqa: E501
from swagger_client.rest import ApiException
class TestRoleMembersAdd(unittest.TestCase):
    """RoleMembersAdd unit test stubs (generated by swagger-codegen)."""
    def setUp(self):
        # No per-test fixtures needed for these generated stubs.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testRoleMembersAdd(self):
        """Smoke-test constructing a RoleMembersAdd model (still a stub)."""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.role_members_add.RoleMembersAdd()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
[
"github@rootalley.com"
] |
github@rootalley.com
|
cad08c6af20f321507af6bc050e428731b67a33f
|
7dc240e587213e4b420676c60aa1b24905b1b2e4
|
/src/app/tests/mailchimp/conftest.py
|
d5af1f4a3624389007aae35e1b133692b303f6ce
|
[
"MIT"
] |
permissive
|
denokenya/education-backend
|
834d22280717f15f93407108846e2eea767421c8
|
3b43ba0cc54c6a2fc2f1716170393f943323a29b
|
refs/heads/master
| 2023-08-27T09:07:48.257108
| 2021-11-03T00:19:04
| 2021-11-03T00:19:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
import pytest
import requests_mock
from app.integrations.mailchimp import AppMailchimp, MailchimpMember
# Every test in this module touches the database.
pytestmark = [pytest.mark.django_db]
@pytest.fixture(autouse=True)
def _set_mailchimp_credentials(settings):
    # Applied to every test: point the app at fake Mailchimp credentials
    # (the "-us05" key suffix selects the API datacenter).
    settings.MAILCHIMP_API_KEY = 'key-us05'
    settings.MAILCHIMP_CONTACT_LIST_ID = '123cba'
@pytest.fixture
def mailchimp():
    # Mailchimp client whose HTTP traffic is intercepted by requests_mock;
    # the mock is exposed on the client so tests can register responses.
    client = AppMailchimp()
    with requests_mock.Mocker() as http_mock:
        client.http_mock = http_mock
        yield client
@pytest.fixture
def mailchimp_member(user):
    # Mailchimp member record built from the `user` fixture below.
    return MailchimpMember.from_django_user(user)
@pytest.fixture
def post(mocker):
    # Patched Mailchimp HTTP POST, returned so tests can assert on the calls.
    return mocker.patch('app.integrations.mailchimp.http.MailchimpHTTP.post')
@pytest.fixture
def user(mixer):
    # A user with fixed email/name, used as the Mailchimp subscription subject.
    return mixer.blend('users.User', email='test@e.mail', first_name='Rulon', last_name='Oboev')
|
[
"noreply@github.com"
] |
denokenya.noreply@github.com
|
09a5dcf778c742d075bd8decf005f393a6b3b6e6
|
e6d1bbac91b97ee7a9d028c3aafa5d85a0ee593c
|
/Python04Month/chapter/chapter3/demo/code/3-1_abnormal_check.py
|
bd08daf230d7e50525b8458610580eb8e1138662
|
[] |
no_license
|
LiuJingGitLJ/PythonSuanFa_2
|
82159043523d6fe69beef7f86421cd4be2242919
|
0afba93c4c29231bc6c2aaf6e4663beee2b5cbbb
|
refs/heads/master
| 2021-09-20T13:49:08.521080
| 2018-08-10T06:13:22
| 2018-08-10T06:13:22
| 124,337,675
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,135
|
py
|
#-*- coding: utf-8 -*-
# Boxplot-based outlier detection for catering sales data, with annotations.
import pandas as pd
catering_sale = '../data/catering_sale.xls' # catering sales data
data = pd.read_excel(catering_sale, index_col = u'日期') # read the data; use the date column (u'日期') as the index
print(data)
import matplotlib.pyplot as plt # plotting library
plt.rcParams['font.sans-serif'] = ['SimHei'] # render CJK axis labels correctly
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
plt.figure() # create the figure
p = data.boxplot(return_type='dict') # draw the boxplot via the DataFrame method
x = p['fliers'][0].get_xdata() # 'fliers' holds the outlier points
y = p['fliers'][0].get_ydata()
y.sort() # ascending, in place (mutates the array)
# Annotate each outlier with its value.
# Labels of nearby points would overlap, so the x-offset of each label is
# adjusted empirically; tune these constants per dataset if labels collide.
for i in range(len(x)):
    if i>0:
        plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.05 -0.8/(y[i]-y[i-1]),y[i]))
    else:
        plt.annotate(y[i], xy = (x[i],y[i]), xytext=(x[i]+0.08,y[i]))
plt.show() # show the boxplot
|
[
"15201078137@163.com"
] |
15201078137@163.com
|
5b3165a574457eeb1f369cd70b0259bd520aec67
|
8e2404c7bcfd28329bed789839192b2c4e85ea1b
|
/LeetCode/Linked_List_Cycle_II.py
|
ca97be57324afaacc01727943d36debb9971ccae
|
[] |
no_license
|
Pabitra-26/Problem-Solved
|
408bd51bbffc69f8c5e1def92797c2e6f027f91d
|
c27de1dd6c4ad14444fa5ee911a16186c200a7f9
|
refs/heads/master
| 2023-07-30T16:51:28.062349
| 2021-09-27T06:06:54
| 2021-09-27T06:06:54
| 269,935,039
| 2
| 0
| null | 2021-09-27T06:06:55
| 2020-06-06T09:39:33
|
Python
|
UTF-8
|
Python
| false
| false
| 886
|
py
|
# Problem name: Linked List Cycle II
# Description: Given a linked list, return the node where the cycle begins. If there is no cycle, return null.
# To represent a cycle in the given linked list, we use an integer pos which represents the position (0-indexed) in the linked list where tail connects to.
# If pos is -1, then there is no cycle in the linked list.
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def detectCycle(self, head: 'ListNode') -> 'ListNode':
        """Return the node where the cycle begins, or None if there is no cycle.

        Walks the list once, remembering visited nodes in a set; the first
        node encountered twice is the cycle entry. O(n) time, O(n) space.

        Fixes over the original: the dead counters i/m and the confusing
        trailing `if m == 0` are removed (that branch only ran when no cycle
        was found), and the visited dict is replaced with a set. Annotations
        are string literals so the class also loads when ListNode is not
        defined in this module.
        """
        seen = set()
        curr = head
        while curr is not None:
            if curr in seen:
                # First revisited node is the start of the cycle.
                return curr
            seen.add(curr)
            curr = curr.next
        # Reached the tail: the list is acyclic.
        return None
|
[
"noreply@github.com"
] |
Pabitra-26.noreply@github.com
|
2809b47d249d56790cb08fb8a0c7d5f1fbdd146e
|
d53baf0a3aaa10521cfc28a7be8f2c498bc9e741
|
/examples/CaffeModels/load-vgg16.py
|
96780e85eac94a3b1709a479d22cf2e3faa232fd
|
[
"Apache-2.0"
] |
permissive
|
qianlinjun/tensorpack
|
8f6e99ba17095334de1163d6412e740642343752
|
7f505225cd41aaeee3a0b0688fe67afc0af8fb30
|
refs/heads/master
| 2020-03-29T22:38:22.269889
| 2018-09-25T07:20:48
| 2018-09-25T07:20:48
| 150,432,021
| 1
| 0
|
Apache-2.0
| 2018-09-26T13:35:19
| 2018-09-26T13:35:18
| null |
UTF-8
|
Python
| false
| false
| 3,493
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: load-vgg16.py
from __future__ import print_function
import cv2
import tensorflow as tf
import numpy as np
import os
import six
import argparse
from tensorpack import *
from tensorpack.dataflow.dataset import ILSVRCMeta
# Route tf.layers.* calls through tensorpack's argscope mechanism.
enable_argscope_for_module(tf.layers)
def tower_func(image):
    """Build the VGG16 tower on `image`; adds a 'prob' softmax node to the graph."""
    is_training = get_current_tower_context().is_training
    with argscope([tf.layers.conv2d], kernel_size=3, activation=tf.nn.relu, padding='same'):
        x = image
        # Five conv blocks: (number of conv layers, channels) per block.
        # Layer names (convB_L, poolB) match the pretrained weight keys.
        for block, (n_convs, channels) in enumerate(
                [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)], 1):
            for layer in range(1, n_convs + 1):
                x = tf.layers.conv2d(x, channels, name='conv{}_{}'.format(block, layer))
            x = tf.layers.max_pooling2d(x, 2, 2, name='pool{}'.format(block))
        # Classifier head: two FC+dropout layers, then 1000-way logits.
        x = tf.layers.flatten(x, name='flatten')
        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc6')
        x = tf.layers.dropout(x, rate=0.5, name='drop0', training=is_training)
        x = tf.layers.dense(x, 4096, activation=tf.nn.relu, name='fc7')
        x = tf.layers.dropout(x, rate=0.5, name='drop1', training=is_training)
        logits = tf.layers.dense(x, 1000, activation=tf.identity, name='fc8')
        tf.nn.softmax(logits, name='prob')
def run_test(path, input):
    """Classify one image with VGG16 weights loaded from a .npz file.

    path: .npz weight file produced by tensorpack's caffe converter.
    input: path of the image file to classify.
    """
    param_dict = dict(np.load(path))
    # Rename caffe-converter keys (/W, /b) to tf.layers names (/kernel, /bias).
    param_dict = {k.replace('/W', '/kernel').replace('/b', '/bias'): v for k, v in six.iteritems(param_dict)}
    predict_func = OfflinePredictor(PredictConfig(
        inputs_desc=[InputDesc(tf.float32, (None, 224, 224, 3), 'input')],
        tower_func=tower_func,
        session_init=DictRestore(param_dict),
        input_names=['input'],
        output_names=['prob']   # prob:0 is the probability distribution
    ))
    im = cv2.imread(input)
    assert im is not None, input
    im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    im = cv2.resize(im, (224, 224)).reshape((1, 224, 224, 3)).astype('float32')
    # VGG16 requires channelwise mean substraction
    VGG_MEAN = [103.939, 116.779, 123.68]
    im -= VGG_MEAN[::-1]
    outputs = predict_func(im)[0]
    prob = outputs[0]
    # Indices of the 10 largest probabilities, highest first.
    ret = prob.argsort()[-10:][::-1]
    print("Top10 predictions:", ret)
    meta = ILSVRCMeta().get_synset_words_1000()
    print("Top10 class names:", [meta[k] for k in ret])
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', required=True,
                        help='.npz model file generated by tensorpack.utils.loadcaffe')
    parser.add_argument('--input', help='an input image', required=True)
    args = parser.parse_args()
    if args.gpu:
        # Restrict TensorFlow to the requested GPUs.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    run_test(args.load, args.input)
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
50ddae41737c1856fdea70885af523908cdebab0
|
d83fa072a084642ebaa40317dda61f7a2f660284
|
/cleancoderscom/gateways/codecast_gateway.py
|
89edbc42cf18f413af36449ce9f5bf8e0749df70
|
[] |
no_license
|
xstrengthofonex/CleanCodeCaseStudy
|
479ca1f0c028f3f481635b23bf44363fd50dec18
|
312aeef9f2127033f2b9e0b4a2c41baf4e6cc01e
|
refs/heads/master
| 2021-01-02T22:55:50.471384
| 2017-08-06T14:36:17
| 2017-08-06T14:36:17
| 99,425,289
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
from abc import ABCMeta, abstractmethod
from typing import List, Optional
from cleancoderscom.entities.codecast import Codecast
class CodecastGateway(metaclass=ABCMeta):
    """Persistence boundary (gateway interface) for Codecast entities."""
    @abstractmethod
    def find_all_codecasts_ordered_by_date(self) -> List[Codecast]:
        """Return every codecast, ordered by date."""
        pass
    @abstractmethod
    def find_codecast_by_title(self, title) -> Optional[Codecast]:
        """Return the codecast with the given title, or None if absent."""
        pass
|
[
"xstrengthofonex@gmail.com"
] |
xstrengthofonex@gmail.com
|
48035def9dc27ef8655ec0557839d1a7558ed009
|
08bfc8a1f8e44adc624d1f1c6250a3d9635f99de
|
/SDKs/Qt/5.12.3_python_37/msvc2017_64/PySide/PySide2/scripts/uic.py
|
1471f24152ba72980656c2caa300f5e965452b38
|
[] |
no_license
|
Personwithhat/CE_SDKs
|
cd998a2181fcbc9e3de8c58c7cc7b2156ca21d02
|
7afbd2f7767c9c5e95912a1af42b37c24d57f0d4
|
refs/heads/master
| 2020-04-09T22:14:56.917176
| 2019-07-04T00:19:11
| 2019-07-04T00:19:11
| 160,623,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:7342dc46431b086d9ffeed1ae7e528d3b0e53a3dc1ccd79003825db7ec8dad8e
size 2880
|
[
"personwithhats2@Gmail.com"
] |
personwithhats2@Gmail.com
|
fca8833ff2ffcf10a7e5395e8b705cd0a33fad29
|
cd4bbecc3f713b0c25508d0c5674d9e103db5df4
|
/toontown/building/DistributedAnimDoor.py
|
37bb7065eba4aa04a774aaff39c4ee732815e3bb
|
[] |
no_license
|
peppythegod/ToontownOnline
|
dce0351cfa1ad8c476e035aa3947fdf53de916a6
|
2e5a106f3027714d301f284721382cb956cd87a0
|
refs/heads/master
| 2020-04-20T05:05:22.934339
| 2020-01-02T18:05:28
| 2020-01-02T18:05:28
| 168,646,608
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,294
|
py
|
from pandac.PandaModules import NodePath, VBase3
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import Parallel, Sequence, Wait, HprInterval, LerpHprInterval, SoundInterval
from toontown.building import DistributedDoor
from toontown.building import DoorTypes
if __debug__:
import pdb
class DistributedAnimDoor(DistributedDoor.DistributedDoor):
    """Client-side door belonging to an animated building: instead of moving
    separate door geometry it drives the 'def_right_door'/'def_left_door'
    joints of the building's Actor."""
    def __init__(self, cr):
        DistributedDoor.DistributedDoor.__init__(self, cr)
        # Debug convenience handle to the most recently created instance.
        base.animDoor = self
    def getBuilding(self):
        """Find (and cache on self) the animated building NodePath for this block."""
        if 'building' not in self.__dict__:
            if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
                searchStr = '**/??' + \
                    str(self.block) + ':animated_building_*_DNARoot;+s'
                self.notify.debug('searchStr=%s' % searchStr)
                self.building = self.cr.playGame.hood.loader.geom.find(
                    searchStr)
            else:
                self.notify.error(
                    'DistributedAnimDoor.getBuiding with doorType=%s' %
                    self.doorType)
        return self.building
    def getDoorNodePath(self):
        """Return a cached 'doorOrigin' node cloned from the building's door_origin."""
        if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
            if hasattr(self, 'tempDoorNodePath'):
                return self.tempDoorNodePath
            else:
                building = self.getBuilding()
                doorNP = building.find('**/door_origin')
                self.notify.debug('creating doorOrigin at %s %s' % (str(
                    doorNP.getPos()), str(doorNP.getHpr())))
                # Clone position/orientation onto a fresh node so later code
                # can use it without touching the original door_origin.
                otherNP = NodePath('doorOrigin')
                otherNP.setPos(doorNP.getPos())
                otherNP.setHpr(doorNP.getHpr())
                otherNP.reparentTo(doorNP.getParent())
                self.tempDoorNodePath = otherNP
        else:
            self.notify.error(
                'DistributedAnimDoor.getDoorNodePath with doorType=%s' %
                self.doorType)
        # NOTE(review): otherNP is unbound if the error branch runs —
        # presumably notify.error raises before reaching here; verify.
        return otherNP
    def setTriggerName(self):
        """Rename the building's door trigger so collision events reach this door."""
        if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
            building = self.getBuilding()
            if not building.isEmpty():
                doorTrigger = building.find('**/door_0_door_trigger')
                if not doorTrigger.isEmpty():
                    doorTrigger.node().setName(self.getTriggerName())
            else:
                self.notify.warning('setTriggerName failed no building')
        else:
            self.notify.error('setTriggerName doorTYpe=%s' % self.doorType)
    def getAnimBuilding(self):
        """Find (and cache on self) the anim prop wrapping this building's Actor."""
        if 'animBuilding' not in self.__dict__:
            if self.doorType == DoorTypes.EXT_ANIM_STANDARD:
                bldg = self.getBuilding()
                key = bldg.getParent().getParent()
                animPropList = self.cr.playGame.hood.loader.animPropDict.get(
                    key)
                if animPropList:
                    for prop in animPropList:
                        if bldg == prop.getActor().getParent():
                            self.animBuilding = prop
                            break
                        # Decompiler artifact: this continue is a no-op.
                        continue
                else:
                    self.notify.error('could not find' + str(key))
            else:
                self.notify.error('No such door type as ' + str(self.doorType))
        return self.animBuilding
    def getBuildingActor(self):
        """Return the Actor of the animated building."""
        result = self.getAnimBuilding().getActor()
        return result
    def enterOpening(self, ts):
        """Animate the 'def_right_door' joint open, with the open sound."""
        bldgActor = self.getBuildingActor()
        rightDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_right_door')
        if rightDoor.isEmpty():
            self.notify.warning('enterOpening(): did not find rightDoor')
            return None
        otherNP = self.getDoorNodePath()
        trackName = 'doorOpen-%d' % self.doId
        # Swing direction depends on how the door is hinged.
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Parallel(
            SoundInterval(self.openSfx, node=rightDoor),
            Sequence(
                HprInterval(rightDoor, VBase3(0, 0, 0)),
                Wait(0.40000000000000002),
                LerpHprInterval(
                    nodePath=rightDoor,
                    duration=0.59999999999999998,
                    hpr=VBase3(h, 0, 0),
                    startHpr=VBase3(0, 0, 0),
                    blendType='easeInOut')),
            name=trackName)
        self.doorTrack.start(ts)
    def enterClosing(self, ts):
        """Animate the 'def_right_door' joint closed; sends 'doorDoneEvent'
        when self.done is set."""
        bldgActor = self.getBuildingActor()
        rightDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_right_door')
        if rightDoor.isEmpty():
            self.notify.warning('enterClosing(): did not find rightDoor')
            return None
        otherNP = self.getDoorNodePath()
        trackName = 'doorClose-%d' % self.doId
        if self.rightSwing:
            h = 100
        else:
            h = -100
        self.finishDoorTrack()
        self.doorTrack = Sequence(
            LerpHprInterval(
                nodePath=rightDoor,
                duration=1.0,
                hpr=VBase3(0, 0, 0),
                startHpr=VBase3(h, 0, 0),
                blendType='easeInOut'),
            SoundInterval(self.closeSfx, node=rightDoor),
            name=trackName)
        self.doorTrack.start(ts)
        if hasattr(self, 'done'):
            request = self.getRequestStatus()
            messenger.send('doorDoneEvent', [request])
    def exitDoorEnterOpening(self, ts):
        """Animate the 'def_left_door' joint open, with the open sound."""
        bldgActor = self.getBuildingActor()
        leftDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_left_door')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorDoorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Parallel(
                SoundInterval(self.openSfx, node=leftDoor),
                Sequence(
                    LerpHprInterval(
                        nodePath=leftDoor,
                        duration=0.59999999999999998,
                        hpr=VBase3(h, 0, 0),
                        startHpr=VBase3(0, 0, 0),
                        blendType='easeInOut')),
                name=trackName)
            self.doorExitTrack.start(ts)
        else:
            self.notify.warning(
                'exitDoorEnterOpening(): did not find leftDoor')
    def exitDoorEnterClosing(self, ts):
        """Animate the 'def_left_door' joint closed, with the close sound."""
        bldgActor = self.getBuildingActor()
        leftDoor = bldgActor.controlJoint(None, 'modelRoot', 'def_left_door')
        if self.leftSwing:
            h = -100
        else:
            h = 100
        if not leftDoor.isEmpty():
            otherNP = self.getDoorNodePath()
            trackName = 'doorExitTrack-%d' % self.doId
            self.finishDoorExitTrack()
            self.doorExitTrack = Sequence(
                LerpHprInterval(
                    nodePath=leftDoor,
                    duration=1.0,
                    hpr=VBase3(0, 0, 0),
                    startHpr=VBase3(h, 0, 0),
                    blendType='easeInOut'),
                SoundInterval(self.closeSfx, node=leftDoor),
                name=trackName)
            self.doorExitTrack.start(ts)
|
[
"47166977+peppythegod@users.noreply.github.com"
] |
47166977+peppythegod@users.noreply.github.com
|
aa718ed8354abdea50f56b54e171775a136dd57a
|
dd116fe1e94191749ab7a9b00be25bfd88641d82
|
/cairis/cairis/SearchDialog.py
|
c128364ca182e31bbb94073ecd249cd1315fc760
|
[
"Apache-2.0"
] |
permissive
|
RobinQuetin/CAIRIS-web
|
fbad99327707ea3b995bdfb4841a83695989e011
|
4a6822db654fecb05a09689c8ba59a4b1255c0fc
|
HEAD
| 2018-12-28T10:53:00.595152
| 2015-06-20T16:53:39
| 2015-06-20T16:53:39
| 33,935,403
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,369
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import ARM
from SearchPanel import SearchPanel
from Borg import Borg
class SearchDialog(wx.Dialog):
  """Modal free-text search dialog over the model (Python 2 / wxPython)."""
  def __init__(self,parent):
    wx.Dialog.__init__(self,parent,armid.SEARCHMODEL_ID,'Search model',style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(700,500))
    b = Borg()
    # Shared database proxy is obtained from the Borg singleton.
    self.dbProxy = b.dbProxy
    mainSizer = wx.BoxSizer(wx.VERTICAL)
    self.panel = SearchPanel(self)
    mainSizer.Add(self.panel,1,wx.EXPAND)
    self.SetSizer(mainSizer)
    wx.EVT_BUTTON(self,armid.SEARCHMODEL_BUTTONFIND_ID,self.onFind)
  def onFind(self,evt):
    """Run the search and populate the results list; message boxes on error."""
    ssCtrl = self.FindWindowById(armid.SEARCHMODEL_TEXTSEARCHSTRING_ID)
    ssValue = ssCtrl.GetValue()
    # Reject an empty (or single-space) search string up front.
    if (len(ssValue) == 0) or (ssValue == ' '):
      dlg = wx.MessageDialog(self,'Search string empty','Search model',wx.OK)
      dlg.ShowModal()
      dlg.Destroy()
      return
    listCtrl = self.FindWindowById(armid.SEARCHMODEL_LISTRESULTS_ID)
    listCtrl.DeleteAllItems()
    searchOptionsCtrl = self.FindWindowById(armid.SEARCHOPTIONSPANEL_ID)
    searchOptions = searchOptionsCtrl.optionFlags()
    try:
      searchResults = self.dbProxy.searchModel(ssValue,searchOptions)
      # Each result row provides three columns for the list control.
      for idx,result in enumerate(searchResults):
        listCtrl.InsertStringItem(idx,result[0])
        listCtrl.SetStringItem(idx,1,result[1])
        listCtrl.SetStringItem(idx,2,result[2])
    except ARM.ARMException,errorText:
      dlg = wx.MessageDialog(self,str(errorText),'Search model',wx.OK | wx.ICON_ERROR)
      dlg.ShowModal()
      dlg.Destroy()
      return
|
[
"shamal.faily@googlemail.com"
] |
shamal.faily@googlemail.com
|
aa0d2e6554684c54501f6f150d32cf14d1cc827e
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/136/21959/submittedfiles/funcoes.py
|
efca9f8ab430ae8fca7e83512158b118f168e4d3
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,338
|
py
|
#ARQUIVO COM SUAS FUNCOES
from __future__ import division
def calcula_valor_absoluto(x):
    '''Return the absolute value of x.'''
    return -x if x < 0 else x
def calcula_pi(m):
    # Approximate pi with the alternating series
    # 3 + 4/(2*3*4) - 4/(4*5*6) + ... using m terms.
    # NOTE(review): the loop counters only advance inside the 1<=m<=2000
    # check, so m > 2000 loops forever, and for m < 1 'calcula_pi' is never
    # bound before the return — the function appears to assume
    # 1 <= m <= 2000; verify against callers.
    expr = 0
    i = 1
    x = 2
    while i<=m:
        if 1<=m<=2000: # for m between 1 and 2000 inclusive
            if i%2==0: # if i is even
                expr = expr - (4/(x*(x+1)*(x+2)))
            else: # otherwise
                expr = expr + (4/(x*(x+1)*(x+2)))
            x = x +2
            i = i +1
        calcula_pi = 3 + expr # pi equals 3 plus the accumulated series
    return calcula_pi # return the approximation of pi
def fatorial(n):
    '''Return n! (the factorial of n), with 0! == 1! == 1.

    Bug fix: the original loop was ``for i in range(0, n, 1)``, which
    multiplies by 0 on its first iteration and therefore returned 0 for
    every n >= 1 (this also made calcula_co_seno divide by zero).
    '''
    fatorial = 1
    for i in range(2, n + 1):
        fatorial = fatorial * i
    return fatorial
def calcula_co_seno(z, epsilon):
    # Approximate cos(z) via its Taylor series, accumulating terms while
    # the current term exceeds epsilon. Uses the sibling fatorial() helper.
    soma = 0
    i = 1
    expoente = 2
    fracao = (z**expoente)/fatorial(expoente) # note: fatorial is called with the exponent itself
    while fracao>epsilon:
        # Recomputed at the top of each pass; the loop condition always
        # tests the most recently produced term.
        fracao = (z**expoente)/fatorial(expoente)
        if i%2==1:
            soma = soma - fracao
        else:
            soma = soma + fracao
        expoente = expoente + 2
        i = i + 1
    calcula_co_seno = soma + 1
    return calcula_co_seno
def calcula_razao_aurea(m, epsilon):
    '''Return the golden ratio, phi = 2*cos(pi/5), built from the series helpers.'''
    return 2 * calcula_co_seno(calcula_pi(m) / 5, epsilon)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1def8bfa91528ad23d33f5f84710747a8dc3cf57
|
c0f86b926fc82baa633862896096c149dd9913cf
|
/Python/Numpy/Mean-Var-and-Std/Python2/solution.py
|
74b8d96a55af697e4421abd696b485c3a4ebf3f7
|
[] |
no_license
|
qxzsilver1/HackerRank
|
8df74dd0cd4a9dedd778cdecea395f4234eda767
|
bcb1b74711a625d8ad329a3f9fdd9f49b1bebc54
|
refs/heads/master
| 2021-09-09T15:45:35.681284
| 2021-09-07T00:11:16
| 2021-09-07T00:11:16
| 75,671,896
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
import numpy
# Read an n x m integer matrix from stdin (Python 2: raw_input/xrange/print stmt).
n, m = map(int, raw_input().split())
a = numpy.array([raw_input().split() for _ in xrange(n)], int)
# Row means, column variances, and the overall standard deviation.
print numpy.mean(a, axis=1)
print numpy.var(a, axis=0)
print numpy.std(a, None)
|
[
"noreply@github.com"
] |
qxzsilver1.noreply@github.com
|
cfbdf7c3da7f8b2699eaf24f527932d1c674b6d1
|
4e44c4bbe274b0a8ccca274f29c4140dfad16d5e
|
/Push2_MIDI_Scripts/decompiled 10.1.2b5 scripts/pushbase/touch_encoder_element.py
|
f9f76e3eeae43809b8f5db8daf6b10d1825bf8fa
|
[] |
no_license
|
intergalacticfm/Push2_MIDI_Scripts
|
b48841e46b7a322f2673259d1b4131d2216f7db6
|
a074e2337b2e5d2e5d2128777dd1424f35580ae1
|
refs/heads/master
| 2021-06-24T15:54:28.660376
| 2020-10-27T11:53:57
| 2020-10-27T11:53:57
| 137,673,221
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,161
|
py
|
# uncompyle6 version 3.0.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.13 (default, Jan 19 2017, 14:48:08)
# [GCC 6.3.0 20170118]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\pushbase\touch_encoder_element.py
# Compiled at: 2018-11-27 11:59:28
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface.elements import TouchEncoderElement as TouchEncoderElementBase
class TouchEncoderObserver(object):
    u""" Interface for observing the state of one or more TouchEncoderElements """
    def on_encoder_touch(self, encoder):
        # Called when an encoder is touched or released.
        pass
    def on_encoder_parameter(self, encoder):
        # Called when an encoder's mapped parameter is connected or released.
        pass
class TouchEncoderElement(TouchEncoderElementBase):
    u""" Class representing an encoder that is touch sensitive """
    def __init__(self, undo_step_handler=None, delete_handler=None, *a, **k):
        super(TouchEncoderElement, self).__init__(*a, **k)
        # Undo-step bookkeeping: a step is opened on touch/turn and closed
        # when the encoder is released.
        self._trigger_undo_step = False
        self._undo_step_open = False
        self._undo_step_handler = undo_step_handler
        self._delete_handler = delete_handler
        self.set_observer(None)
        return
    def set_observer(self, observer):
        # Fall back to a no-op observer so callbacks never need None checks.
        if observer is None:
            observer = TouchEncoderObserver()
        self._observer = observer
        return
    def on_nested_control_element_value(self, value, control):
        # `value` is the touch state of the nested touch control (truthy = touched).
        self._trigger_undo_step = value
        if value:
            param = self.mapped_parameter()
            if self._delete_handler and self._delete_handler.is_deleting and param:
                # Touch while delete mode is active deletes the parameter's
                # clip envelope instead of starting an edit gesture.
                self._delete_handler.delete_clip_envelope(param)
            else:
                self.begin_gesture()
                self._begin_undo_step()
                self._observer.on_encoder_touch(self)
                self.notify_touch_value(value)
        else:
            # Release: close the undo step and end the gesture.
            self._end_undo_step()
            self._observer.on_encoder_touch(self)
            self.notify_touch_value(value)
            self.end_gesture()
    def connect_to(self, parameter):
        # Only react to actual parameter changes; remembers the last mapping.
        if parameter != self.mapped_parameter():
            self.last_mapped_parameter = parameter
            super(TouchEncoderElement, self).connect_to(parameter)
            self._observer.on_encoder_parameter(self)
    def release_parameter(self):
        if self.mapped_parameter() != None:
            super(TouchEncoderElement, self).release_parameter()
            self._observer.on_encoder_parameter(self)
        return
    def receive_value(self, value):
        # Encoder turn: ensure an undo step is open before applying the value.
        self._begin_undo_step()
        super(TouchEncoderElement, self).receive_value(value)
    def disconnect(self):
        super(TouchEncoderElement, self).disconnect()
        self._undo_step_handler = None
        return
    def _begin_undo_step(self):
        if self._undo_step_handler and self._trigger_undo_step:
            self._undo_step_handler.begin_undo_step()
            self._trigger_undo_step = False
            self._undo_step_open = True
    def _end_undo_step(self):
        # NOTE(review): _undo_step_open is never reset to False here —
        # presumably handled upstream or intentional; verify.
        if self._undo_step_handler and self._undo_step_open:
            self._undo_step_handler.end_undo_step()
|
[
"ratsnake.cbs@gmail.com"
] |
ratsnake.cbs@gmail.com
|
acd9a985926faad6a4fcbdf4d441313cd62cd668
|
b0741867b842fe177205c2fd714cabd34652ced4
|
/crawling/mmtaobao/sexpic.py
|
dd4edbee55c824bc1e1e6a92158773afc91f5084
|
[] |
no_license
|
zdYng/python
|
6737ea43b041f57e0d23598cfa2e5e23d5bd11ff
|
fd074f5700ec9733958e8640eb63af83aac3001f
|
refs/heads/master
| 2021-07-22T13:50:24.745405
| 2020-04-02T02:15:29
| 2020-04-02T02:15:29
| 93,690,795
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,062
|
py
|
# -*- coding: utf-8 -*-
import requests
import urllib2,re
import os
from mmtaobao.cons import headers
from lxml import etree
from parsel import Selector
import datetime
html =requests.get("http://cl.j4q.pw/htm_data/2/1709/2664044.html")
html.encoding = 'utf-8'
# req = urllib2.Request('http://cl.j4q.pw/htm_data/2/1709/2664044.html')
# req.add_header('user-agent', headers())
# html = urllib2.urlopen(req).read()
print html.content
# select = Selector(html.text)
# content =select.xpath('//div//img/@src')
regt = r'<img src="(.*?)" onclick="(?#...)" style="cursor:pointer>"'
hh = re.findall(regt, html)
print hh
# for imgurl in content:
#
# x=datetime.datetime.now()
#
# name = imgurl[-7:-1]
# os.chdir(r"D://pic")
# req = urllib2.Request(imgurl)
# req.add_header('User-agent', headers())
# #html = urllib2.urlopen(req).read().decode('gbk').encode('utf-8')
# response =urllib2.urlopen(req)
# f = open(name,'wb')
# f.write(response.read())
# f.close()
# y=datetime.datetime.now()
#
# print imgurl,(y-x).seconds
|
[
"qianzhongdao@163.com"
] |
qianzhongdao@163.com
|
0588e6013bc4ccd0a97c815853df716c9fa6e040
|
c0ea89d58fd6f780a23f10a0b5535b3feada5a1a
|
/anchore_engine/services/policy_engine/api/models/image_selection_rule.py
|
e0f9abbea332fcca8e57209b3916beb1d02c3c34
|
[
"Apache-2.0"
] |
permissive
|
longfeide2008/anchore-engine
|
b62acbab8c7ebbf7fa67a2503768c677942220e4
|
622786ec653531f4fb216cb33e11ffe31fe33a29
|
refs/heads/master
| 2022-11-08T10:02:51.988961
| 2020-06-15T18:00:37
| 2020-06-15T18:00:37
| 274,068,878
| 1
| 0
|
Apache-2.0
| 2020-06-22T07:27:39
| 2020-06-22T07:27:38
| null |
UTF-8
|
Python
| false
| false
| 4,764
|
py
|
# coding: utf-8
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from anchore_engine.services.policy_engine.api.models.base_model_ import Model
from anchore_engine.services.policy_engine.api.models.image_ref import ImageRef # noqa: F401,E501
from anchore_engine.services.policy_engine.api import util
class ImageSelectionRule(Model):
    """Swagger-generated model: a rule selecting images by registry,
    repository, and image reference.

    NOTE: originally produced by the swagger code generator; the public
    surface (attribute names, setter validation, serialization metadata)
    is unchanged.
    """

    def __init__(self, id=None, name=None, registry=None, repository=None, image=None):  # noqa: E501
        """Create an ImageSelectionRule.

        :param id: optional rule identifier (str)
        :param name: rule name (str; must not be None once assigned)
        :param registry: registry selector (str; must not be None once assigned)
        :param repository: repository selector (str; must not be None once assigned)
        :param image: image selector (ImageRef; must not be None once assigned)
        """
        # Type and wire-name metadata consumed by the swagger (de)serializer.
        self.swagger_types = {
            'id': str,
            'name': str,
            'registry': str,
            'repository': str,
            'image': ImageRef
        }
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'registry': 'registry',
            'repository': 'repository',
            'image': 'image'
        }

        self._id = id
        self._name = name
        self._registry = registry
        self._repository = repository
        self._image = image

    @classmethod
    def from_dict(cls, dikt):
        """Deserialize a plain dict into an ImageSelectionRule.

        :param dikt: A dict.
        :rtype: ImageSelectionRule
        """
        return util.deserialize_model(dikt, cls)

    # -- id: the only attribute whose setter accepts None ------------------

    @property
    def id(self):
        """Rule identifier (str)."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    # -- name --------------------------------------------------------------

    @property
    def name(self):
        """Rule name (str, required)."""
        return self._name

    @name.setter
    def name(self, name):
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name

    # -- registry ------------------------------------------------------------

    @property
    def registry(self):
        """Registry selector (str, required)."""
        return self._registry

    @registry.setter
    def registry(self, registry):
        if registry is None:
            raise ValueError("Invalid value for `registry`, must not be `None`")  # noqa: E501
        self._registry = registry

    # -- repository ----------------------------------------------------------

    @property
    def repository(self):
        """Repository selector (str, required)."""
        return self._repository

    @repository.setter
    def repository(self, repository):
        if repository is None:
            raise ValueError("Invalid value for `repository`, must not be `None`")  # noqa: E501
        self._repository = repository

    # -- image ---------------------------------------------------------------

    @property
    def image(self):
        """Image selector (ImageRef, required)."""
        return self._image

    @image.setter
    def image(self, image):
        if image is None:
            raise ValueError("Invalid value for `image`, must not be `None`")  # noqa: E501
        self._image = image
|
[
"zach@anchore.com"
] |
zach@anchore.com
|
41f4b127bfbd6b75174719694a023c07f6cca470
|
673e829dda9583c8dd2ac8d958ba1dc304bffeaf
|
/data/multilingual/Latn.TZO/Sun-ExtA_16/pdf_to_json_test_Latn.TZO_Sun-ExtA_16.py
|
1ec5da78381362fbe785a67e34d5996d974a7995
|
[
"BSD-3-Clause"
] |
permissive
|
antoinecarme/pdf_to_json_tests
|
58bab9f6ba263531e69f793233ddc4d33b783b7e
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
refs/heads/master
| 2021-01-26T08:41:47.327804
| 2020-02-27T15:54:48
| 2020-02-27T15:54:48
| 243,359,934
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 311
|
py
|
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TZO/Sun-ExtA_16/udhr_Latn.TZO_Sun-ExtA_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
4b803dc11bc61c8e8bfaa692a0a6f248f40f8b06
|
bf885e4a58ac5ab888890e297eafcfca907d7845
|
/hello_world_project/hello_world_project/urls.py
|
d3bf32b3ef4875f4d09711b297f85325df8055ae
|
[] |
no_license
|
manishbalyan/django-hello_world_app
|
c54e4875a9bb3dac7e58224f11e1cf6d60b70463
|
bc53fa0a8d3e57bc085bc113c0d5640521c45e44
|
refs/heads/master
| 2021-01-23T16:28:18.954683
| 2019-02-13T05:55:24
| 2019-02-13T05:55:24
| 38,373,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
from django.conf.urls import patterns, include, url

from django.contrib import admin

# URL routing for the hello_world_project (old-style patterns(), Django <1.10).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'hello_world_project.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # this link the project urls to app urls
    # NOTE(review): three routes are declared on one line; both r'^' and
    # r'^about/' delegate to hello_world.urls, so /about/ duplicates the
    # root routes — confirm that is intended.
    url(r'^admin/', include(admin.site.urls)), url(r'^', include('hello_world.urls')), url(r'^about/', include('hello_world.urls'))
)
|
[
"balyan05.manish@gmail.com"
] |
balyan05.manish@gmail.com
|
71b4c3192c59446446642f2dc38ac6eac594e87f
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_138/1273.py
|
7807dd8fe021579a8ca3aa6fa4f8c90eff1cc487
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
t = int(raw_input())
for i in range(1,t+1):
n = int(raw_input())
line = raw_input().split()
naomi = []
for j in range(0,n):
naomi.append(float(line[j]))
line = raw_input().split()
ken = []
for j in range(0,n):
ken.append(float(line[j]))
naomi = sorted(naomi)
ken = sorted(ken)
ind_ken = 0
ind_naomi = 0
end = False
while ind_ken != n:
while ken[ind_ken] < naomi[ind_naomi]:
ind_ken += 1
if ind_ken == n:
end = True
break
if end:
break
ind_naomi += 1
ind_ken += 1
w = len(naomi) - ind_naomi
dw = 0
while len(ken) > 0:
if ken[len(ken) - 1] < naomi[len(naomi) - 1]:
dw += 1
ken.pop()
naomi.pop()
else:
ken.pop()
naomi.pop(0)
str = "Case #%d: %d %d" % (i, dw, w)
print str
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
39abb1c58a1ae46d15c937d463dbc72c51ee8659
|
b641319ea5164c1eb5db77c819abdd1f8136fce3
|
/random_stream.py
|
26e2a2c280f4736e7a6b65c58e3d223854009094
|
[] |
no_license
|
Anwesh43/theano-starter
|
8d4b2a9e3023f10018f9005ef9a9e4583270fee0
|
87f2d987ce02a883889eac6543b82530d1b90989
|
refs/heads/master
| 2021-01-12T02:48:45.879958
| 2017-01-16T15:35:22
| 2017-01-16T15:35:22
| 78,109,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
import theano.tensor as T
from theano import *
from theano.tensor.shared_randomstreams import RandomStreams
srng = RandomStreams(seed=1000)
r_uv = srng.uniform((2,2))
r_nd = srng.normal((2,2))
rng_val = r_uv.rng.get_value(borrow=True)
rng_val.seed(345)
r_uv.rng.set_value(rng_val,borrow=True)
r_uniform = function([],r_uv)
r_normal = function([],r_nd,no_default_updates=True)
print r_uniform()
print r_normal()
print r_normal()
rnd_val = r_uv.rng.get_value(borrow=True)
state = rnd_val.get_state()
v1 = r_uniform()
v2 = r_uniform()
rnd_val = r_uv.rng.get_value(borrow=True)
rnd_val.set_state(state)
r_nd.rng.set_value(rnd_val)
v3 = r_uniform()
print v1
print v2
print v3
print v1 == v3
|
[
"anweshthecool0@gmail.com"
] |
anweshthecool0@gmail.com
|
1ea4f37d648dbba8cdb93a2e9036c0c97129ecf0
|
8c06beebdb5ee28f7292574fefd540f8c43a7acf
|
/Arctype_Dashboard/asgi.py
|
f843ce0156227c94479067214b7caa5e4e018782
|
[] |
no_license
|
progettazionemauro/ARCTYPE_DJANGO_DASHBOARD
|
0c3baf93c6a3f8dd28d9459a21a273efbed1f4e3
|
60d1dab19c32b7a80d70de85e846fd6760be9a26
|
refs/heads/master
| 2023-04-12T01:37:57.317231
| 2021-05-03T01:48:41
| 2021-05-03T01:48:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
"""
ASGI config for Arctype_Dashboard project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Arctype_Dashboard.settings')
application = get_asgi_application()
|
[
"chukslord1@gmail.com"
] |
chukslord1@gmail.com
|
bb352a077de0a96d708f7bd908b1d4f2e9c8b720
|
aa76391d5789b5082702d3f76d2b6e13488d30be
|
/programmers/Lev1/print_triangle.py
|
aeb50f3fd8f6008928c6bee577e7267406cb4451
|
[] |
no_license
|
B2SIC/python_playground
|
118957fe4ca3dc9395bc78b56825b9a014ef95cb
|
14cbc32affbeec57abbd8e8c4ff510aaa986874e
|
refs/heads/master
| 2023-02-28T21:27:34.148351
| 2021-02-12T10:20:49
| 2021-02-12T10:20:49
| 104,154,645
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
def printTriangle(num):
    """Return a left-aligned star triangle: row i (1..num) holds i
    asterisks, each row newline-terminated; empty string for num < 1."""
    return "".join("*" * width + "\n" for width in range(1, num + 1))


print(printTriangle(5))
|
[
"the_basic_@kookmin.ac.kr"
] |
the_basic_@kookmin.ac.kr
|
baa7678b52fae8e25d141a1b921f8006e86a6d26
|
66adad393a638d3a4cc47ed3d8b45b208f155ebe
|
/bookmarks1/account/views.py
|
bdac6a72fc611c4ef5ecf8d9c87d1849eaffa17e
|
[] |
no_license
|
Dyavathrocky/image_sharing
|
a5e265c65fde29c1f665c522230bd73dfbf16c23
|
0939240f9a96dd8c80de813939d79455e95782c7
|
refs/heads/main
| 2023-01-21T15:23:10.141362
| 2020-11-29T13:53:26
| 2020-11-29T13:53:26
| 316,220,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,172
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from .forms import LoginForm, UserRegistrationForm, \
UserEditForm, ProfileEditForm
from .models import Profile
from django.contrib import messages
# Create your views here.
@login_required
def dashboard(request):
    """Render the authenticated user's dashboard page."""
    return render(request,
                  'account/dashboard.html', {'section': 'dashboard'})
def user_login(request):
    """Authenticate a user from the posted LoginForm.

    Returns plain-text HttpResponses for the three outcomes (success,
    disabled account, bad credentials); on GET, or on an invalid POSTed
    form, renders the login page with the form.
    """
    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            user = authenticate(request,
                                username=cd['username'],
                                password=cd['password'])
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return HttpResponse('Authenticated '
                                        'successfully')
                else:
                    return HttpResponse('Disabled account')
            else:
                return HttpResponse('Invalid login')
    else:
        form = LoginForm()
    # Reached on GET and when a POSTed form fails validation.
    return render(request, 'account/login.html', {'form': form})
def register(request):
    """Register a new user and create their empty Profile.

    On a valid POST the password is hashed via set_password before the user
    is saved; an invalid POST falls through and re-renders the registration
    page with the bound form's errors.
    """
    if request.method == 'POST':
        user_form = UserRegistrationForm(request.POST)
        if user_form.is_valid():
            # Create a new user object but avoid saving it yet
            new_user = user_form.save(commit=False)
            # Set the chosen password
            new_user.set_password(
                user_form.cleaned_data['password'])
            # Save the User object
            new_user.save()
            # Create the user profile
            Profile.objects.create(user=new_user)
            return render(request,
                          'account/register_done.html',
                          {'new_user': new_user})
    else:
        user_form = UserRegistrationForm()
    return render(request,
                  'account/register.html',
                  {'user_form': user_form})
@login_required
def edit(request):
    """Let the logged-in user edit their User and related Profile together.

    Both forms must validate for either to be saved; feedback is delivered
    through the messages framework.
    """
    if request.method == 'POST':
        user_form = UserEditForm(instance=request.user,
                                 data=request.POST)
        # files=request.FILES so profile image uploads are handled.
        profile_form = ProfileEditForm(
                                    instance=request.user.profile,
                                    data=request.POST,
                                    files=request.FILES)
        if user_form.is_valid() and profile_form.is_valid():
            user_form.save()
            profile_form.save()
            messages.success(request, 'Profile updated successfully')
        else:
            messages.error(request, 'Error updating your profile')
    else:
        user_form = UserEditForm(instance=request.user)
        profile_form = ProfileEditForm(
                                    instance=request.user.profile)
    return render(request,
                  'account/edit.html',
                  {'user_form': user_form,
                   'profile_form': profile_form})
|
[
"davathrak@gmail.com"
] |
davathrak@gmail.com
|
e7e0deac411c991076dc18e374867a07a253d989
|
d89a482aaf3001bbc4515f39af9ba474e1ae6062
|
/sip/sip_history.py
|
7bdbef694f14c90a11c7df182424967f95a137dc
|
[] |
no_license
|
hongtao510/u_tool
|
2925e3694aba81714cf83018c3f8520a7b503228
|
98c962cfb1f53c4971fb2b9ae22c882c0fae6497
|
refs/heads/master
| 2021-01-10T20:40:24.793531
| 2014-03-14T22:57:37
| 2014-03-14T22:57:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,527
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 03 13:30:41 2012
@author: jharston
"""
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import os
from uber import uber_lib
import history_tables
import rest_funcs
class SIPHistoryPage(webapp.RequestHandler):
    # GET handler that renders the "SIP User History" page by concatenating
    # a fixed sequence of shared "uber" templates around a history table.
    def get(self):
        templatepath = os.path.dirname(__file__) + '/../templates/'
        # The page skin is chosen from the user's cookie.
        ChkCookie = self.request.cookies.get("ubercookie")
        html = uber_lib.SkinChk(ChkCookie, "SIP User History")
        html = html + template.render(templatepath + '02uberintroblock_wmodellinks.html', {'model':'sip','page':'history'})
        html = html + template.render(templatepath + '03ubertext_links_left.html', {})
        html = html + template.render(templatepath + '04uberalgorithm_start.html', {
                                'model':'sip',
                                'model_attributes':'SIP User History'})
        html = html + template.render (templatepath + 'history_pagination.html', {})
        # NOTE(review): the user is hard-coded to 'admin' — presumably a
        # placeholder; confirm whether the logged-in user should be passed.
        hist_obj = rest_funcs.user_hist('admin', 'sip')
        html = html + history_tables.table_all(hist_obj)
        html = html + template.render(templatepath + '04ubertext_end.html', {})
        html = html + template.render(templatepath + '06uberfooter.html', {'links': ''})
        self.response.out.write(html)
# Route every path to SIPHistoryPage; debug=True surfaces stack traces.
app = webapp.WSGIApplication([('/.*', SIPHistoryPage)], debug=True)

def main():
    # Entry point for the App Engine CGI runtime.
    run_wsgi_app(app)

if __name__ == '__main__':
    main()
|
[
"hongtao510@gmail.com"
] |
hongtao510@gmail.com
|
d57dc6bd6e6ed40663cea90c3cb805e43497b4f9
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/news/models_20201124144813.py
|
df865398f4c40cdf05ca57629f9dae3cd204713b
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415
| 2021-01-05T14:55:45
| 2021-01-05T14:55:45
| 303,961,094
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
from django.db import models
from wagtail.contrib.forms.models import AbstractEmailForm
# Create your models here.
class FormField(AbstractEmailForm):
    # NOTE(review): a Wagtail form-field model is conventionally derived from
    # AbstractFormField, not AbstractEmailForm, and ParentalKey comes from
    # modelcluster.fields, which this (work-in-progress) file never imports —
    # confirm the intended base class and add the missing imports.
    page = ParentalKey(
        'NewsPage',
        on_delete=models.CASCADE,
        # BUG FIX: `related_name` was a bare name (NameError at import time).
        # NewsPage's InlinePanel('form_fields') expects this reverse accessor.
        related_name='form_fields',
    )
class NewsPage(AbstractEmailForm):
    # NOTE(review): RichTextField, FieldPanel, ImageChooserPanel and
    # InlinePanel are referenced but never imported in this work-in-progress
    # file — the wagtail imports still need to be added at the top.
    template = 'news/news_page.html'                     # FIX: was `tempalte`
    landing_page_template = 'news/news_page_leading.html'  # FIX: was `leanding_page_template`
    subpage_types = []
    max_count = 1                                        # FIX: was `max_coun`

    intro = RichTextField(blank=True, features=['bold', 'italic', 'ol', 'ul'])
    thank_you_text = RichTextField(
        blank=True,
        features=['bold', 'italic', 'ol', 'ul'])
    map_image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        help_text='Obrazek będzie przycięty do rozmairu 588px na 355 px',
        related_name='+',
    )
    map_url = models.URLField(
        blank=True,
        help_text='Opcjonalne. Jeśli podasz tutaj łączę, obraz stanie się łączem.'
    )

    # FIX: Wagtail's attribute is `content_panels`; the base list lives at
    # AbstractEmailForm.content_panels (was `content_panel`, an AttributeError).
    content_panels = AbstractEmailForm.content_panels + [
        FieldPanel('intro'),
        ImageChooserPanel('map_image'),                  # FIX: was 'map_iamge'
        FieldPanel('map_url'),
        InlinePanel('form_fields', label="Form Fields"),
        FieldPanel('thank_you_text'),
        FieldPanel('from_address'),
        FieldPanel('to_address'),
        FieldPanel('subject'),
    ]
|
[
"rucinska.patrycja@gmail.com"
] |
rucinska.patrycja@gmail.com
|
a8f2cafb277643c76e1a634c5fcab184d07b9eb5
|
04b1803adb6653ecb7cb827c4f4aa616afacf629
|
/third_party/blink/web_tests/external/wpt/tools/third_party/pytest/src/_pytest/_code/__init__.py
|
815c13b42c25bd314988dbaa7ff9f4e3d1d2e5c2
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
Samsung/Castanets
|
240d9338e097b75b3f669604315b06f7cf129d64
|
4896f732fc747dfdcfcbac3d442f2d2d42df264a
|
refs/heads/castanets_76_dev
| 2023-08-31T09:01:04.744346
| 2021-07-30T04:56:25
| 2021-08-11T05:45:21
| 125,484,161
| 58
| 49
|
BSD-3-Clause
| 2022-10-16T19:31:26
| 2018-03-16T08:07:37
| null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
""" python inspection/code generation API """
from __future__ import absolute_import, division, print_function
from .code import Code # noqa
from .code import ExceptionInfo # noqa
from .code import Frame # noqa
from .code import Traceback # noqa
from .code import getrawcode # noqa
from .source import Source # noqa
from .source import compile_ as compile # noqa
from .source import getfslineno # noqa
|
[
"sunny.nam@samsung.com"
] |
sunny.nam@samsung.com
|
4dde79d5e3be0ffc2d8fdc9b8d3237fd2be57c5b
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/Dowsure.py
|
c4c979d73ab936957a9778dbb5945bfedab00234
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 961
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Dowsure(object):
    """Alipay API model carrying a single field, ``application_code``."""

    def __init__(self):
        self._application_code = None

    @property
    def application_code(self):
        """The application code value (any type; nested models allowed)."""
        return self._application_code

    @application_code.setter
    def application_code(self, value):
        self._application_code = value

    def to_alipay_dict(self):
        """Serialize to a plain dict, omitting the field when it is falsy;
        nested models providing ``to_alipay_dict`` are serialized in turn."""
        result = dict()
        code = self.application_code
        if code:
            if hasattr(code, 'to_alipay_dict'):
                result['application_code'] = code.to_alipay_dict()
            else:
                result['application_code'] = code
        return result

    @staticmethod
    def from_alipay_dict(d):
        """Build a Dowsure from dict *d*; returns None for falsy input."""
        if not d:
            return None
        obj = Dowsure()
        if 'application_code' in d:
            obj.application_code = d['application_code']
        return obj
|
[
"jishupei.jsp@alibaba-inc.com"
] |
jishupei.jsp@alibaba-inc.com
|
4905f1162de481d5c10d057cf7e2d91f01cd6fba
|
a3d32e0ff84958d194ced642441f5379c0032465
|
/tests/functions/test_image_train_process.py
|
3fe7e75cf95dfa56d3155c3a714ddfd2389acd77
|
[] |
no_license
|
TensorMSA/tensormsa_old
|
406755511d05d4ec179c085337a05f73c0dde80a
|
ef058737f391de817c74398ef9a5d3a28f973c98
|
refs/heads/master
| 2021-06-18T11:58:29.349060
| 2017-04-20T10:17:43
| 2017-04-20T10:17:43
| 67,384,681
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,430
|
py
|
import unittest, requests, os, json,random
from tfmsacore.utils.logger import tfmsa_logger
from django.core.files.uploadedfile import TemporaryUploadedFile
from tfmsacore.data import ImageManager
class TestImageTrainProcess(unittest.TestCase):
    """
    ./manage.py jenkins ./tests/functions --enable-coverage
    ./manage.py jenkins ./tests/functions
    """
    # Random suffix presumably intended to uniquify test artifacts.
    # NOTE(review): unused inside this test case — confirm before removing.
    rand_name = str(random.randrange(1,99999))

    def test_image_train(self):
        # Functional test: drives the image-training REST API end to end on
        # the host named by $HOSTNAME, port 8989, for network id nn0000090.
        # Each step POSTs JSON and fails unless the service reports "200".
        host_name = "{0}:{1}".format(os.environ['HOSTNAME'], "8989")

        tfmsa_logger("[1] Image file format update")
        # Declare the 32x32 input image format for table testtable2.
        resp = requests.post('http://' + host_name + '/api/v1/type/imagefile/base/mes/table/testtable2/format/nn0000090/',
                             json={"x_size": 32,"y_size": 32 })
        if(json.loads(resp.json())['status'] != "200"):
            raise Exception ("RESI Service Fail")

        tfmsa_logger("[2] Network info update")
        resp = requests.post('http://' + host_name + '/api/v1/type/common/nninfo/',
                             json={
                                 "nn_id": "nn0000090",
                                 "category": "SCM",
                                 "subcate": "csv",
                                 "name": "CENSUS_INCOME",
                                 "desc": "INCOME PREDICT"
                             })
        if (json.loads(resp.json())['status'] != "200"):
            raise Exception("RESI Service Fail")

        tfmsa_logger("[3] Network configuration update")
        # CNN layout: conv input -> conv -> reshape -> dropout -> softmax out.
        resp = requests.post('http://' + host_name + '/api/v1/type/cnn/conf/nn0000090/',
                             json={
                                 "data":
                                     {
                                         "datalen": 1024,
                                         "taglen": 2,
                                         "matrix": [32, 32],
                                         "learnrate": 0.01,
                                         "epoch": 10
                                     },
                                 "layer":
                                     [
                                         {
                                             "type": "input",
                                             "active": "relu",
                                             "cnnfilter": [2, 2],
                                             "cnnstride": [2, 2],
                                             "maxpoolmatrix": [2, 2],
                                             "maxpoolstride": [2, 2],
                                             "node_in_out": [1, 16],
                                             "regualizer": "",
                                             "padding": "SAME",
                                             "droprate": ""
                                         },
                                         {
                                             "type": "cnn",
                                             "active": "relu",
                                             "cnnfilter": [2, 2],
                                             "cnnstride": [2, 2],
                                             "maxpoolmatrix": [2, 2],
                                             "maxpoolstride": [2, 2],
                                             "node_in_out": [16, 32],
                                             "regualizer": "",
                                             "padding": "SAME",
                                             "droprate": ""
                                         },
                                         {
                                             "type": "reshape",
                                         },
                                         {
                                             "type": "drop",
                                             "active": "relu",
                                             "regualizer": "",
                                             "droprate": "0.5"
                                         },
                                         {
                                             "type": "out",
                                             "active": "softmax",
                                             "cnnfilter": "",
                                             "cnnstride": "",
                                             "maxpoolmatrix": "",
                                             "maxpoolstride": "",
                                             "node_in_out": [32, 2],
                                             "regualizer": "",
                                             "padding": "SAME",
                                             "droprate": ""
                                         }
                                     ]
                             })
        if (json.loads(resp.json())['status'] != "200"):
            raise Exception("RESI Service Fail")

        tfmsa_logger("[4] Train Neural Network")
        resp = requests.post('http://' + host_name + '/api/v1/type/cnn/train/nn0000090/',
                             json={
                                 "epoch": "10",
                                 "testset": "10"
                             })
        if (json.loads(resp.json())['status'] != "200"):
            raise Exception("RESI Service Fail")

        tfmsa_logger("[5] PASS TEST")
|
[
"tmddno1@naver.com"
] |
tmddno1@naver.com
|
7cfee5b9df13834712ed1c7dfcb5aaac39cd1210
|
cd8f7ecd20c58ce1ae0fe3840f7c7ee961aa5819
|
/Third Maximum Number.py
|
e33e7d88fe709f5f961c58fc1b2f6c3993b73f63
|
[
"Apache-2.0"
] |
permissive
|
sugia/leetcode
|
9b0f2a3521b088f8f7e5633c2c6c17c76d33dcaf
|
6facec2a54d1d9f133f420c9bce1d1043f57ebc6
|
refs/heads/master
| 2021-06-05T07:20:04.099488
| 2021-02-24T07:24:50
| 2021-02-24T07:24:50
| 29,124,136
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 855
|
py
|
'''
Given a non-empty array of integers, return the third maximum number in this array. If it does not exist, return the maximum number. The time complexity must be in O(n).
Example 1:
Input: [3, 2, 1]
Output: 1
Explanation: The third maximum is 1.
Example 2:
Input: [1, 2]
Output: 2
Explanation: The third maximum does not exist, so the maximum (2) is returned instead.
Example 3:
Input: [2, 2, 3, 1]
Output: 1
Explanation: Note that the third maximum here means the third maximum distinct number.
Both numbers with value 2 are both considered as second maximum.
'''
class Solution(object):
    def thirdMax(self, nums):
        """Return the third largest *distinct* value in nums, or the maximum
        when fewer than three distinct values exist.

        Single O(n) pass tracking the top three distinct values — the problem
        statement requires O(n); the previous sort-based version was
        O(n log n).

        :type nums: List[int]
        :rtype: int
        """
        first = second = third = None
        for x in nums:
            # Skip duplicates of values already ranked.
            if x == first or x == second or x == third:
                continue
            if first is None or x > first:
                first, second, third = x, first, second
            elif second is None or x > second:
                second, third = x, second
            elif third is None or x > third:
                third = x
        return first if third is None else third
|
[
"noreply@github.com"
] |
sugia.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.