blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d208cb86e1da4bd00d174eed3f2d2dd895b22964
|
9af2aef1e0d1033217179394bff6344858da9f4d
|
/python/chronos/test/bigdl/chronos/model/tf2/test_Seq2Seq_keras.py
|
f931bb30d652d232574b5d72337c303cc4f9a892
|
[
"Apache-2.0"
] |
permissive
|
analytics-zoo/BigDL
|
9d5fec6dea5eaf458ef87edbbf2bd4d5e4a24f4d
|
6c18e9337b649df3a975f8293c15fcd075d4f11f
|
refs/heads/main
| 2023-03-16T04:42:14.787392
| 2022-11-28T08:44:03
| 2022-11-28T08:44:03
| 557,722,893
| 2
| 0
|
Apache-2.0
| 2023-01-18T14:23:55
| 2022-10-26T07:20:43
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,776
|
py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import tensorflow as tf
import pytest
from unittest import TestCase
from bigdl.chronos.model.tf2.Seq2Seq_keras import LSTMSeq2Seq, model_creator
import numpy as np
def create_data():
    """Build random train/test splits for seq2seq smoke tests.

    Returns:
        tuple: ((train_x, train_y), (test_x, test_y)) where x has shape
        (n, 10, 10) and y has shape (n, future_seq_len, 2); future_seq_len
        is drawn uniformly from [1, 4] once per call so train and test agree.
    """
    past_seq_len = 10
    input_feature_num = 10
    output_feature_num = 2
    future_seq_len = np.random.randint(1, 5)

    def make_split(num_samples):
        # Features and targets are independent Gaussian noise; only the
        # shapes matter for these tests.
        features = np.random.randn(num_samples, past_seq_len, input_feature_num)
        targets = np.random.randn(num_samples, future_seq_len, output_feature_num)
        return features, targets

    return make_split(1000), make_split(400)
@pytest.mark.skipif(tf.__version__ < '2.0.0', reason="Run only when tf>2.0.0.")
class TestSeq2Seq(TestCase):
    """Smoke tests for the chronos Keras LSTM Seq2Seq model wrapper."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_seq2seq_fit_predict_evaluate(self):
        """Fit on random data, then check predict shape and that evaluate runs."""
        train_data, test_data = create_data()
        model = model_creator(config={
            "input_feature_num": 10,
            "output_feature_num": 2,
            # future_seq_len is randomized per run; read it back from the data.
            "future_seq_len": test_data[-1].shape[1],
            "lstm_hidden_dim": 32
        })
        model.fit(train_data[0],
                  train_data[1],
                  epochs=2,
                  validation_data=test_data)
        yhat = model.predict(test_data[0])
        model.evaluate(test_data[0], test_data[1])
        # Predictions: (num test samples, future_seq_len, output_feature_num).
        assert yhat.shape == (400, train_data[-1].shape[1], 2)

    def test_seq2seq_save_load(self):
        """A saved-and-reloaded model must evaluate (almost) identically."""
        train_data, test_data = create_data()
        model = model_creator(config={
            "input_feature_num": 10,
            "output_feature_num": 2,
            "future_seq_len": test_data[-1].shape[1],
            "lstm_hidden_dim": 32
        })
        model.fit(train_data[0],
                  train_data[1],
                  epochs=2,
                  validation_data=test_data)
        with tempfile.TemporaryDirectory() as tmp_dir_file:
            model.save(tmp_dir_file)
            import keras
            # The custom class must be registered for deserialization.
            restore_model = keras.models.load_model(tmp_dir_file,
                                                    custom_objects={"LSTMSeq2Seq": LSTMSeq2Seq})
        model_res = model.evaluate(test_data[0], test_data[1])
        restore_model_res = restore_model.evaluate(test_data[0], test_data[1])
        np.testing.assert_almost_equal(model_res, restore_model_res, decimal=5)
        assert isinstance(restore_model, LSTMSeq2Seq)

    def test_seq2seq_freeze_training(self):
        """Inference mode must be deterministic; training mode should not be."""
        train_data, test_data = create_data()
        model = model_creator(config={
            "input_feature_num": 10,
            "output_feature_num": 2,
            "future_seq_len": test_data[-1].shape[1],
            "lstm_hidden_dim": 32
        })
        freeze_yhat = model(test_data[0], training=False)
        _freeze_yhat = model(test_data[0], training=False)
        assert np.all(_freeze_yhat == freeze_yhat)
        _unfreeze_yhat = model(test_data[0], training=True)
        unfreeze_yhat = model(test_data[0], training=True)
        # NOTE(review): assumes the model has stochastic training-time layers
        # (e.g. dropout); otherwise two training-mode outputs could coincide.
        assert np.any(_unfreeze_yhat != unfreeze_yhat)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    pytest.main([__file__])
|
[
"noreply@github.com"
] |
analytics-zoo.noreply@github.com
|
743aee244af3e5076a2c6eced6ffea7266f9cf44
|
b501a5eae1018c1c26caa96793c6ee17865ebb2d
|
/Asyncio/asyncio_coroutine_return.py
|
33de7d2adede0d01e538c965d748f6871d4e2b33
|
[] |
no_license
|
jincurry/standard_Library_Learn
|
12b02f9e86d31ca574bb6863aefc95d63cc558fc
|
6c7197f12747456e0f1f3efd09667682a2d1a567
|
refs/heads/master
| 2022-10-26T07:28:36.545847
| 2018-05-04T12:54:50
| 2018-05-04T12:54:50
| 125,447,397
| 0
| 1
| null | 2022-10-02T17:21:50
| 2018-03-16T01:32:50
|
Python
|
UTF-8
|
Python
| false
| false
| 348
|
py
|
import asyncio
async def coroutine():
    """Print a marker, then return a plain value to the awaiting caller."""
    print('in coroutine')
    return 'result'
# Drive the coroutine to completion on an explicitly managed event loop.
# NOTE(review): asyncio.get_event_loop() for this purpose is deprecated
# since Python 3.10; asyncio.run() is the modern one-liner equivalent.
event_loop = asyncio.get_event_loop()
try:
    print('Starting')
    # run_until_complete() hands back the coroutine's return value.
    return_value = event_loop.run_until_complete(
        coroutine()
    )
    print('return from coroutine')
    print('it returned: {!r}'.format(return_value))
finally:
    # Release the loop's resources even if the coroutine raised.
    event_loop.close()
|
[
"jintao422516@gmail.com"
] |
jintao422516@gmail.com
|
5cffbe8f48e68fffb7131c55cf9163e826b0549a
|
9bc228372e586a1f90bb0685c43e744be9638ecd
|
/18_배연주/session08/blogproject/urls.py
|
9a3215ba5702b7c22567450884a56611561f2a6b
|
[
"MIT"
] |
permissive
|
LikeLionSCH/9th_ASSIGNMENT
|
3e58862a76e3232aed7e19e8939da23330ff2e22
|
c211995ad12f404833ffec7fd80e1229b82a3bfa
|
refs/heads/master
| 2023-07-03T10:27:11.843177
| 2021-08-02T14:52:02
| 2021-08-02T14:52:02
| 379,633,279
| 7
| 18
|
MIT
| 2021-08-02T14:52:03
| 2021-06-23T14:36:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,043
|
py
|
"""blogproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# Route table: admin is built in; each app manages its own URLs via include().
urlpatterns = [
    path('admin/', admin.site.urls),
    path('blog/', include('blog.urls')),
    path('account/', include('account.urls')),
    path('restaurant/', include('restaurant.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# NOTE(review): static() is intended only for serving MEDIA files during
# development; production should serve media via the web server.
|
[
"mmy789@sch.ac.kr"
] |
mmy789@sch.ac.kr
|
2aca437b37742fa916d9eb0f1fd52264d265fc4a
|
07aa9b5a07df2a80b7d899da1da63c84b1060fec
|
/src/iegen/ast/visitor/_collect_symbolics_visitor.py
|
88c6cd9d5322f63bf3ec2910d6d0d9b99e1c6427
|
[] |
no_license
|
lamielle/iegen
|
f26da812a01557daca086e0a1c76a62af8fe7cd4
|
0f48edad8d14ae18c907d705751552cf6eb53c8e
|
refs/heads/master
| 2016-09-05T12:48:23.698779
| 2010-12-14T19:17:13
| 2010-12-14T19:17:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
from iegen.ast.visitor import DFVisitor
class CollectSymbolicsVisitor(DFVisitor):
    """Depth-first visitor that gathers the names of all symbolics.

    While traversal is in progress, ``symbolics`` is a set of names; once the
    enclosing Set/Relation has been fully visited it is frozen into a sorted
    list for deterministic ordering.
    """

    def __init__(self):
        self.symbolics = set()

    def _outFormula(self, node):
        # Convert the accumulated name set into a deterministic sorted list.
        self.symbolics = sorted(list(self.symbolics))

    def outSet(self, node):
        self._outFormula(node)

    def outRelation(self, node):
        self._outFormula(node)

    def _outPresFormula(self, node):
        # Merge this presburger formula's symbolic names into the running set.
        self.symbolics = self.symbolics | {sym.name for sym in node.symbolics}

    def outPresSet(self, node):
        self._outPresFormula(node)

    def outPresRelation(self, node):
        self._outPresFormula(node)
|
[
"lamielle@cs.colostate.edu"
] |
lamielle@cs.colostate.edu
|
bc5aff48d559df398e4a2f66aaeeb0e17884dd6b
|
a1bccead14fe67f560ca54aad98dbb2367c7568b
|
/tensorpack/tfutils/common.py
|
42588423b3fea4d2fd9c3f858dff7f6cb15af704
|
[
"Apache-2.0"
] |
permissive
|
Peratham/tensorpack
|
9ea7e714b41de1aa4393454d2fa0a88d3b7568b9
|
e21fc267c0ada1377bffcc008dad31c28326690d
|
refs/heads/master
| 2021-01-11T17:46:10.488888
| 2017-01-23T17:13:43
| 2017-01-23T17:13:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,547
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: common.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
from ..utils.naming import GLOBAL_STEP_VAR_NAME, GLOBAL_STEP_OP_NAME
import tensorflow as tf
from copy import copy
import six
from contextlib import contextmanager
# Names exported via `import *`; keep in sync with the definitions below.
__all__ = ['get_default_sess_config',
           'get_global_step',
           'get_global_step_var',
           'get_op_tensor_name',
           'get_tensors_by_names',
           'get_op_or_tensor_by_name',
           'backup_collection',
           'restore_collection',
           'clear_collection',
           'freeze_collection',
           'get_tf_version',
           'get_name_scope_name'
           ]
def get_default_sess_config(mem_fraction=0.99):
    """
    Build a TF session config that is friendlier than the default
    (the stock config consumes too many resources).

    Args:
        mem_fraction(float): fraction of GPU memory to use.

    Returns:
        tf.ConfigProto: the config to use.
    """
    conf = tf.ConfigProto()
    conf.allow_soft_placement = True
    # Grow GPU memory on demand, capped at mem_fraction, with the BFC allocator.
    conf.gpu_options.allow_growth = True
    conf.gpu_options.allocator_type = 'BFC'
    conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction
    # conf.log_device_placement = True
    return conf
def get_global_step_var():
    """
    Returns:
        tf.Tensor: the global_step variable in the current graph. create if
        doesn't exist.
    """
    try:
        # Fast path: the variable already exists in the default graph.
        return tf.get_default_graph().get_tensor_by_name(GLOBAL_STEP_VAR_NAME)
    except KeyError:
        scope = tf.get_variable_scope()
        # Creating it inside a non-root variable scope would prefix the name
        # and make the lookup above fail forever after.
        assert scope.name == '', \
            "Creating global_step_var under a variable scope would cause problems!"
        with tf.variable_scope(scope, reuse=False):
            # Scalar int32, excluded from training (trainable=False).
            var = tf.get_variable(GLOBAL_STEP_OP_NAME, shape=[],
                                  initializer=tf.constant_initializer(dtype=tf.int32),
                                  trainable=False, dtype=tf.int32)
        return var
def get_global_step():
    """
    Returns:
        float: global_step value in current graph and session"""
    # Reads the variable's current value through the default session.
    return tf.train.global_step(
        tf.get_default_session(),
        get_global_step_var())
def get_op_tensor_name(name):
    """
    Normalize *name* into an (op_name, tensor_name) pair.

    A tensor name ends with ':<digit>' (e.g. 'x:0'); an op name does not.
    For an op name, the corresponding tensor is assumed to be output 0.

    Args:
        name(str): name of an op or a tensor

    Returns:
        tuple: (op_name, tensor_name)
    """
    looks_like_tensor = len(name) >= 3 and name[-2] == ':'
    if looks_like_tensor:
        return name[:-2], name
    return name, name + ':0'
def get_tensors_by_names(names):
    """
    Resolve a list of op/tensor names to tensors in the default graph.

    Args:
        names (list):
    """
    graph = tf.get_default_graph()
    # get_op_tensor_name()[1] is always the tensor-style name.
    return [graph.get_tensor_by_name(get_op_tensor_name(n)[1]) for n in names]
def get_op_or_tensor_by_name(name):
    """Look up either a tensor (name ends with ':<digit>') or an op."""
    G = tf.get_default_graph()
    is_tensor_name = len(name) >= 3 and name[-2] == ':'
    if is_tensor_name:
        return G.get_tensor_by_name(name)
    return G.get_operation_by_name(name)
def backup_collection(keys):
    """
    Snapshot the contents of some graph collections.

    Args:
        keys (list): list of collection keys to backup
    Returns:
        dict: the backup
    """
    # copy() so later mutation of the live collection can't touch the backup.
    return {key: copy(tf.get_collection(key)) for key in keys}
def restore_collection(backup):
    """
    Restore collections from a backup made by :func:`backup_collection`.

    Args:
        backup (dict):
    """
    for key, saved in six.iteritems(backup):
        # In-place slice assignment keeps the same list object registered in
        # the graph while replacing all of its contents.
        tf.get_collection_ref(key)[:] = saved
def clear_collection(keys):
    """
    Empty the given graph collections in place.

    Args:
        keys(list): list of collection keys.
    """
    for key in keys:
        collection_ref = tf.get_collection_ref(key)
        del collection_ref[:]
@contextmanager
def freeze_collection(keys):
    """
    Args:
        keys(list): list of collection keys to freeze.

    Returns:
        a context where the collections are in the end restored to its initial state.
    """
    # NOTE(review): the backup is restored only on normal exit; an exception
    # inside the `with` body skips restore_collection (no try/finally here).
    backup = backup_collection(keys)
    yield
    restore_collection(backup)
def get_tf_version():
    """
    Returns:
        int: the second ("minor") component of tf.__version__.
    """
    version_fields = tf.__version__.split('.')
    return int(version_fields[1])
def get_name_scope_name():
    """
    Returns:
        str: the name of the current name scope, without the ending '/'.
    """
    # Probe the current scope by uniquifying a sentinel name and stripping
    # the sentinel back off the qualified result.
    graph = tf.get_default_graph()
    sentinel = "RANDOM_STR_ABCDEFG"
    qualified = graph.unique_name(sentinel)
    return qualified[:-len(sentinel)].rstrip('/')
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
4ea7324dc735700864355bf887688d287410ce00
|
3529ecaa44a53172094ba13498097057c8972723
|
/Questiondir/681.next-closest-time/681.next-closest-time_120259498.py
|
e5e0ccb34cb9903d4bb82ad4979a9600dfb35202
|
[] |
no_license
|
cczhong11/Leetcode-contest-code-downloader
|
0681f0f8c9e8edd5371fd8d0a1d37dcc368566b6
|
db64a67869aae4f0e55e78b65a7e04f5bc2e671c
|
refs/heads/master
| 2021-09-07T15:36:38.892742
| 2018-02-25T04:15:17
| 2018-02-25T04:15:17
| 118,612,867
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
from datetime import datetime, timedelta
def check(dt, time_str):
    """Return True iff every digit of dt rendered as HHMM appears in time_str."""
    digits = set(dt.strftime("%H%M"))
    return digits <= time_str


class Solution(object):
    def nextClosestTime(self, time):
        """
        :type time: str
        :rtype: str
        """
        # Brute force: step forward one minute at a time until a time is
        # found whose digits are all drawn from the input's digits.
        allowed = set(time.replace(':', ''))
        current = datetime.strptime(time, "%H:%M")
        while True:
            current += timedelta(minutes=1)
            if check(current, allowed):
                return current.strftime("%H:%M")
|
[
"tczhong24@gmail.com"
] |
tczhong24@gmail.com
|
12dc6835334446f6a874d6a58eaed4579d21533c
|
489a45659476fafb66934427e42bfce3d60a0116
|
/Assets/Python/BUG/CityUtil.py
|
f796f4f687860987bd082b19c841cbcfb1108044
|
[] |
no_license
|
billw2012/Caveman2Cosmos
|
3a8c6ea347e75dbe2de9519fe70e6b38e0cf6dbe
|
2382877536e1669972dd024ce2d0f3d0d5ffd988
|
refs/heads/master
| 2020-07-19T00:14:48.856106
| 2019-09-03T23:20:42
| 2019-09-03T23:21:02
| 197,989,388
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
## CityUtil
##
## Collection of utility functions for dealing with cities.
##
## Copyright (c) 2009 The BUG Mod.
##
## Author: EmperorFool
from CvPythonExtensions import *
## Globals

# Cached handle to the game engine's global context (CyGlobalContext).
gc = CyGlobalContext()
## Growth and Starvation
def willGrowThisTurn(city):
	"""
	Returns True if <city> will increase its population due to growth this turn.

	Emphasize No Growth must be off for the city, and its food rate plus
	storage must reach the growth threshold.
	"""
	if city.AI_isEmphasize(5):
		# Emphasize No Growth (index 5) suppresses growth entirely.
		return False
	projected_food = city.getFood() + city.foodDifference(True)
	return projected_food >= city.growthThreshold()
def willShrinkThisTurn(city):
	"""
	Returns True if <city> will decrease its population due to starvation this turn.

	It must have at least two population, and its food rate plus storage must
	be negative.
	"""
	if city.getPopulation() <= 1:
		# A size-1 city cannot shrink further.
		return False
	return city.getFood() + city.foodDifference(True) < 0
|
[
"alberts2@live.de"
] |
alberts2@live.de
|
3a0de6c4abfd97a3c5efda00e2039e3beaa66d28
|
1587d5444e18bea9b1c9cbe1a01c2f2aa03892d8
|
/root/db.py
|
075284412ac455bf9d4b2cf442784a5db6f7cda2
|
[] |
no_license
|
SofiiaShumel/new_flask
|
c5593e1df21023695ed287a879e4e77da6321bbc
|
62692a7de5eb328b2ba2fec9e1a5ff7f98ccefb2
|
refs/heads/master
| 2020-09-28T08:56:48.348025
| 2019-12-07T22:31:27
| 2019-12-07T22:31:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,633
|
py
|
import sqlalchemy as db
from sqlalchemy import MetaData, Table, Column
from sqlalchemy import create_engine, Column, String, Integer, ForeignKey, Float, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
import root.credentials
from root.entities import Player, Bet, Bank, Casino, Usernames, Country
class Database():
    """CRUD facade over the casino schema (players, bets, banks, countries).

    The SQLAlchemy engine is built once at class-definition time from the
    credentials module; each instance opens its own connection, and every
    operation creates a fresh ORM Session bound to that connection.
    """
    # replace the user, password, hostname and database according to your configuration according to your information
    cstr = 'postgresql://{user}:{password}@{hostname}/{database}'.format(
        user=root.credentials.username,
        password=root.credentials.password,
        hostname=root.credentials.host,
        database=root.credentials.database
    )
    # Shared by all instances; created when the module is imported.
    engine = db.create_engine(cstr)

    def __init__(self):
        # One long-lived connection per Database instance.
        self.connection = self.engine.connect()
        print("DB Instance created")

    # Player
    def createPlayer(self, player):
        # Insert a new Player row.
        session = Session(bind=self.connection)
        session.add(player)
        session.commit()
        print("Player created successfully!")

    def updatePlayer(self, player_id, player_balance, player_passwd):
        # Overwrite balance and password for the given player id.
        session = Session(bind=self.connection)
        dataToUpdate = {Player.balance: player_balance, Player.passwrd: player_passwd}
        playerData = session.query(Player).filter(Player.player_id == player_id)
        playerData.update(dataToUpdate)
        session.commit()
        print("Player updated successfully!")

    def fetchAllPlayers(self):
        # Return every Player row.
        self.session = Session(bind=self.connection)
        players = self.session.query(Player).all()
        return players

    def fetchPlayer(self, player_id):
        # Return one Player by id, or None if absent.
        self.session = Session(bind=self.connection)
        player = self.session.query(Player).filter(Player.player_id == player_id).first()
        return player

    def deletePlayer(self, player_id):
        # NOTE(review): if the player does not exist, first() returns None
        # and session.delete(None) raises.
        session = Session(bind=self.connection)
        playerData = session.query(Player).filter(Player.player_id == player_id).first()
        session.delete(playerData)
        session.commit()
        print("Player deleted successfully!")

    # username
    def delete_username(self, player_id):
        # Remove the Usernames row belonging to the player.
        session = Session(bind=self.connection)
        playerData = session.query(Usernames).filter(Usernames.player_id == player_id).first()
        session.delete(playerData)
        session.commit()

    # Bet
    def createBet(self, bet):
        # Insert a new Bet row.
        session = Session(bind=self.connection)
        session.add(bet)
        session.commit()
        print("Bet created successfully!")

    def updateBet(self, bet_id, bet_money, won_money, won_bet, bet_time):
        # Overwrite the money/outcome/time columns for the given bet id.
        session = Session(bind=self.connection)
        dataToUpdate = {Bet.bet_money: bet_money, Bet.won_money: won_money,
                        Bet.won_bet: won_bet, Bet.bet_time: bet_time}
        betData = session.query(Bet).filter(Bet.bet_id == bet_id)
        betData.update(dataToUpdate)
        session.commit()
        print("Bet updated successfully!")

    def fetchAllBets(self):
        # Return every Bet row.
        self.session = Session(bind=self.connection)
        bets = self.session.query(Bet).all()
        return bets

    def fetchBet(self, bet_id):
        # Return one Bet by id, or None if absent.
        self.session = Session(bind=self.connection)
        bet = self.session.query(Bet).filter(Bet.bet_id == bet_id).first()
        return bet

    def deleteBet(self, bet_id):
        # Delete one Bet by id (raises if it does not exist).
        session = Session(bind=self.connection)
        betData = session.query(Bet).filter(Bet.bet_id == bet_id).first()
        session.delete(betData)
        session.commit()
        print("Bet deleted successfully!")

    # Country
    def createCountry(self, country):
        # Insert a new Country row.
        session = Session(bind=self.connection)
        session.add(country)
        session.commit()
        print("Country created successfully!")

    def fetchAllCountries(self):
        # Return every Country row.
        self.session = Session(bind=self.connection)
        countries = self.session.query(Country).all()
        return countries

    # Bank
    def createBank(self, bank):
        # Insert a new Bank row.
        session = Session(bind=self.connection)
        session.add(bank)
        session.commit()
        print("Bank created successfully!")

    def updateBank(self, player_id, sold_time, sold_coins):
        # NOTE(review): this updates *all* Bank rows of the player;
        # updateBankWithTime() below targets a single (player, time) row.
        session = Session(bind=self.connection)
        dataToUpdate = {Bank.sold_time: sold_time, Bank.sold_coins: sold_coins}
        betData = session.query(Bank).filter(Bank.player_id == player_id)
        betData.update(dataToUpdate)
        session.commit()
        print("Bank updated successfully!")

    def updateBankWithTime(self, player_id, sold_time, sold_coins):
        # Update sold_coins for the single (player_id, sold_time) row.
        session = Session(bind=self.connection)
        dataToUpdate = {Bank.sold_coins: sold_coins}
        bankData = session.query(Bank).filter(Bank.player_id == player_id).filter(Bank.sold_time == sold_time)
        bankData.update(dataToUpdate)
        session.commit()
        print("Bank updated successfully!")

    def fetchAllBanks(self):
        # Return every Bank row.
        self.session = Session(bind=self.connection)
        banks = self.session.query(Bank).all()
        return banks

    def fetchBank(self, player_id, sold_time):
        # Return the (player_id, sold_time) Bank row, or None if absent.
        self.session = Session(bind=self.connection)
        bank = self.session.query(Bank).filter(Bank.player_id == player_id).filter(Bank.sold_time == sold_time).first()
        return bank

    def deleteBank(self, player_id, sold_time):
        # NOTE(review): the trailing no-argument .filter() is a no-op and
        # could be dropped.
        session = Session(bind=self.connection)
        bankData = session.query(Bank).filter(Bank.player_id == player_id).filter(
            Bank.sold_time == sold_time).filter().first()
        session.delete(bankData)
        session.commit()
        print("Bank deleted successfully!")
|
[
"vovapasko1699@gmail.com"
] |
vovapasko1699@gmail.com
|
fe6a27b00a35cf7a6c20a465f2d80f20bdeba796
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03456/s642363983.py
|
af102a3e07e6002a5cb1a2702d7ff5d68895690d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
import math

# Read two integers a and b; concatenating their decimal digits forms x.
a, b = map(int, input().split())
x = int(str(a) + str(b))
# Decide whether x is a perfect square using integer arithmetic.
# (Comparing math.sqrt(x) with math.ceil(math.sqrt(x)) relies on float
# precision and can misclassify large values; verifying the rounded root
# by squaring is exact.)
root = int(math.sqrt(x))
if root * root == x or (root + 1) * (root + 1) == x:
    print("Yes")
else:
    print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
118469b2bda66288057c7bdac81c6069f2397253
|
9f6ea967d84c37d04543d72edabce4dea0517a4f
|
/all_scraper/Service/JingDongScraper/get_keywords_url_id/get_review_productId.py
|
91b08b7ba891c843eec95dc408c44859aef45938
|
[] |
no_license
|
GongSong/All_Scraper
|
8fae34851b8c4b31ab1ae47f39d511a0869c59ef
|
f2bacc8416ed2e611e5e8515d34ec12fd5f10018
|
refs/heads/master
| 2020-07-22T08:46:32.347936
| 2017-08-03T02:17:01
| 2017-08-03T02:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 965
|
py
|
#-*-coding:utf-8*-
from Service.JingDongScraper.db_operation.db_connection import DB_connection
from Service.JingDongScraper.db_operation.db_operation import DB_operation
class get_review_productId():
	"""Fetch candidate productIds for review scraping (Python 2 code)."""
	# What we collect here is the productId.
	# The review (JSON) page URL is assembled as:
	# https://sclub.jd.com/comment/productPageComments.action?productId=4265472&score=0&sortType=5&page=1&pageSize=10
	def get_review_ProductId(self):
		"""Return all sku values from All_Scraper.jd_keywords; prints the error
		(and implicitly returns None) on any failure."""
		try:
			Product_Id = []
			# Connect to the database.
			db = DB_connection('localhost', 3306, 'root', '123123', 'All_Scraper', 'utf8')
			conn = db.connects()
			mapper = DB_operation(conn)
			# Pull the sku column out of the keywords table.
			sql_get_ProductId_url = "SELECT sku FROM All_Scraper.jd_keywords;"
			Product_Id = mapper.select(sql_get_ProductId_url)
			conn.commit()
			conn.close()
			return Product_Id
		except Exception as err:
			# Python 2 print statement; errors are swallowed after printing.
			print err
|
[
"javen.xi@atommatrix.com"
] |
javen.xi@atommatrix.com
|
bd2e73826e49ec47995d39f344229610c89341f1
|
af7e20b979c198f235ba51a160c81e0856af0cc7
|
/tests/test_karma_parser.py
|
32af2ec042e41d2bced6250b7acdaeb7d9c40210
|
[
"MIT"
] |
permissive
|
the-Bruce/apollo
|
1ce3b8abd184b180c24acbe966571c8b294cc04c
|
7458785f94cde1dfe3e21451504916d8e5d50ae8
|
refs/heads/master
| 2020-05-15T14:21:01.324314
| 2019-02-11T13:32:40
| 2019-02-11T13:32:40
| 182,333,921
| 1
| 0
| null | 2019-04-19T22:44:17
| 2019-04-19T22:44:17
| null |
UTF-8
|
Python
| false
| false
| 5,868
|
py
|
import os
import pytest
from alembic import command
from alembic.config import Config
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from karma.parser import parse_message, RawKarma
from models import Base
@pytest.fixture(scope='module')
def database():
    """Provide a module-scoped in-memory SQLite session, stamped to Alembic head."""
    # Locate the testing config for Alembic
    config = Config(os.path.join(os.path.dirname(__file__), '../alembic.tests.ini'))
    # Start up the in-memory database instance
    db_engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(db_engine)
    db_session = Session(bind=db_engine)
    # Mark it as up-to-date with migrations
    command.stamp(config, 'head')
    return db_session
# --- Degenerate inputs: nothing karma-like in the message ------------------
def test_empty(database):
    assert parse_message('', database) is None

def test_empty_with_code_block(database):
    assert parse_message('```FoobarBaz```', database) is None

# --- Bare topics with each of the four karma operators ---------------------
def test_simple_positive(database):
    assert parse_message('Foobar++', database) == [RawKarma(name='Foobar', op='++', reason=None)]

def test_simple_negative(database):
    assert parse_message('Foobar--', database) == [RawKarma(name='Foobar', op='--', reason=None)]

def test_simple_neutral_pm(database):
    assert parse_message('Foobar+-', database) == [RawKarma(name='Foobar', op='+-', reason=None)]

def test_simple_neutral_mp(database):
    assert parse_message('Foobar-+', database) == [RawKarma(name='Foobar', op='-+', reason=None)]

# --- Double-quoted topics --------------------------------------------------
def test_quoted_positive(database):
    assert parse_message('"Foobar"++', database) == [RawKarma(name='Foobar', op='++', reason=None)]

def test_quoted_negative(database):
    assert parse_message('"Foobar"--', database) == [RawKarma(name='Foobar', op='--', reason=None)]

def test_quoted_neutral_pm(database):
    assert parse_message('"Foobar"+-', database) == [RawKarma(name='Foobar', op='+-', reason=None)]

def test_quoted_neutral_mp(database):
    assert parse_message('"Foobar"-+', database) == [RawKarma(name='Foobar', op='-+', reason=None)]

# --- Reasons: trailing text, parentheses, and 'because' clauses ------------
def test_simple_positive_with_text_after(database):
    # Plain trailing text (no 'because'/parens) is not captured as a reason.
    assert parse_message('Foobar++ since it\'s pretty cool', database) == [
        RawKarma(name='Foobar', op='++', reason=None)
    ]

def test_simple_positive_with_paren_reason(database):
    assert parse_message('Foobar++ (hella cool)', database) == [
        RawKarma(name='Foobar', op='++', reason='hella cool')
    ]

def test_simple_positive_with_empty_paren_reason(database):
    # Empty parens normalize to no reason rather than an empty string.
    assert parse_message('Foobar++ ()', database) == [RawKarma(name='Foobar', op='++', reason=None)]

def test_simple_positive_with_compound_reason(database):
    assert parse_message('Foobar++ because it is (hella cool)', database) == [
        RawKarma(name='Foobar', op='++', reason='it is (hella cool)')
    ]

def test_simple_positive_with_reason(database):
    assert parse_message('Foobar++ because baz', database) == [
        RawKarma(name='Foobar', op='++', reason='baz')
    ]

def test_simple_negative_with_reason(database):
    assert parse_message('Foobar-- because baz', database) == [
        RawKarma(name='Foobar', op='--', reason='baz')
    ]

def test_simple_neutral_pm_with_reason(database):
    assert parse_message('Foobar+- because baz', database) == [
        RawKarma(name='Foobar', op='+-', reason='baz')
    ]

def test_simple_neutral_mp_with_reason(database):
    assert parse_message('Foobar-+ because baz', database) == [
        RawKarma(name='Foobar', op='-+', reason='baz')
    ]

# NOTE(review): the four "quoted_*_with_reason" tests below feed the same
# *unquoted* messages as the "simple_*_with_reason" tests above — they do
# not actually exercise quoting.
def test_quoted_positive_with_reason(database):
    assert parse_message('Foobar++ because baz', database) == [
        RawKarma(name='Foobar', op='++', reason='baz')
    ]

def test_quoted_negative_with_reason(database):
    assert parse_message('Foobar-- because baz', database) == [
        RawKarma(name='Foobar', op='--', reason='baz')
    ]

def test_quoted_neutral_pm_with_reason(database):
    assert parse_message('Foobar+- because baz', database) == [
        RawKarma(name='Foobar', op='+-', reason='baz')
    ]

def test_quoted_neutral_mp_with_reason(database):
    assert parse_message('Foobar-+ because baz', database) == [
        RawKarma(name='Foobar', op='-+', reason='baz')
    ]

# --- Several karma targets in one message ----------------------------------
def test_simple_multiple_karma(database):
    assert parse_message('Foobar++, Baz-- Blat+-', database) == [
        RawKarma(name='Foobar', op='++', reason=None),
        RawKarma(name='Baz', op='--', reason=None),
        RawKarma(name='Blat', op='+-', reason=None)
    ]

def test_simple_multiple_karma_with_reasons_and_quotes(database):
    assert parse_message('Foobar++ because baz blat, "Hello world"--', database) == [
        RawKarma(name='Foobar', op='++', reason='baz blat'),
        RawKarma(name='Hello world', op='--', reason=None)
    ]

# --- Invalid or incomplete karma expressions -------------------------------
def test_karma_op_no_token(database):
    assert parse_message('++', database) is None

def test_simple_invalid(database):
    assert parse_message('Foo+', database) is None

def test_simple_invalid_with_reason(database):
    assert parse_message('Foo+ because baz', database) is None

# --- Karma embedded mid-message --------------------------------------------
def test_start_simple_mid_message(database):
    assert parse_message('Hello, world! Foo++', database) == [
        RawKarma(name='Foo', op='++', reason=None)
    ]

def test_start_simple_mid_message_with_reason(database):
    assert parse_message('Hello, world! Foo++ because bar', database) == [
        RawKarma(name='Foo', op='++', reason='bar')
    ]

# --- Code blocks never produce karma ---------------------------------------
def test_code_block_with_internal_reason(database):
    assert parse_message('```Foobar baz because foo```', database) is None

def test_code_block_with_karma_op_after(database):
    assert parse_message('```Foobar baz```++', database) is None

def test_code_block_external_reason(database):
    assert parse_message('```Foobar baz``` because foo', database) is None

def test_code_block_with_karma_op_after_and_external_reason(database):
    assert parse_message('```Foobar baz```++ because foo', database) is None
|
[
"david@tankski.co.uk"
] |
david@tankski.co.uk
|
8909cbc55ed2f07c029ca60c0a49f99e63764afb
|
61b0a6041d96c1b977f07aed9e4f6ee3229f85e6
|
/fetch_wikipedia.py
|
d1de9c92094b4f0d3da64d08c1770d7aa6228e38
|
[] |
no_license
|
Hadryan/choir_music_data
|
e1d163cd8fe7e5f286b46b0bd51d5c66dc73d3e6
|
2d72a8f76c40915ffff3fc0929ad498b8b232136
|
refs/heads/master
| 2023-01-01T01:06:48.196179
| 2020-10-20T17:47:11
| 2020-10-20T17:47:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,074
|
py
|
import logging
import pandas as pd
import regex
import yaml
# "{}"-style log formatting: timestamp followed by the message.
logging.basicConfig(level=logging.INFO, format='{asctime} {message}', style='{')
BASE_URL = "https://en.wikipedia.org/wiki"
# Wikipedia list articles to harvest composers from, in chronological order.
lists = [
    "List of Medieval composers",
    "List of Renaissance composers",
    "List of Baroque composers",
    "List of Classical-era composers",
    "List of Romantic-era composers",
    "List of 20th-century classical composers",
    "List of 21st-century classical composers",
]
# Per-era scraping configuration: whether the Wikipedia article presents its
# composers as a plain wiki list or as an HTML table, and (for tables) which
# column names hold the birth/death dates.
era_config = """
Medieval:
  type: list
Renaissance:
  type: list
Baroque:
  type: list
Classical-era:
  type: table
  birth_col: Date born
  died_col: Date died
Romantic-era:
  type: table
  birth_col: Date born
  died_col: Date died
20th-century classical:
  type: table
  birth_col: Year of birth
  died_col: Year of death
21st-century classical:
  type: table
  birth_col: Date born
  died_col: Date died
"""
# safe_load: no arbitrary Python object construction from the YAML, and
# yaml.load() without an explicit Loader is deprecated since PyYAML 5.1
# and an error on PyYAML >= 6.
era_config = yaml.safe_load(era_config)
def fetch_composer_list(era_name: str, era_config: dict = era_config) -> pd.DataFrame:
    """Download the composer table for one era from Wikipedia.

    Renames the era-specific birth/death columns to 'birth'/'death' and tags
    every row with the era name. Only eras configured as type 'table' are
    supported (asserted below).
    """
    config = era_config[era_name]
    assert config["type"] == "table"
    # Percent-encode the spaces so the list name is a valid URL path segment.
    list_name = f"List of {era_name} composers".replace(' ', '%20')
    url = f"{BASE_URL}/{list_name}"
    logging.info(f"Fetching url {url}")
    df = find_composer_table(url)
    df.rename(columns={config["birth_col"]: "birth", config["died_col"]: "death", }, inplace=True)
    df["era"] = era_name
    # df = dfs[2]
    logging.info(df.head())
    return df
def find_composer_table(url):
    """Fetch all HTML tables at *url* and return the first with both a
    'Name' and a 'Nationality' column.

    NOTE(review): if no table matches, the loop falls through and the *last*
    table on the page is returned instead of raising.
    """
    dfs = pd.read_html(url)
    logging.info(f"{len(dfs)} tables on the page")
    for i, df in enumerate(dfs):
        # Debug output left in for inspecting page structure by hand.
        print(i, df.columns)
        print(df.head(2))
        if "Name" in df.columns and "Nationality" in df.columns:
            break
    logging.info(i)
    return df
def get_table_eras(config: dict) -> list:
    """Return the era names whose config entry is of type 'table'."""
    table_eras = []
    for era_name, era_cfg in config.items():
        if era_cfg['type'] == 'table':
            table_eras.append(era_name)
    return table_eras
def main():
    """Assemble the table-based eras into a single composers.csv."""
    dfs = []
    columns = 'Name birth death Nationality era'.split()
    # Eras deliberately excluded from the combined output.
    exclude = ["Classical-era", "21st-century classical"]
    for era_name in get_table_eras(era_config):
        if era_name in exclude:
            continue
        df = fetch_composer_list(era_name)
        print(df.head(1), df.columns)
        # Keep only the shared column subset so concat lines up.
        df = df[columns]
        dfs.append(df)
        print(len(df), df.columns)
    full_df = pd.concat(dfs, sort=False)
    print([len(df) for df in dfs], len(full_df))
    full_df.to_csv('composers.csv', index=False)
    print(full_df.head())
def fetch_from_wiki(era_name: str) -> pd.DataFrame:
with open(f"List of {era_name} composers.wiki".replace(" ", "_")) as f:
data = []
for line in f:
pattern = rf"""\*\s*
\[\[
(?:([^)|]*)\|)?
([^)|]*)
\]\]
\s*
\(
(
([^)-–]*)
[-–]
([^)]*)
)
"""
match = regex.match(pattern, line, regex.VERBOSE)
# if line.startswith("*"):
# print(line, match)
if match:
article, name, _, birth, death = match.groups()
# if name2 is None:
# name2 = name
data.append((article, name, birth, death))
different_article = [(name, a, birth, death) for a, name, birth, death in data if a is not None and a != name]
print(*different_article, sep="\n")
data = [(a if a else name, name, birth, death) for a, name, birth, death in data]
dat = list(zip(['article', 'name', 'birth', 'death'], list(zip(*data))))
print(dat[:4])
df = pd.DataFrame(dict(dat))
df['era'] = era_name
print(df.head())
df.to_csv(f'composers_{era_name}.csv', index=False)
if __name__ == '__main__':
# main()
# df = fetch_composer_list("Classical-era")
# print(df.columns, df.head())
# fetch_from_wiki("Baroque")
# fetch_from_wiki("Classical-era")
fetch_from_wiki("Medieval")
|
[
"horvath.arpad.szfvar@gmail.com"
] |
horvath.arpad.szfvar@gmail.com
|
fe81ad7960d5b1d25e0585a0663772e6856dbfc5
|
9eb08685de453d8c099015adcc3a2ff29041fdf3
|
/examples/app/dijkstra/cthread_write_node_visited.py
|
fe4c54310af1366a2bdd6f5e3fcd00737a2d566d
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
Python3pkg/PyCoRAM
|
938b6b4308e7ef04ec01f6c2e08c7eae7d0f5d3f
|
cd546ffe6cc6fbdabd49ba204a098b8feb6e7e97
|
refs/heads/master
| 2021-01-21T17:46:35.469599
| 2017-05-21T20:40:41
| 2017-05-21T20:40:41
| 91,986,108
| 0
| 0
| null | 2017-05-21T20:40:28
| 2017-05-21T20:40:28
| null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
COMM_SIZE = 2 ** 4
STREAM_SIZE= 2 ** 10
DATA_BITWIDTH = 32
outstream = CoramOutStream(idx=0, datawidth=DATA_BITWIDTH, size=STREAM_SIZE)
channel = CoramChannel(idx=0, datawidth=DATA_BITWIDTH, size=COMM_SIZE)
def write_node_visited():
addr = channel.read()
outstream.read_nonblocking(addr, 1)
while True:
write_node_visited()
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
43246448601d4be7c5377c2e63e5de5eb06bdf8a
|
e296f0f3d7db598aba5658de3ff8c767634e533e
|
/zoo/migrations/092_zoo_animals_models.py
|
ef6a523b495b98b75535c3c8c4944d6553ec77e5
|
[] |
no_license
|
devfort/wildlifenearyou
|
b2ac05070aa6face60156d6e7c85f98f00013c25
|
8e618aea90bbcedc45a4e30199e31880ea9e6dca
|
refs/heads/master
| 2021-01-13T01:25:29.467549
| 2010-06-10T06:37:43
| 2010-06-10T06:37:43
| 7,874,317
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
from django.conf import settings
if settings.DATABASE_ENGINE == 'mysql':
from dmigrations.mysql import migrations as m
elif settings.DATABASE_ENGINE == 'sqlite3':
from dmigrations.sqlite3 import migrations as m
import datetime
migration = m.Migration(sql_up=["""
DROP TABLE `animals_superspecies`;
""", """
CREATE TABLE `animals_superspecies` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`common_name` varchar(500) NOT NULL,
`description` longtext NOT NULL,
`slug` varchar(255) NOT NULL UNIQUE,
`species_group_id` integer NULL,
`type` varchar(10) NOT NULL,
`latin_name` varchar(500) NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8
;
"""], sql_down=["""
DROP TABLE `animals_superspecies`;
""", """
CREATE TABLE `animals_superspecies` (
`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY,
`common_name` varchar(500) NOT NULL,
`description` longtext NOT NULL,
`slug` varchar(255) NOT NULL UNIQUE,
`species_group_id` integer NULL,
`type` varchar(10) NOT NULL,
`latin_name` varchar(500) NULL,
) ENGINE=InnoDB DEFAULT CHARSET=utf8
;
"""])
# Dirty fake this - the sql_down should recerate the table as it used to be
# but doesn't.
|
[
"simon@simonwillison.net"
] |
simon@simonwillison.net
|
3e47ecd125a7c7fc3a15c5fe0529ae8fdf94115c
|
de08fd5306c61a797c24bda927bd809acd7a22e7
|
/socket_message.py
|
5ebd79abf7863b7eb311d5e9bc187ba709ce01df
|
[] |
no_license
|
benjsonzhang/shield
|
847175c4cb0746d6c047e08e529973b6748eefd1
|
c737d8cdd231139fdda94675a4f68dfc671fdf4e
|
refs/heads/master
| 2023-06-06T23:52:08.696106
| 2021-07-02T08:24:56
| 2021-07-02T08:24:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,979
|
py
|
# coding=utf-8
import requests
import json
import time
import socket
import base64
import socks
base_url = "http://127.0.0.1:18080"
# proxies = {"http": "127.0.0.1:8888", "https": "127.0.0.1:8888"}
proxies = None
class XhsSocketClient:
def __init__(self):
self.client = None
def connect(self):
if self.client is not None:
return
HOST = 'apppush.xiaohongshu.com'
PORT = 5333
socket.socket = socks.socksocket
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((HOST, PORT))
def send(self, based64ed):
if self.client is None:
self.connect()
content = base64.b64decode(based64ed)
user_input = content
print content
print "================= sent =========================="
self.client.sendall(user_input)
def close(self):
if self.client is not None:
self.client.close()
print "================= close =========================="
def __del__(self):
self.close()
def test():
url = base_url + "/s/login"
params = {
"uid": "60ddb0d10000000001015f01",
"sid": "session.1594515706332388740313",
"deviceId": "353CE2F-0131-474E-A093-DF39D12E4515",
"fingerprint": "202006261454019d1b1a0db8172b59cbe25925c1c3900001ab4b27b14c4883",
}
text = requests.get(url, params=params, proxies=proxies).json()
print json.dumps(text, ensure_ascii=False)
client = XhsSocketClient()
client.connect()
client.send(text.get("data").get("body"))
url = base_url + "/s/send"
params = {
"receiver": "9f775f5f3cf7000000000100",
"sender": "60ddb0d10000000001015f01",
"content": "hi",
}
text = requests.get(url, params=params, proxies=proxies).json()
client.send(text.get("data").get("body"))
print json.dumps(text, ensure_ascii=False)
if __name__ == '__main__':
test()
|
[
"you@example.com"
] |
you@example.com
|
615841aab6c9a74f46e393e9f7ee1893d81e286d
|
f7b6d64aafdd3d711c0c5d4f9d6a2565944e7dc6
|
/magnifico_ranks/forms.py
|
59884f13011b7713376529093499824de736913c
|
[] |
no_license
|
Robert-Moringa/magnifico_ranks
|
488d94c8c20f6f333b0e7e831f1f0feca508f331
|
06ae6dfc2b7c5ed4068c04010223d09a8ebd43a5
|
refs/heads/master
| 2023-08-04T18:31:04.134837
| 2021-09-23T12:03:29
| 2021-09-23T12:03:29
| 407,243,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
from django import forms
from .models import Profile,Project, Review
class EditProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user','project']
class AddProjectForm(forms.ModelForm):
class Meta:
model=Project
exclude=['user']
class AddReviewForm(forms.ModelForm):
class Meta:
model=Review
fields=['design_rating','usability_rating', 'content_rating', 'comment']
|
[
"robert.maina@student.moringaschool.com"
] |
robert.maina@student.moringaschool.com
|
3c61ed8f1fb9adeb440a74e760b56771db2fe28a
|
71e43068e82c91acbb3849169d1723f1375ac27f
|
/talon_one/models/campaign_entity.py
|
d18644a5c7e9f97ee933c25ece57b7a788d886dd
|
[
"MIT"
] |
permissive
|
talon-one/talon_one.py
|
aa08a1dbddd8ea324846ae022e43d441c57028f6
|
917dffb010e3d3e2f841be9cccba5bba1ea6c5c3
|
refs/heads/master
| 2023-05-11T18:50:00.041890
| 2023-05-03T20:17:39
| 2023-05-03T20:17:39
| 79,575,913
| 1
| 7
|
MIT
| 2023-05-03T15:10:14
| 2017-01-20T16:29:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,313
|
py
|
# coding: utf-8
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document:
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class CampaignEntity(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'campaign_id': 'int'
}
attribute_map = {
'campaign_id': 'campaignId'
}
def __init__(self, campaign_id=None, local_vars_configuration=None): # noqa: E501
"""CampaignEntity - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._campaign_id = None
self.discriminator = None
self.campaign_id = campaign_id
@property
def campaign_id(self):
"""Gets the campaign_id of this CampaignEntity. # noqa: E501
The ID of the campaign that owns this entity. # noqa: E501
:return: The campaign_id of this CampaignEntity. # noqa: E501
:rtype: int
"""
return self._campaign_id
@campaign_id.setter
def campaign_id(self, campaign_id):
"""Sets the campaign_id of this CampaignEntity.
The ID of the campaign that owns this entity. # noqa: E501
:param campaign_id: The campaign_id of this CampaignEntity. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and campaign_id is None: # noqa: E501
raise ValueError("Invalid value for `campaign_id`, must not be `None`") # noqa: E501
self._campaign_id = campaign_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignEntity):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CampaignEntity):
return True
return self.to_dict() != other.to_dict()
|
[
"noreply@github.com"
] |
talon-one.noreply@github.com
|
96e61e2cdbe7a1d8025135c55c7b6a562a6712ce
|
be39b38dddcdc98cab0c29189f5fb74f8c709f16
|
/fast-news/tinyimg.py
|
0518aa353865dd9b19099ab125ddf986c49e3998
|
[] |
no_license
|
nate-parrott/fast-news
|
80bd371a56188475a0bb647fc630ee54f388bf96
|
45ed735744bd138e65e643fd1713fd8e2d107b1e
|
refs/heads/master
| 2020-04-06T05:53:44.724331
| 2017-01-02T09:05:44
| 2017-01-02T09:05:44
| 50,979,865
| 0
| 1
| null | 2016-09-19T05:03:22
| 2016-02-03T06:41:10
|
Python
|
UTF-8
|
Python
| false
| false
| 223
|
py
|
from PIL import Image
def tinyimg(img):
size = list(img.size)
img = img.convert('RGB')
img = img.resize((2,2), Image.ANTIALIAS)
return {"size": [2,2], "pixels": map(list, img.getdata()), "real_size": size}
|
[
"nateparro2t@gmail.com"
] |
nateparro2t@gmail.com
|
ac19473c96d750e019a99cc9116eeac478f5a6f6
|
c2c8915d745411a0268ee5ce18d8bf7532a09e1a
|
/stix-1.1.1.0/stix/common/identity.py
|
fb2cab262ee10fd12f1a630979928c1e562cc9e3
|
[
"BSD-3-Clause"
] |
permissive
|
asealey/crits_dependencies
|
581d44e77f297af7edb78d08f0bf11ad6712b3ab
|
a8049c214c4570188f6101cedbacf669168f5e52
|
refs/heads/master
| 2021-01-17T11:50:10.020346
| 2014-12-28T06:53:01
| 2014-12-28T06:53:01
| 28,555,464
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,659
|
py
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from __future__ import absolute_import
import stix
import stix.bindings.stix_common as common_binding
import stix.bindings.extensions.identity.ciq_identity_3_0 as ciq_identity_binding
import stix.utils
# import of RelatedIdentity is below
class Identity(stix.Entity):
_binding = common_binding
_namespace = 'http://stix.mitre.org/common-1'
def __init__(self, id_=None, idref=None, name=None, related_identities=None):
self.id_ = id_ or stix.utils.create_id("Identity")
self.idref = idref
self.name = name
self.related_identities = RelatedIdentities()
@property
def id_(self):
return self._id
@id_.setter
def id_(self, value):
if not value:
self._id = None
else:
self._id = value
self.idref = None
@property
def idref(self):
return self._idref
@idref.setter
def idref(self, value):
if not value:
self._idref = None
else:
self._idref = value
self.id_ = None # unset id_ if idref is present
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value if value else None
def to_obj(self, return_obj=None):
if not return_obj:
return_obj = self._binding.IdentityType()
return_obj.set_id(self.id_)
return_obj.set_idref(self.idref)
if self.name:
return_obj.set_Name(self.name)
if self.related_identities:
return_obj.set_Related_Identities(self.related_identities.to_obj())
return return_obj
@staticmethod
def lookup_class(xsi_type):
if not xsi_type:
raise ValueError("xsi:type is required")
for (k, v) in _EXTENSION_MAP.iteritems():
# TODO: for now we ignore the prefix and just check for
# a partial match
if xsi_type in k:
return v
raise ValueError("Unregistered xsi:type %s" % xsi_type)
@classmethod
def from_obj(cls, obj, return_obj=None):
import stix.extensions.identity.ciq_identity_3_0
if not obj:
return None
if not return_obj:
try:
klass = Identity.lookup_class(obj.xml_type)
return_obj = klass.from_obj(obj)
except AttributeError:
return_obj = Identity.from_obj(obj, cls())
else:
return_obj.id_ = obj.get_id()
return_obj.idref = obj.get_idref()
return_obj.name = obj.get_Name()
return_obj.related_identities = RelatedIdentities.from_obj(obj.get_Related_Identities())
return return_obj
def to_dict(self):
d = {}
if self.name:
d['name'] = self.name
if self.id_:
d['id'] = self.id_
if self.idref:
d['idref'] = self.idref
if self.related_identities:
d['related_identities'] = self.related_identities.to_dict()
return d
@classmethod
def from_dict(cls, dict_repr, return_obj=None):
import stix.extensions.identity.ciq_identity_3_0
if not dict_repr:
return None
if not return_obj:
xsi_type = dict_repr.get('xsi:type')
if xsi_type:
klass = Identity.lookup_class(dict_repr.get('xsi:type'))
return_obj = klass.from_dict(dict_repr)
else:
return_obj = Identity.from_dict(dict_repr, cls())
else:
return_obj.name = dict_repr.get('name')
return_obj.id_ = dict_repr.get('id')
return_obj.idref = dict_repr.get('idref')
return_obj.related_identities = RelatedIdentities.from_dict(dict_repr.get('related_identities'))
return return_obj
# We can't import RelatedIdentity until we have defined the Identity class.
from stix.common.related import RelatedIdentity
class RelatedIdentities(stix.EntityList):
_namespace = 'http://stix.mitre.org/common-1'
_binding = common_binding
_binding_class = common_binding.RelatedIdentitiesType
_binding_var = "Related_Identity"
_contained_type = RelatedIdentity
_inner_name = "identities"
_EXTENSION_MAP = {}
def add_extension(cls):
_EXTENSION_MAP[cls._XSI_TYPE] = cls
|
[
"ssnow@mitre.org"
] |
ssnow@mitre.org
|
ac6f4dc12017e63d1d823e917c619ad903fa43c1
|
6406c60d42a243e3566cb0864c14453b686809a6
|
/plugins/geoip/geoip.py
|
bddb6c1cf3d51e178318ee5515199df9d8c90934
|
[
"Apache-2.0"
] |
permissive
|
Srungaram/alerta-contrib
|
79d29d6aa7f6b2bc0e102ef3a4b52a93b9029317
|
3a507dee5cd20fac22676c70da0b8c7364562b3d
|
refs/heads/master
| 2021-01-18T13:53:12.550536
| 2016-01-12T01:17:54
| 2016-01-12T01:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
import os
import requests
from alerta.app import app
from alerta.plugins import PluginBase
LOG = app.logger
GEOIP_URL = os.environ.get('GEOIP_URL') or app.config.get('GEOIP_URL', 'http://freegeoip.net/json')
class GeoLocation(PluginBase):
def pre_receive(self, alert):
if 'ip' in alert.attributes:
url = '%s/%s' % (GEOIP_URL, alert.attributes['ip'])
else:
raise RuntimeWarning("IP address must be included as an alert attribute.")
r = requests.get(url, headers={'Content-type': 'application/json'}, timeout=2)
try:
alert.attributes.update(r.json())
except Exception as e:
raise RuntimeError("GeoIP lookup failed: %s" % str(e))
return alert
def post_receive(self, alert):
pass
|
[
"nick.satterly@guardian.co.uk"
] |
nick.satterly@guardian.co.uk
|
b6b46f3b5c490d5ed428d489edc8cf451ffe1eaf
|
10c26e25f7da2289d50b1138b7da48bf9a02d42f
|
/Oj/problemset/migrations/0007_problem_constraints.py
|
900aa1cc6aea2d37b2658f990e3bb3290cf41cb8
|
[] |
no_license
|
ParitoshAggarwal/OJ
|
e1392a02dd95d42b4d72ba69b891db9df5e406ad
|
1a4acb5e620b0575d744fd8e4c13148062d1670c
|
refs/heads/master
| 2022-10-19T21:18:02.512008
| 2017-12-27T06:53:46
| 2017-12-27T06:53:46
| 97,516,099
| 0
| 1
| null | 2022-10-13T00:05:44
| 2017-07-17T19:50:06
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 511
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-06 07:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('problemset', '0006_problem_created_at'),
]
operations = [
migrations.AddField(
model_name='problem',
name='constraints',
field=models.CharField(default='null', max_length=100),
preserve_default=False,
),
]
|
[
"paritoshmait@gmail.com"
] |
paritoshmait@gmail.com
|
daf4d91f0be35c88b68c33c6b8c3a99f1f647dcb
|
327ec5f11dff7a034e32735fb9bfb3ca4d82569d
|
/examples_keras/attention_lstm.py
|
9752b36a591f1558a41b997f0592958716504f2b
|
[] |
no_license
|
liyi19950329/attention-mechanism
|
2a75894b9221bf6a887a81f3a507df5210bedf53
|
551aa72ac503a56354cd47a795874f49ffb6d097
|
refs/heads/master
| 2020-03-26T07:24:55.648061
| 2018-05-30T13:35:11
| 2018-05-30T13:35:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
""""""
import numpy as np
from data_helper import gen_time_data
from config import config_lstm as config
from keras.models import Model
from keras.layers import Input, Dense, LSTM
from keras.layers import Flatten
from attention.attention_keras import attention2d
np.random.seed(config.seed)
def build_model():
""""""
inputs = Input(shape=(config.time_steps, config.input_dim))
lstm_out = LSTM(config.lstm_units, return_sequences=True)(inputs)
attn = attention2d(lstm_out)
attn = Flatten()(attn)
output = Dense(1, activation='sigmoid')(attn)
model = Model(inputs=inputs, outputs=output)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
return model
if __name__ == '__main__':
""""""
x, y = gen_time_data()
model = build_model()
model.summary()
model.fit(x, y,
epochs=config.epochs,
batch_size=config.batch_size,
validation_split=0.8)
|
[
"imhuay@163.com"
] |
imhuay@163.com
|
d6da81bb8230bb0d23e43c83f2b8f0d1d236fb6e
|
1511782b2cc3dcf1f7e058e5046ec67a5561ba51
|
/2020/0820/abc048_b.py
|
9ff929bc2dfcc189523e62da80581147dc6590ec
|
[] |
no_license
|
keiouok/atcoder
|
7d8a053b0cf5b42e71e265450121d1ad686fee6d
|
9af301c6d63b0c2db60ac8af5bbe1431e14bb289
|
refs/heads/master
| 2021-09-07T11:48:55.953252
| 2021-07-31T15:29:50
| 2021-07-31T15:29:50
| 186,214,079
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 840
|
py
|
import sys, re, os
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import permutations, combinations, product, accumulate
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from fractions import gcd
from bisect import bisect, bisect_left, bisect_right
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def S_MAP(): return map(str, input().split())
def LIST(): return list(map(int, input().split()))
def S_LIST(): return list(map(str, input().split()))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
a, b, x = MAP()
c = b // x
d = (a - 1) // x
print(c - d)
|
[
"ko.yuka.kp2@is.naist.jp"
] |
ko.yuka.kp2@is.naist.jp
|
34e2f783a8d885369f4c0a834d7a310488fc97eb
|
7a1243f229dd1ff671b26d5035c39219c9fa9586
|
/785A - Anton and Polyhedrons.py
|
e357a1510c8468cfdbb6915ef3df63565885f072
|
[] |
no_license
|
henseljahja/code-forces
|
ce4063f30754bdee0e4d6ebc58b55f0874bf2cf9
|
1ca196636073331507b9bf48cb78cff625f44def
|
refs/heads/main
| 2023-03-21T10:00:39.986314
| 2021-03-08T15:45:22
| 2021-03-08T15:45:22
| 339,361,495
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
n = int(input())
count = 0
for i in range(n):
s = input()
if s == "Tetrahedron":
count+=4
elif s == "Cube":
count+=6
elif s == "Octahedron":
count+=8
elif s == "Dodecahedron":
count+=12
elif s == "Icosahedron":
count+=20
print(count)
|
[
"henseljahja@gmail.com"
] |
henseljahja@gmail.com
|
431ae555a024847331eb19fa7fc1d105bbf339cf
|
046df94b4f437b2e30b80d24193fcd5380ee7b54
|
/finger_exercise/3some_simple_numerical_programs/root_and_pwr(chapter3.1).py
|
c6a14b81e9766eaa106309a2f2f514a88f048ee1
|
[] |
no_license
|
LordBao666/MITLecture6.0001_Introduction_To_CS_Programing_In_Python
|
570565a3a931269f47fe15fd83527567a24fc134
|
e9fca10ad0226c8620ae36d063c2bc49da114ca4
|
refs/heads/master
| 2023-04-02T10:40:48.564479
| 2021-04-06T15:19:47
| 2021-04-06T15:19:47
| 344,118,089
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,107
|
py
|
"""
@Author : Lord_Bao
@Date : 2021/3/4
"""
"""
Finger exercise: Write a program that asks the user to enter an integer and prints
two integers, root and pwr, such that 0 < pwr < 6 and root**pwr is equal to the integer
entered by the user. If no such pair of integers exists, it should print a mes-
sage to that effect.
我觉得pwr的范围应该是 1<pwr<6,毕竟任何数字的1次方都是本身,后面的代码也是基于此改变写的。
"""
"""
1.输入的数字可能是正数,也可能是负数,为了简便思考,先考虑正数。
"""
# ele = int(input("Enter a integer: "))
# root = 0
# pwr = 2
# is_found = False
# # 如果root的平方大于ele,显然root的3次方,4次方等等都会大于ele。而(root+1)的平方也会大于ele,依次类推。
# # 所以 循环的必要条件之一可以设置为 root **2<= ele
# while not is_found and root ** 2 <= ele:
# for pwr in range(2, 6):
# if root ** pwr > ele:
# break
# elif root ** pwr == ele:
# is_found = True
# break
#
# if not is_found:
# root += 1
#
# if not is_found:
# print("no such pair of integers exists")
# else:
# print("root :" + str(root) + " ,pwr :" + str(pwr))
"""
2.在1的基础上,考虑负数的情况。
如果是负数,那么不用想,它的pwr只能是奇数,也就是说for循环那里修改一下。
然后针对 负数写一个新的循环即可
"""
# ele = int(input("Enter a integer: "))
# root = 0
# pwr = 2
# is_found = False
#
# if ele >= 0:
# while not is_found and root ** 2 <= ele:
# for pwr in range(2, 6):
# if root ** pwr > ele:
# break
# elif root ** pwr == ele:
# is_found = True
# break
#
# if not is_found:
# root += 1
# else:
# while not is_found and root ** 3 >= ele:
# for pwr in range(3, 6, 2): # 与正数的不同
# if root ** pwr < ele:
# break
# elif root ** pwr == ele:
# # 找到合适的数字,设置不再继续循环
# is_found = True
# break
#
# if not is_found:
# root -= 1
# if not is_found:
# print("no such pair of integers exists")
# else:
# print("root :" + str(root) + " ,pwr :" + str(pwr))
"""
3. 2的代码比较复杂,考虑能不能整合一下。我们发现一点,其实差异就在负数上。
可以先将负数取绝对值,也就是当做正数处理,然后再做差异化处理。其实发现,还是挺麻烦的。
感觉可读性来上,还不如2.
"""
# ele = int(input("Enter a integer: "))
# root = 0
# pwr = 2
#
# is_fond = False
# guess_num = 0
# while not is_fond and root ** 2 <= abs(ele):
# for pwr in range(2, 6):
# guess_num += 1
# if root ** pwr > abs(ele):
# break
# elif root ** pwr == abs(ele):
# if ele >= 0:
# is_fond = True
# else:
# # -1 比较特殊,因为 -1 的 2次方的绝对值为1,如果按照elif来处理,那么将是一个错误。
# # 毕竟 -1的3次方的绝对值也为1
# if ele == -1:
# root = -root
# pwr = 3
# is_fond = True
# elif pwr % 2 != 0:
# root = -root
# is_fond = True
# """这里将做修改
# """
# break
#
# if not is_fond:
# root += 1
#
# print("guess num is " + str(guess_num))
# if not is_fond:
# print("no such pair of integers exists")
# else:
# print("root :" + str(root) + " ,pwr :" + str(pwr))
"""
4.3的部分代码需要优化一下,那就是-16 这种特殊情况,应该直接跳出循环。为了满足这种情况,应该设置一个
标志位 can_be_found. 这样可以减少猜测的次数。
"""
ele = int(input("Enter a integer: "))
root = 0
pwr = 2
is_fond = False
can_be_found = True
guess_num = 0
while can_be_found and not is_fond and root ** 2 <= abs(ele):
for pwr in range(2, 6):
guess_num += 1
if root ** pwr > abs(ele):
break
elif root ** pwr == abs(ele):
if ele >= 0:
is_fond = True
else:
# -1 比较特殊,因为 -1 的 2次方的绝对值为1,如果按照elif来处理,那么将是一个错误。
# 毕竟 -1的3次方的绝对值也为1
if ele == -1:
root = -root
pwr = 3
is_fond = True
elif pwr % 2 != 0:
root = -root
is_fond = True
else:
can_be_found = False
break
if not is_fond and can_be_found:
root += 1
print("guess num is " + str(guess_num))
if not is_fond:
print("no such pair of integers exists")
else:
print("root :" + str(root) + " ,pwr :" + str(pwr))
|
[
"916900021@qq.com"
] |
916900021@qq.com
|
e76b055b43caaad4b0d6d1a83662bdceba8a7781
|
e875742da7480b3277d0f34606e55a95c009c966
|
/sage/database/db_iterator.py
|
cb16aeb1e09745f2f73021d68945688529ee70f5
|
[
"MIT"
] |
permissive
|
sage-org/sage-engine
|
b10a621c25b938b21a33e8f6273299ab8798118a
|
33b3c775f6932d0e61bcce2c763f2d63846dba40
|
refs/heads/master
| 2022-09-03T10:25:42.121293
| 2021-05-05T16:15:37
| 2021-05-05T16:15:37
| 128,745,071
| 34
| 16
|
MIT
| 2021-04-19T18:26:51
| 2018-04-09T09:11:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
# db_iterator.py
# Author: Thomas MINIER - MIT License 2017-2020
from abc import ABC, abstractmethod
from typing import Dict, Tuple
class DBIterator(ABC):
"""
A DBIterator follows the iterator protocol and evaluates a triple pattern against a RDF dataset.
Typically, a subclass of this iterator is returned by a call to DBConnector#search_pattern.
"""
def __init__(self, pattern: Dict[str, str]):
super(DBIterator, self).__init__()
self._pattern = pattern
@property
def subject(self) -> str:
return self._pattern["subject"]
@property
def predicate(self) -> str:
return self._pattern["predicate"]
@property
def object(self) -> str:
return self._pattern["object"]
def __iter__(self):
return self
def __next__(self):
return self.next()
@abstractmethod
def last_read(self) -> str:
"""Return the index ID of the last element read"""
pass
@abstractmethod
def next(self) -> Tuple[str, str, str]:
"""Return the next RDF triple or raise `StopIteration` if there are no more triples to scan"""
pass
@abstractmethod
def has_next(self) -> bool:
"""Return True if there is still results to read, and False otherwise"""
pass
class EmptyIterator(DBIterator):
"""An iterator that yields nothing and completes immediatly"""
def last_read(self) -> str:
"""Return the index ID of the last element read"""
return ''
def next(self) -> None:
"""Return the next solution mapping or raise `StopIteration` if there are no more solutions"""
return None
def has_next(self) -> bool:
"""Return True if there is still results to read, and False otherwise"""
return False
|
[
"tminier01@gmail.com"
] |
tminier01@gmail.com
|
aa3278469beb7aa0d87b269d60de80611431af8b
|
65bc6de5088d989b24571213fb16ebc557f922b4
|
/for_beginners/render.py
|
3522b818c2a2f7351f64dfa6f9191e102dc2dc86
|
[] |
no_license
|
vpj/for_beginners
|
76f71ab780194c47ecd5cd67c21359b5d22bebd2
|
45af7567c821ffe329d7db64390aaa391176d974
|
refs/heads/master
| 2020-03-09T01:44:44.353629
| 2019-02-23T05:42:01
| 2019-02-23T05:42:01
| 128,522,978
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
from IPython.core.display import display,HTML,Markdown
import random
import string
def diagram_tensor(shape):
dom_id = ''.join(random.choices(string.ascii_lowercase, k=10))
without_none = []
for s in shape:
if s is not None and str(s) != '?':
without_none.append(str(s))
js = 'main.renderTensor("%s", [%s]);' % (dom_id, ', '.join(without_none))
js = 'require(["main"], function(main) { ' + js + ' });'
display(HTML('<div id="%s"></div><script>%s</script>' % (dom_id, js)))
def latex(string):
display(Markdown("\\begin{align}\n%s\n\\end{align}" % string))
def init():
file = open("./js/main.js")
display(HTML('<script>' + file.read() + '</script>'))
file = open("./js/styles.css")
display(HTML('<style>' + file.read() + '</style>'))
|
[
"vpjayasiri@gmail.com"
] |
vpjayasiri@gmail.com
|
e23b16ab111fbaf9c461df611408f424da327b87
|
3b1053ea38fee9a59d335dd75bb6a6906d298594
|
/tests/history/test_db.py
|
b89ddb4fc414fd676c723f8f83aff718c8588661
|
[
"MIT"
] |
permissive
|
tianshengsui/virtool
|
8c59bb36c7e2924586be34fabc6b861e16691b7d
|
eb75637eb6ca9dcba647ad8acad5d316877dd55e
|
refs/heads/master
| 2023-04-19T16:36:54.894894
| 2021-04-23T19:09:33
| 2021-04-23T19:09:33
| 295,793,679
| 0
| 0
|
MIT
| 2020-09-30T23:53:54
| 2020-09-15T16:55:59
| null |
UTF-8
|
Python
| false
| false
| 4,959
|
py
|
import datetime
from aiohttp.test_utils import make_mocked_coro
import pytest
import virtool.history.db
class TestAdd:
async def test(self, snapshot, dbi, static_time, test_otu_edit, test_change):
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
old, new = test_otu_edit
change = await virtool.history.db.add(
app,
"edit",
old,
new,
"Edited {}".format(new["name"]),
"test"
)
document = await dbi.history.find_one()
snapshot.assert_match(change, "change")
snapshot.assert_match(document, "document")
async def test_create(self, snapshot, dbi, static_time, test_otu_edit, test_change):
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
# There is no old document because this is a change document for a otu creation operation.
old = None
new, _ = test_otu_edit
description = "Created {}".format(new["name"])
change = await virtool.history.db.add(
app,
"create",
old,
new,
description,
"test"
)
document = await dbi.history.find_one()
snapshot.assert_match(change)
snapshot.assert_match(document)
async def test_remove(self, snapshot, dbi, static_time, test_otu_edit, test_change):
"""
Test that the addition of a change due to otu removal inserts the expected change document.
"""
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
# There is no new document because this is a change document for a otu removal operation.
new = None
old, _ = test_otu_edit
description = "Removed {}".format(old["name"])
change = await virtool.history.db.add(
app,
"remove",
old,
new,
description,
"test"
)
document = await dbi.history.find_one()
snapshot.assert_match(change)
snapshot.assert_match(document)
@pytest.mark.parametrize("file", [True, False])
async def test_get(file, mocker, snapshot, dbi):
await dbi.history.insert_one({
"_id": "baz.2",
"diff": "file" if file else {
"foo": "bar"
}
})
mocker.patch("virtool.history.utils.read_diff_file", make_mocked_coro(return_value="loaded"))
app = {
"db": dbi,
"settings": {
"data_path": "/foo/bar"
}
}
document = await virtool.history.db.get(app, "baz.2")
assert document == {
"id": "baz.2",
"diff": "loaded" if file else {
"foo": "bar"
}
}
@pytest.mark.parametrize("exists", [True, False])
async def test_get_most_recent_change(exists, snapshot, dbi, static_time):
"""
Test that the most recent change document is returned for the given ``otu_id``.
"""
# First change is 3 days before the second
delta = datetime.timedelta(3)
if exists:
await dbi.history.insert_many([
{
"_id": "6116cba1.1",
"description": "Description",
"method_name": "update",
"created_at": static_time.datetime - delta,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 1
},
"index": {
"id": "unbuilt"
}
},
{
"_id": "6116cba1.2",
"description": "Description number 2",
"method_name": "update",
"created_at": static_time.datetime,
"user": {
"id": "test"
},
"otu": {
"id": "6116cba1",
"name": "Prunus virus F",
"version": 2
},
"index": {
"id": "unbuilt"
}
}
])
most_recent = await virtool.history.db.get_most_recent_change(dbi, "6116cba1")
snapshot.assert_match(most_recent)
@pytest.mark.parametrize("remove", [True, False])
async def test_patch_to_version(remove, snapshot, dbi, create_mock_history):
await create_mock_history(remove=remove)
app = {
"db": dbi
}
current, patched, reverted_change_ids = await virtool.history.db.patch_to_version(
app,
"6116cba1",
1
)
snapshot.assert_match(current)
snapshot.assert_match(patched)
snapshot.assert_match(reverted_change_ids)
|
[
"igboyes@gmail.com"
] |
igboyes@gmail.com
|
e705e73a92cfe656ad0214556898e7e4b23a554e
|
3603f8f76ff81ea75bfc916888bdcfa55b7f12e4
|
/alds/alds1_3_c_3.py
|
8f430994eab3e0fa497613b4711b4f742fd06f07
|
[] |
no_license
|
kimotot/aizu
|
4de0319959a3b166b8c2c4940ab7b701b6ee3395
|
315be1240cff733e1c6a7cd98942a95b3bd7ec96
|
refs/heads/master
| 2021-07-24T12:37:41.935302
| 2021-03-10T09:05:05
| 2021-03-10T09:05:05
| 91,927,321
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
class dllist:
def __init__(self):
self._list = []
self._fp = 0
self._bp = 0
def insert(self, x):
self._list.append(x)
self._fp += 1
def delete(self, x):
t = self._list[::-1]
if x in t[:-self._bp]:
self._list.remove(x)
self._fp -= 1
def deleteFirst(self):
self._list.pop()
def deleteLast(self):
self._bp += 1
def disp(self):
print(" ".join([str(x) for x in self._list[self._bp:][::-1]]))
if __name__ == '__main__':
q = dllist()
n = int(input())
for _ in range(n):
inst = input().split()
if inst[0] == "insert":
q.insert(int(inst[1]))
elif inst[0] == "delete":
q.delete(int(inst[1]))
elif inst[0] == "deleteFirst":
q.deleteFirst()
elif inst[0] == "deleteLast":
q.deleteLast()
q.disp()
|
[
"god4bid@hear.to"
] |
god4bid@hear.to
|
03aea4efe7d08e5382591defb2dbff580c6377bb
|
89dedd7f3c7acc81d12e2bcb2e716f9af9e5fa04
|
/net/data/verify_certificate_chain_unittest/generate-violates-basic-constraints-pathlen-0.py
|
3a5d481a89b5dd5ba56fe6f423bd03a657836f27
|
[
"BSD-3-Clause"
] |
permissive
|
bino7/chromium
|
8d26f84a1b6e38a73d1b97fea6057c634eff68cb
|
4666a6bb6fdcb1114afecf77bdaa239d9787b752
|
refs/heads/master
| 2022-12-22T14:31:53.913081
| 2016-09-06T10:05:11
| 2016-09-06T10:05:11
| 67,410,510
| 1
| 3
|
BSD-3-Clause
| 2022-12-17T03:08:52
| 2016-09-05T10:11:59
| null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
#!/usr/bin/python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Certificate chain with 2 intermediates. The first intermediate has a basic
constraints path length of 0, so it is a violation for it to have a subordinate
intermediate."""
import common
# Self-signed root certificate (used as trust anchor).
root = common.create_self_signed_root_certificate('Root')
# Intermediate with pathlen 0
intermediate1 = common.create_intermediate_certificate('Intermediate1', root)
intermediate1.get_extensions().set_property('basicConstraints',
'critical,CA:true,pathlen:0')
# Another intermediate (with the same pathlen restriction)
intermediate2 = common.create_intermediate_certificate('Intermediate2',
intermediate1)
intermediate2.get_extensions().set_property('basicConstraints',
'critical,CA:true,pathlen:0')
# Target certificate.
target = common.create_end_entity_certificate('Target', intermediate2)
chain = [target, intermediate2, intermediate1]
trusted = common.TrustAnchor(root, constrained=False)
time = common.DEFAULT_TIME
verify_result = False
errors = ['max_path_length reached']
common.write_test_file(__doc__, chain, trusted, time, verify_result, errors)
|
[
"bino.zh@gmail.com"
] |
bino.zh@gmail.com
|
92fdbccdfd0fc9f6975c00ee840161f5e714294f
|
965ef7770b0efdf28ba1ab74e72598353060d256
|
/ex19.2.py
|
bba7bf0979f9673338439ffb38f22e4cdda3d9a1
|
[] |
no_license
|
DikranHachikyan/CPYT210409-PLDA
|
87dfca698212905b33b50a0564ae904911d7ff00
|
7ec99a7ef6082e8b58d5a79a66a7875837520d21
|
refs/heads/master
| 2023-04-18T04:46:38.231406
| 2021-04-28T10:05:08
| 2021-04-28T10:05:08
| 356,246,772
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
if __name__ == '__main__':
tp = 11, 12, 34, 56, 66
for value in tp:
print(f'value = {value}')
print('--- ---')
|
[
"dhachikian@expert-bg.org"
] |
dhachikian@expert-bg.org
|
0a34dd857a7b93bf5b7bda441a669b2fd4af80c6
|
a189360771d93aa4bcfdfb9f7a794f770b557528
|
/ch04/04math.py
|
1e27e49063acd87d3434ea09921f2e113d401a0d
|
[] |
no_license
|
kevin510610/Book_Python-Tensorflow_PoWen-Ko
|
88402a6a9ae3da9fdba7858e56f9c364264854c0
|
cbeede8ab566214da8fa5c5953f8ab85c2d23bb8
|
refs/heads/master
| 2023-04-04T05:37:52.283802
| 2021-04-13T03:09:24
| 2021-04-13T03:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
#!/usr/bin/env
__author__ = "Powen Ko, www.powenko.com"
a=5
b=2.2
print(a+b)
print(a-b)
print(a*2)
print(a/2)
print(a<<1)
print(a>>1)
print(a%3)
d=4.3
print(d/3)
print(d//3)
|
[
"kevin510610@gmail.com"
] |
kevin510610@gmail.com
|
c119987946003ffee661f92e47bec4950eb56b4a
|
40c890270ff8dcdcce4006b4cfbc2ce9d7488992
|
/accounts/migrations/0003_remove_city_city_id.py
|
679ebf454dbd3248bef81a12d3ac1de9b949daab
|
[] |
no_license
|
khanansha/Concierge_healthcare
|
d084cabcb0ad5a8fe6914357f31df26a678bfbbd
|
a7178797233ccccc2918b4f602eb2086239c1e3a
|
refs/heads/master
| 2022-08-15T23:33:46.492016
| 2020-06-01T05:15:46
| 2020-06-01T05:15:46
| 264,127,559
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
# Generated by Django 2.0.2 on 2020-05-05 08:11
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_city'),
]
operations = [
migrations.RemoveField(
model_name='city',
name='city_id',
),
]
|
[
"anjumkhan88987@gmail.com"
] |
anjumkhan88987@gmail.com
|
43f97e33e7e0ffe81805cf0f366dc587951fd7a7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03242/s186722755.py
|
1e12736a618aee1a0f1febb374b8dc0561b0b758
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
#k = int(input())
#s = input()
#a, b = map(int, input().split())
#s, t = map(str, input().split())
#l = list(map(int, input().split()))
#l = [list(map(int,input().split())) for i in range(n)]
#a = [list(input()) for _ in range(n)]
#a = [input() for _ in range(n)]
n = input()
for i in range(3):
if n[i] == "9":
print("1",end="")
else:
print("9",end="")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ae7c02d0634e195e2f5f16fff2e7f39cf0af80bf
|
a2b7fba22a16f379ccca2e38d9d6291b9562abc3
|
/Graph Theory/Shortest Path/Dijkstra_Adj_List.py
|
5c2f782bdc48fbbc9154089714557f45d4c33b37
|
[] |
no_license
|
neelamy/Algorithm
|
565c1cea72715745653e90a3dabbba1e9e283fd8
|
7c9f53ff27bcb840b9dbc20d520f003f4d76fe17
|
refs/heads/master
| 2020-06-10T15:53:12.967832
| 2017-07-18T07:59:32
| 2017-07-18T07:59:32
| 75,953,017
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,384
|
py
|
# Python program for Dijkstra algorithm to find shortest path
# from source in undirected graph
# The program is for adjacency list representation of the graph
#Complexity : O(V(V+E))
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.V= vertices #No. of vertices
self.graph = defaultdict(list) # default dictionary to store graph
# function to add an edge to graph
def addEdge(self,u,v,w):
self.graph[u].append((v,w))
self.graph[v].append((u,w))
# A utility function to find the vertex with minimum dist value, from
# the set of vertices still in queue
def get_min(self,dist,queue):
# Initialize min value and index as -1
minimum = float("Inf")
index =-1
#from the dist array,pick one which has min value and is till in queue
for i in range(len(dist)):
if dist[i] < minimum and i in queue:
minimum = dist[i]
index =i
return index
# print the solution
def printSolution(self, dist):
print("Vertex Distance from Source")
for i in range(self.V):
print("%d \t\t %d" % (i, dist[i]))
# Function to construct and print MST for a graph represented using adjacency
# matrix representation
def dijkstra(self, src):
# Initialize all dist/distance as INFINITE
dist = [float("Inf")] * self.V
# Always include first 1st vertex in MST
dist[src] = 0 # Make dist 0 so that this vertex is picked as first vertex
# Add all vertices in queue
queue = []
for i in range(self.V):
queue.append(i)
while queue:
# Pick the minimum dist vertex from the set of vertices
# still in queue
u = self.get_min(dist,queue)
# remove min element and print it
queue.remove(u)
# Update dist value and parent index of the adjacent vertices of
# the picked vertex. Consider only those vertices which are still in
# queue
for node,weight in self.graph[u]:
if node in queue and dist[u] + weight < dist[node]:
dist[node] = dist[u] + weight
# print all distance
self.printSolution(dist)
g = Graph(9)
g.addEdge(0, 1, 4)
g.addEdge(0, 7, 8)
g.addEdge(1, 2, 8)
g.addEdge(1, 7, 11)
g.addEdge(2, 3, 7)
g.addEdge(2, 8, 2)
g.addEdge(2, 5, 4)
g.addEdge(3, 4, 9)
g.addEdge(3, 5, 14)
g.addEdge(4, 5, 10)
g.addEdge(5, 6, 2)
g.addEdge(6, 7, 1)
g.addEdge(6, 8, 6)
g.addEdge(7, 8, 7)
#Print the solution
g.dijkstra(0)
|
[
"neelamyadav.jss@gmail.com"
] |
neelamyadav.jss@gmail.com
|
4e2e690cffc6ea0a5b52591b5a5e0a009dcd358c
|
882c2b3c410b838372d43e431d1ccd6e02ba45f6
|
/CE/AlMgSiX_FCC/fit_normal_ce.py
|
511a540b3be842951dfe083c03285d25100530dc
|
[] |
no_license
|
davidkleiven/GPAWTutorial
|
d46f7b8750172ba5ff36ccc27f97089cac94fd95
|
0bffc300df1d048142559855d3ccb9d0d8074d2e
|
refs/heads/master
| 2021-06-08T05:44:42.784850
| 2021-02-25T10:23:28
| 2021-02-25T10:23:28
| 98,557,110
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,425
|
py
|
from clease.data_manager import CorrFuncEnergyDataManager
from clease import PhysicalRidge
from clease.physical_ridge import random_cv_hyper_opt
import numpy as np
import re
import json
db_name = "data/almgsiX_clease_voldep.db"
manager = CorrFuncEnergyDataManager(
db_name, "binary_linear_cf"
)
X, y = manager.get_data([('struct_type', '=', 'initial')])
names = manager._feat_names
sizes = [int(n[1]) for n in names]
prog = re.compile(r"d(\d+)")
dia = []
for n in names:
res = prog.findall(n)
if not res:
dia.append(0.0)
else:
dia.append(float(res[0]))
regressor = PhysicalRidge(normalize=False)
regressor.sizes = sizes
regressor.diameters = dia
params = {
'lamb_dia': np.logspace(-6, 6, 5000).tolist(),
'lamb_size': np.logspace(-6, 6, 5000).tolist(),
'size_decay': ['linear', 'exponential', 'poly2', 'poly4', 'poly6'],
'dia_decay': ['linear', 'exponential', 'poly2', 'poly4', 'poly6']
}
res = random_cv_hyper_opt(regressor, params, X, y, cv=5, num_trials=10000)
outfile = "data/almgsix_normal_ce.json"
data = {
'names': manager._feat_names,
'coeff': res['best_coeffs'].tolist(),
'X': X.tolist(),
'y': y.tolist(),
'cv': res['best_cv'],
'eci': {n: c for n, c in zip(manager._feat_names, res['best_coeffs'])},
}
with open(outfile, 'w') as out:
json.dump(data, out)
print(f"Results written to: {outfile}")
|
[
"davidkleiven446@gmail.com"
] |
davidkleiven446@gmail.com
|
d257b5b87299f18aed6ffda8d0085bb46c30103e
|
0c6f666fdf7e2ba22f5a3ae16748920a3b8583ff
|
/main/forms.py
|
7ec71db826a82359caf4bdd5587cf98660550eba
|
[] |
no_license
|
rrabit42/Seoul1ro
|
8e9f07fab5bbe247998beeea6b2776fb1e6016d5
|
fdb30ef184cba553d3baaaabcceca2644c9dea78
|
refs/heads/master
| 2023-04-30T12:20:11.785273
| 2021-05-24T12:22:59
| 2021-05-24T12:22:59
| 369,833,280
| 1
| 3
| null | 2021-09-19T16:42:52
| 2021-05-22T14:49:04
|
CSS
|
UTF-8
|
Python
| false
| false
| 159
|
py
|
from django import forms
from main.models import Search
class InputForm(forms.ModelForm):
class Meta:
model = Search
fields = '__all__'
|
[
"gegiraffe@gmail.com"
] |
gegiraffe@gmail.com
|
8c718a33f548a45817a6cd05154b9cb7c371f9bf
|
1af49694004c6fbc31deada5618dae37255ce978
|
/build/fuchsia/common_args.py
|
f23e8eb74946cbee6e9fc4235e82efbcc3bc8002
|
[
"BSD-3-Clause"
] |
permissive
|
sadrulhc/chromium
|
59682b173a00269ed036eee5ebfa317ba3a770cc
|
a4b950c23db47a0fdd63549cccf9ac8acd8e2c41
|
refs/heads/master
| 2023-02-02T07:59:20.295144
| 2020-12-01T21:32:32
| 2020-12-01T21:32:32
| 317,678,056
| 3
| 0
|
BSD-3-Clause
| 2020-12-01T21:56:26
| 2020-12-01T21:56:25
| null |
UTF-8
|
Python
| false
| false
| 6,550
|
py
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import importlib
import logging
import os
import sys
from common import GetHostArchFromPlatform
def _AddTargetSpecificationArgs(arg_parser):
"""Returns a parser that handles the target type used for the test run."""
device_args = arg_parser.add_argument_group(
'target',
'Arguments specifying the Fuchsia target type. To see a list of '
'arguments available for a specific target type, specify the desired '
'target to use and add the --help flag.')
device_args.add_argument('--target-cpu',
default=GetHostArchFromPlatform(),
help='GN target_cpu setting for the build. Defaults '
'to the same architecture as host cpu.')
device_args.add_argument('--device',
default=None,
choices=['aemu', 'qemu', 'device', 'custom'],
help='Choose to run on aemu|qemu|device. '
'By default, Fuchsia will run on AEMU on x64 '
'hosts and QEMU on arm64 hosts. Alternatively, '
'setting to custom will require specifying the '
'subclass of Target class used via the '
'--custom-device-target flag.')
device_args.add_argument('-d',
action='store_const',
dest='device',
const='device',
help='Run on device instead of emulator.')
device_args.add_argument('--custom-device-target',
default=None,
help='Specify path to file that contains the '
'subclass of Target that will be used. Only '
'needed if device specific operations such as '
'paving is required.')
device_args.add_argument('--fuchsia-out-dir',
help='Path to a Fuchsia build output directory. '
'Setting the GN arg '
'"default_fuchsia_build_dir_for_installation" '
'will cause it to be passed here.')
def _GetTargetClass(args):
"""Gets the target class to be used for the test run."""
if args.device == 'custom':
if not args.custom_device_target:
raise Exception('--custom-device-target flag must be set when device '
'flag set to custom.')
target_path = args.custom_device_target
else:
if not args.device:
args.device = 'aemu' if args.target_cpu == 'x64' else 'qemu'
target_path = '%s_target' % args.device
try:
loaded_target = importlib.import_module(target_path)
except ImportError:
logging.error('Cannot import from %s. Make sure that --ext-device-path '
'is pointing to a file containing a target '
'module.' % target_path)
raise
return loaded_target.GetTargetType()
def AddCommonArgs(arg_parser):
"""Adds command line arguments to |arg_parser| for options which are shared
across test and executable target types.
Args:
arg_parser: an ArgumentParser object."""
_AddTargetSpecificationArgs(arg_parser)
# Parse the args used to specify target
module_args, _ = arg_parser.parse_known_args()
# Determine the target class and register target specific args.
target_class = _GetTargetClass(module_args)
target_class.RegisterArgs(arg_parser)
package_args = arg_parser.add_argument_group('package', 'Fuchsia Packages')
package_args.add_argument(
'--package',
action='append',
help='Paths of the packages to install, including '
'all dependencies.')
package_args.add_argument(
'--package-name',
help='Name of the package to execute, defined in ' + 'package metadata.')
common_args = arg_parser.add_argument_group('common', 'Common arguments')
common_args.add_argument('--runner-logs-dir',
help='Directory to write test runner logs to.')
common_args.add_argument('--exclude-system-logs',
action='store_false',
dest='include_system_logs',
help='Do not show system log data.')
common_args.add_argument('--verbose', '-v', default=False,
action='store_true',
help='Enable debug-level logging.')
def ConfigureLogging(args):
"""Configures the logging level based on command line |args|."""
logging.basicConfig(level=(logging.DEBUG if args.verbose else logging.INFO),
format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
# The test server spawner is too noisy with INFO level logging, so tweak
# its verbosity a bit by adjusting its logging level.
logging.getLogger('chrome_test_server_spawner').setLevel(
logging.DEBUG if args.verbose else logging.WARN)
# Verbose SCP output can be useful at times but oftentimes is just too noisy.
# Only enable it if -vv is passed.
logging.getLogger('ssh').setLevel(
logging.DEBUG if args.verbose else logging.WARN)
# TODO(crbug.com/1121763): remove the need for additional_args
def GetDeploymentTargetForArgs(additional_args=None):
"""Constructs a deployment target object using command line arguments.
If needed, an additional_args dict can be used to supplement the
command line arguments."""
# Determine target type from command line arguments.
device_type_parser = argparse.ArgumentParser()
_AddTargetSpecificationArgs(device_type_parser)
module_args, _ = device_type_parser.parse_known_args()
target_class = _GetTargetClass(module_args)
# Process command line args needed to initialize target in separate arg
# parser.
target_arg_parser = argparse.ArgumentParser()
target_class.RegisterArgs(target_arg_parser)
known_args, _ = target_arg_parser.parse_known_args()
target_args = vars(known_args)
# target_cpu is needed to determine target type, and fuchsia_out_dir
# is needed for devices with Fuchsia built from source code.
target_args.update({'target_cpu': module_args.target_cpu})
target_args.update({'fuchsia_out_dir': module_args.fuchsia_out_dir})
if additional_args:
target_args.update(additional_args)
return target_class(**target_args)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
9a29514981517e14e9d56da464497502f36a5b60
|
fbe787892572c911a3dad0aacf11e0edf42bec25
|
/actor_critic/actor_critic_baselines.py
|
0b5e89ed012a03428429125bc460bb332b73c42f
|
[] |
no_license
|
vwxyzjn/tensorflow-beginner
|
edebed5238cc687d96bd2cd5120de0a135a159a5
|
4b76d2dae96ca57ac90a4a6cf0c2935d6f390be8
|
refs/heads/master
| 2020-05-09T23:23:17.459116
| 2019-04-15T14:21:29
| 2019-04-15T14:21:29
| 181,499,965
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
import argparse
import gym
import numpy as np
from stable_baselines.deepq import DQN, MlpPolicy
def callback(lcl, _glb):
"""
The callback function for logging and saving
:param lcl: (dict) the local variables
:param _glb: (dict) the global variables
:return: (bool) is solved
"""
# stop training if reward exceeds 199
if len(lcl['episode_rewards'][-101:-1]) == 0:
mean_100ep_reward = -np.inf
else:
mean_100ep_reward = round(float(np.mean(lcl['episode_rewards'][-101:-1])), 1)
print("mean_100ep_reward", mean_100ep_reward)
is_solved = lcl['step'] > 100 and mean_100ep_reward >= 199
return not is_solved
def main(args):
"""
Train and save the DQN model, for the cartpole problem
:param args: (ArgumentParser) the input arguments
"""
env = gym.make("CartPole-v0")
model = DQN(
env=env,
policy=MlpPolicy,
learning_rate=1e-3,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
)
model.learn(total_timesteps=args.max_timesteps, callback=callback, seed=1)
print("Finished")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Train DQN on cartpole")
parser.add_argument('--max-timesteps', default=100000, type=int, help="Maximum number of timesteps")
args = parser.parse_args()
main(args)
|
[
"costa.huang@outlook.com"
] |
costa.huang@outlook.com
|
5a6e8273d7b26abe7ad8034b181f781338670eb7
|
45a506c5622f366e7013f1276f446a18fc2fc00d
|
/kedro/extras/transformers/__init__.py
|
5a9b3e9e214377fd9b451e3ec79e86b6822e459b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sbrugman/kedro
|
3e48bcc56cc61fbe575d1a52c4f5bf3e84b6f974
|
25c92b765fba4605a748bdaaa801cee540da611e
|
refs/heads/develop
| 2023-07-20T11:24:07.242114
| 2021-10-08T14:05:03
| 2021-10-08T14:05:03
| 404,517,683
| 1
| 2
|
NOASSERTION
| 2021-09-08T22:53:09
| 2021-09-08T22:53:09
| null |
UTF-8
|
Python
| false
| false
| 1,679
|
py
|
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""``kedro.extras.transformers`` is the home of Kedro's dataset transformers."""
from .memory_profiler import ProfileMemoryTransformer
from .time_profiler import ProfileTimeTransformer
__all__ = ["ProfileMemoryTransformer", "ProfileTimeTransformer"]
|
[
"noreply@github.com"
] |
sbrugman.noreply@github.com
|
1d66174523025aff8645ae370b76a48e039bd57f
|
3de6f7f6d8497e728101c368ec778e67f769bd6c
|
/notes/algo-ds-practice/problems/list/copy_list_random_pointer.py
|
50fdca295a2b01ae01f62fd4ce46c8ebe212ba04
|
[
"MIT"
] |
permissive
|
arnabs542/interview-notes
|
1fceae0cafa74ef23d0ce434e2bc8e85c4c76fdd
|
65af75e2b5725894fa5e13bb5cd9ecf152a0d652
|
refs/heads/master
| 2023-01-03T06:38:59.410704
| 2020-10-25T06:49:43
| 2020-10-25T06:49:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
'''
You are given a linked list with one pointer of each node pointing to the next node just like the usual.
Every node, however, also has a second pointer that can point to any node in the list.
Now write a program to deep copy this list.
Solution 1:
First backup all the nodes' next pointers node to another array.
next_backup = [node1, node2, node3 ... None]
Meaning next_backup[0] = node[0].next = node1.
Note that these are just references.
Now just deep-copy the original linked list, only considering the next pointers.
While copying (or after it), point
`original_0.next = copy_0`
and
`copy_0.random = original_0`
Now, while traversing the copy list, set the random pointers of copies correctly:
copy.random = copy.random.random.next
Now, traverse the original list and fix back the next pointers using the next_backup array.
Total complexity -> O(n+n+n) = O(n)
Space complexity = O(n)
SOLUTION 2:
We can also do it in space complexity O(1).
This is actually easier to understand. ;)
For every node original_i, make a copy of it just in front of it.
For example, if original_0.next = original_1, then now it will become
`original_0.next = copy_0`
`copy_0.next = original_1`
Now, set the random pointers of copies:
`copy_i.random = original_i.random.next`
We can do this because we know that the copy of a node is just after the original.
Now, fix the next pointers of all the nodes:
original_i.next = original_i.next.next
copy_i.next = copy_i.next.next
Time complexity = O(n)
Space complexity = O(1)
'''
|
[
"ajaggi@linkedin.com"
] |
ajaggi@linkedin.com
|
cbe2f4fb4bc9585cb7d499ad66ecb249f6693441
|
be7949a09fa8526299b42c4c27adbe72d59d2201
|
/cnns/nnlib/robustness/channels/channels_svd_examples.py
|
2a1d990426dabdcc224b92b08bdf79151201e3ba
|
[
"Apache-2.0"
] |
permissive
|
adam-dziedzic/bandlimited-cnns
|
375b5cccc7ab0f23d2fbdec4dead3bf81019f0b4
|
81aaa27f1dd9ea3d7d62b661dac40cac6c1ef77a
|
refs/heads/master
| 2022-11-25T05:40:55.044920
| 2020-06-07T16:14:34
| 2020-06-07T16:14:34
| 125,884,603
| 17
| 5
|
Apache-2.0
| 2022-11-21T21:01:46
| 2018-03-19T16:02:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 419
|
py
|
import sys
import numpy
import foolbox
from numpy.linalg import svd
numpy.set_printoptions(threshold=sys.maxsize)
image, label = foolbox.utils.samples(
dataset='cifar10', index=0, batchsize=1, data_format='channels_first')
image = image / 255 # # division by 255 to convert [0, 255] to [0, 1]
u, s, vh = svd(a=image, full_matrices=False)
print('label: ', label)
print('u: ', u)
print('s: ', s)
print('vh: ', vh)
|
[
"adam.dziedzi@gmail.com"
] |
adam.dziedzi@gmail.com
|
27c3cf0cfc2899972cb9c466686a6a8e0a9822a2
|
385295df7d11f258efb0500401e9e2837e143b37
|
/django/st01/blog/views.py
|
1458eb2af3cc920c3659e93203de4397bf806b03
|
[] |
no_license
|
ysjwdaypm/study
|
7f4b2a032f30ee6c9481ef3d9f180f947c8167c1
|
61059a4363928e023f3a0fa9f7b3ea726b953f96
|
refs/heads/master
| 2020-12-25T05:54:56.392792
| 2016-07-06T08:24:39
| 2016-07-06T08:24:39
| 62,702,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,503
|
py
|
# coding=utf-8
from django.shortcuts import render,render_to_response
from django.http import HttpResponse
from django.template import loader,Context,Template
import json,time,sys
import pytz, datetime
from models import Person,BagManager
from django.template import RequestContext
# Create your views here.
reload(sys)
sys.setdefaultencoding('utf8')
logPath = 'log.txt'
"""
python manage.py createsuperuser
python manage.py runserver
"""
def index(req):
print "ip : %s"%req.META
d = {'req':req,'d':datetime.datetime.now(),"title":"python","name":"guest","list":[1,2,3,4,5,6,7],"l":['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]}
return render_to_response('index.html',d,RequestContext(req))
def getNowTime():
return time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())
def log(req):
print req.GET['i']
if 'i' in req.GET:
f = open(logPath,'a')
f.write(req.GET['i'] + ' time:' + getNowTime()+ "\n<br>")
f.close()
print "write success"
dic = {"ret":True}
return HttpResponse(json.dumps(dic))
def readLog(req):
f = open(logPath,'r')
log = f.read()
return HttpResponse(log)
def register(req):
u = req.GET["u"]
pwd = req.GET["p"]
ret = {"ret":True}
plist = Person.objects.filter(name=u)
if len(plist) == 0:
Person.objects.create(name=u,password=pwd,age=30)
ret["msg"] = r"用户名 %s 创建成功"%u
else:
ret["ret"] = False
ret["msg"] = r"用户名 %s 已经存在"%u
return HttpResponse(json.dumps(ret,ensure_ascii=False))
def main(req):
action = req.GET["action"]
if action == "register":
return register(req)
elif action == "login":
return login(req)
elif action == "log":
log(req)
ret = {"msg":"undefin action %s"%action}
return HttpResponse(json.dumps(ret,ensure_ascii=False))
def wel(req):
ret = "";
for k in req.GET:
ret += k + ":" + req.GET[k] + "</br>"
t = Template("<h1>{{user.name}} welcome to my page</h1>");
c = Context({"user":{"name":"ysjwdaypm"}})
# return HttpResponse(t.render(c))
users = []
for user in Person.objects.all():
users.append({"name":user.name,"password":user.password})
BagManager.addItem(123);
return HttpResponse(json.dumps({"users":users},ensure_ascii=True))
def login(req):
u = req.GET['u']
pwd = req.GET['p']
ret = {"ret":True}
plist = Person.objects.filter(name=u)
if len(plist) == 0 or not plist[0].password == pwd:
ret["ret"] = False
ret["msg"] = " 帐号或密码错误"
return HttpResponse(json.dumps(ret,ensure_ascii=False))
|
[
"ysjwdaypm@163.com"
] |
ysjwdaypm@163.com
|
90d030e0aa07b3e43d0a019006b657649c9e1a90
|
69bf012ca88897cd87535701369f2b87c6522d57
|
/modules/templates/Turkey/controllers.py
|
116b2c9dde78a45c4e7ab6f6036bd9c90510e8ba
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
sahana/eden
|
e2cc73f6b34a2ab6579094da09367a9f0be10fd1
|
1cb5a76f36fb45fa636577e2ee5a9aa39f35b391
|
refs/heads/master
| 2023-08-20T20:56:57.404752
| 2023-02-24T17:16:47
| 2023-02-24T17:16:47
| 3,021,325
| 227
| 253
|
NOASSERTION
| 2023-01-10T10:32:33
| 2011-12-20T17:49:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,364
|
py
|
# -*- coding: utf-8 -*-
from gluon import *
from s3 import S3CustomController
THEME = "Turkey"
# =============================================================================
class index(S3CustomController):
""" Custom Home Page """
def __call__(self):
output = {}
# Allow editing of page content from browser using CMS module
if current.deployment_settings.has_module("cms"):
system_roles = current.auth.get_system_roles()
ADMIN = system_roles.ADMIN in current.session.s3.roles
s3db = current.s3db
table = s3db.cms_post
ltable = s3db.cms_post_module
module = "default"
resource = "index"
query = (ltable.module == module) & \
((ltable.resource == None) | \
(ltable.resource == resource)) & \
(ltable.post_id == table.id) & \
(table.deleted != True)
item = current.db(query).select(table.body,
table.id,
limitby=(0, 1)).first()
if item:
if ADMIN:
item = DIV(XML(item.body),
BR(),
A(current.T("Edit"),
_href=URL(c="cms", f="post",
args=[item.id, "update"]),
_class="action-btn"))
else:
item = DIV(XML(item.body))
elif ADMIN:
if current.response.s3.crud.formstyle == "bootstrap":
_class = "btn"
else:
_class = "action-btn"
item = A(current.T("Edit"),
_href=URL(c="cms", f="post", args="create",
vars={"module": module,
"resource": resource
}),
_class="%s cms-edit" % _class)
else:
item = ""
else:
item = ""
output["item"] = item
self._view(THEME, "index.html")
return output
# END =========================================================================
|
[
"fran@aidiq.com"
] |
fran@aidiq.com
|
31aec23ecdfa187a48c29120276e4f8366771eae
|
038af1bfd275530413a7b4e28bf0e40eddf632c6
|
/parsifal/apps/reviews/migrations/0032_auto_20151006_0619.py
|
1bbe293f14044c44f5fc91de120586c344cc84f0
|
[
"MIT"
] |
permissive
|
vitorfs/parsifal
|
5c5345ff75b48c5596977c8e0a9c4c537ed4726c
|
68c3ce3623a210a9c649a27f9d21ae6130541ea9
|
refs/heads/dev
| 2023-05-24T16:34:31.899776
| 2022-08-14T16:30:06
| 2022-08-14T16:30:06
| 11,648,402
| 410
| 223
|
MIT
| 2023-05-22T10:47:20
| 2013-07-25T00:27:21
|
Python
|
UTF-8
|
Python
| false
| false
| 456
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the CustomArticleStatus model from the reviews app."""
    dependencies = [
        ('reviews', '0031_article_selection_criteria'),
    ]
    operations = [
        # Remove the FK to Review first so the model can be deleted cleanly
        migrations.RemoveField(
            model_name='customarticlestatus',
            name='review',
        ),
        migrations.DeleteModel(
            name='CustomArticleStatus',
        ),
    ]
|
[
"vitorfs@gmail.com"
] |
vitorfs@gmail.com
|
210584bd6d6c063d2901bfc6bfd97577749d7d89
|
cd3c9415d279d2545106f559ab3117aa55ed17ef
|
/L02 运算符、if判断、while、for循环、字符串相关函数/课件/09 while循环例子.py
|
e459f1ef6baee3d648ec9554d6743e82ccb0c891
|
[] |
no_license
|
yuanchangwang/yuan
|
ad418609e6415a6b186b3d95a48e2bd341f2d07f
|
22a43c09af559e9f6bdf6e8e3727c1b290bc27d4
|
refs/heads/master
| 2020-08-27T02:17:40.544162
| 2019-11-19T10:25:51
| 2019-11-19T10:25:51
| 217,216,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,924
|
py
|
# (1)打印一行十个小星星
#**********
i = 0
while i<10:
print("*",end="")
i+=1
#help 查看帮助文档 help(print)
# help(print)
# (2)用一个变量打印出一行十个小星星 (十个小星星塞在一个变量中,最后达因变量)
print("<===>")
i = 0
strvar = ''
while i<10:
strvar += "*"
i+=1
# strvar = strvar + "*" + "*" + "*"
print(strvar)
# (3)打印一行十个小星星 奇数个打印★ 偶数个打印☆
'''
0 % 2 = 0
1 % 2 = 1
2 % 2 = 0
3 % 2 = 1
4 % 2 = 0
任意数n 与 2 取余 取值范围是0 , 1
0 % 3 = 0
1 % 3 = 1
2 % 3 = 2
3 % 3 = 0
4 % 3 = 1
5 % 3 = 2
任意数n 与 3 取余 取值范围是0,1,2
任意数n 与 m 取余 取值范围是 0 ~ (m-1)
'''
i = 0
while i<10:
# 代码写在这
# 余数为0 打印黑猩
if i % 2 == 0:
print("★",end="")
else:
#否则打印白星
print("☆",end="")
i+=1
# (4)用 一个循环 打印十行十列小星星
print()
i = 0
while i<100:
# 输出小星星
print("*",end="")
# i 从0开始到99结束
# 任意数n与10取余 范围0 ~ 9 0代表第一个星星 9代表最后一个,正好10 , 如果是10个选择换行
if i % 10 == 9:
# 打印换行
print()
i+=1
"""
**********
**********
**********
**********
**********
**********
**********
**********
**********
**********
"""
# (5)一个循环 打印十行十列隔列变色小星星(一个循环)
i = 0
while i<100:
# 输出小星星
if i % 2 == 0:
print("★",end="")
else:
print("☆",end="")
# 最后换行
if i % 10 == 9:
print()
i+=1
"""
# 格列变色
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
★☆★☆★☆★☆★☆
"""
# (6)一个循环 打印十行十列隔行变色小星星(一个循环)
"""
# 地板除算法
0 // 10 0
1 // 10 0
2 // 10 0
3 // 10 0
...
9 // 10 0
10 // 10 1
11 // 10 1
12 // 10 1
...
19 // 10 1
20 // 10 2
21 // 10 2
..
29 // 10 2
...
...
90 // 10 9
91 // 10 9
..
99 // 10 9
0 // 3 0
1 // 3 0
2 // 3 0
3 // 3 1
4 // 3 1
5 // 3 1
10个0
10个1
10个2
10个3
...
10个9
=> 任意数和n进行地板除 : 会出现n个相同的数字
"""
#★☆
i = 0
while i<100:
# 利用地板除与取余的规律 产生十个相同的数字,并且按10个相同的余数取花色
if i // 10 % 2 == 0:
print("★",end="")
else:
print("☆",end="")
# 控制换行
if i % 10 == 9:
print()
i+=1
'''
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
★★★★★★★★★★
☆☆☆☆☆☆☆☆☆☆
'''
|
[
"991552699@qq.com"
] |
991552699@qq.com
|
758237f387f0ed09696e3ddefa728eaadd792a79
|
4aa7a4d0525095725eb99843c83827ba4806ceb1
|
/tf/tf08_mv2.py
|
07920ed1a465784c7cf577b6538dd87eef4b0862
|
[] |
no_license
|
seonukim/Study
|
65a70f5bdfad68f643abc3086d5c7484bb2439d4
|
a5f2538f9ae8b5fc93b5149dd51704e8881f0a80
|
refs/heads/master
| 2022-12-04T17:04:31.489771
| 2020-08-21T00:35:15
| 2020-08-21T00:35:15
| 260,144,755
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,251
|
py
|
# mv : Multi Variables - multi-variable linear regression (TF1-style graph API)
import os
import tensorflow as tf

path = 'D:/Study/data/csv/'
os.chdir(path)  # NOTE(review): hard-coded Windows path; nothing below reads files
tf.compat.v1.set_random_seed(777)

# Training data: 5 samples x 3 features, one scalar target each
x_data = [[73., 51., 65.],
          [92., 98., 11.],
          [89., 31., 33.],
          [99., 33., 100.],
          [17., 66., 79.]]
y_data = [[152.],
          [185.],
          [180.],
          [205.],
          [142.]]

# Placeholders fed through feed_dict at run time
x = tf.compat.v1.placeholder(tf.float32, shape = [None, 3])
y = tf.compat.v1.placeholder(tf.float32, shape = [None, 1])

# Model parameters
w = tf.compat.v1.Variable(tf.random.normal([3, 1]), name = 'weight')
b = tf.compat.v1.Variable(tf.random.normal([1]), name = 'bias')

hypothesis = tf.compat.v1.add(tf.compat.v1.matmul(x, w), b)  # xw + b

# Mean squared error.  Bug fix: compare against the fed placeholder `y`
# instead of the Python constant y_data, so the y entry in feed_dict is
# actually used by the graph.
cost = tf.compat.v1.reduce_mean(tf.compat.v1.square(hypothesis - y))

optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate = 5e-6)
train = optimizer.minimize(cost)

feed_dict = {x: x_data, y: y_data}

init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
    sess.run(init)
    # Train for 2001 steps, reporting cost and predictions every 50 steps
    for step in range(2000 + 1):
        cost_val, hy_val, _ = sess.run([cost, hypothesis, train],
                                       feed_dict = feed_dict)
        if step % 50 == 0:
            print(f'{step}, cost : {cost_val}, \n{step} 예측값 : \n{hy_val}\n')
|
[
"92.seoonooo@gmail.com"
] |
92.seoonooo@gmail.com
|
3ce72250025b94e8864b7f1f307db8f7b8c7ef73
|
3a0430831f3f9fc551ce02f625318754c17a5357
|
/app/database/tables.py
|
e26bf8c2f915d4db3512c9b8a8e20ed0ced8fc7a
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
victor-iyi/heart-disease
|
8589409388495029a2219c08fad57e0941bfbff1
|
06540b582e8752d2bb6a32366077872d32d7c0e4
|
refs/heads/master
| 2023-08-03T11:18:37.711933
| 2021-09-19T16:30:05
| 2021-09-19T16:30:05
| 363,746,469
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,535
|
py
|
# Copyright 2021 Victor I. Afolabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from passlib.context import CryptContext
from sqlalchemy import Column, DateTime, Enum
from sqlalchemy import Integer, Numeric, String, Text
from app.database import Base
class Category(Enum):
    # NOTE(review): `Enum` here is sqlalchemy.Enum (per the imports above),
    # not enum.Enum.  Subclassing the SQLAlchemy column *type* to declare
    # members looks like a bug; presumably `enum.Enum` was intended.
    # TODO confirm before changing -- User.category uses this as its type.
    patient = 'Patient'
    practitioner = 'Medical Practitioner'
class User(Base):
    """User account row; base of a polymorphic hierarchy discriminated
    by `category` (Patient / Medical Practitioner)."""
    __tablename__ = 'user'
    # User ID column.
    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    # bcrypt hash of the password -- never the plaintext
    password_hash = Column(String(64), nullable=False)
    first_name = Column(String(32), index=True)
    last_name = Column(String(32), index=True)
    # Discriminator column.  NOTE(review): passes the Category class itself
    # as the column type -- verify this is valid given the Category
    # definition above (see note there).
    category = Column(Category, index=True,
                      nullable=False,
                      default=Category.patient)
    __mapper_args__ = {
        'polymorphic_identity': 'user',
        'polymorphic_on': category,
    }
    # Password context (class-level, shared): bcrypt hashing scheme.
    pwd_context = CryptContext(schemes=['bcrypt'], deprecated='auto')
    def __repr__(self) -> str:
        return f'User({self.email}, {self.category})'
    @staticmethod
    def hash_password(password: str) -> str:
        # One-way hash suitable for storing in password_hash
        return User.pwd_context.hash(password)
    @staticmethod
    def verify_password(password: str, hash_password: str) -> bool:
        # Check a candidate password against a stored hash
        return User.pwd_context.verify(password, hash_password)
class Patient(User):
# Patient info.
age = Column(Integer)
contact = Column(String(15), index=True)
history = Column(Text)
aliment = Column(Text)
last_visit_diagnosis = Column(DateTime)
guardian_fullname = Column(String(64))
guardian_email = Column(String)
guardian_phone = Column(String(15))
occurences_of_illness = Column(Text)
last_treatment = Column(DateTime)
__mapper_args__ = {
'polymorphic_identity': 'patient',
'inherit_condition': User.category == Category.patient
}
def __repr__(self) -> str:
return f'Patient({self.email})'
class Practitoner(User):
practitioner_data = Column(String)
__mapper_args__ = {
'polymorphic_identity': 'practitioner',
'inherit_condition': User.category == Category.practitioner
}
def __repr__(self) -> str:
return f'Practitioner({self.email})'
class Feature(Base):
__tablename__ = 'features'
# Primary key.
id = Column(Integer, primary_key=True, index=True)
# Features.
age = Column(Integer, nullable=False)
sex = Column(Integer, nullable=False)
cp = Column(Integer, nullable=False)
trestbps = Column(Integer, nullable=False)
chol = Column(Integer, nullable=False)
fbs = Column(Integer, nullable=False)
restecg = Column(Integer, nullable=False)
thalach = Column(Integer, nullable=False)
exang = Column(Integer, nullable=False)
oldpeak = Column(Numeric, nullable=False)
slope = Column(Integer, nullable=False)
ca = Column(Integer, nullable=False)
thal = Column(Integer, nullable=False)
# Target.
target = Column(Integer, nullable=True)
|
[
"javafolabi@gmail.com"
] |
javafolabi@gmail.com
|
7063e3a2f690e52339d69ce4edbc432271d79b30
|
223f8feb7b9ff6334ca7d047636fbbcb598c824c
|
/src/web/web/settings.py
|
bfdabc775015d1c9cde45d4c5489e1882893226d
|
[] |
no_license
|
cluster311/backend.cluster311.com
|
169b3c621c7f0231844c4e9b6ad51a9feada4608
|
f5ab1ebbd220d3ab4bae253cc61fddbe1153f8d8
|
refs/heads/master
| 2023-08-06T20:01:00.563492
| 2020-04-10T20:02:22
| 2020-04-10T20:02:22
| 254,720,332
| 0
| 0
| null | 2021-09-22T18:52:07
| 2020-04-10T19:36:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
"""
Django settings for web project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@+wq2cosila9tbeg0vpul0-0xvlsm)1(+g0llgz7e+-2_m22st'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'web.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"andres@data99.com.ar"
] |
andres@data99.com.ar
|
80e46d30ab05b2895d952c71dce8447f1266d6a9
|
78d23de227a4c9f2ee6eb422e379b913c06dfcb8
|
/LeetCode/41.py
|
4121f226d68d1e982fa42e6e49ff768f1509886a
|
[] |
no_license
|
siddharthcurious/Pythonic3-Feel
|
df145293a3f1a7627d08c4bedd7e22dfed9892c0
|
898b402b7a65073d58c280589342fc8c156a5cb1
|
refs/heads/master
| 2020-03-25T05:07:42.372477
| 2019-09-12T06:26:45
| 2019-09-12T06:26:45
| 143,430,534
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
from typing import List
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer (>= 1) missing from nums.

        Fixed: the previous version only printed the min/max of nums and
        returned None, never computing the first missing positive.
        Uses a set for O(n) time / O(n) space; the answer is at most
        len(nums) + 1, so the scan below terminates.
        """
        seen = set(nums)
        candidate = 1
        while candidate in seen:
            candidate += 1
        return candidate
if __name__ == "__main__":
    # Ad-hoc driver: run the solver on a sample input and show the result
    s = Solution()
    arr = [3, 4, -1, 1]
    r = s.firstMissingPositive(arr)
    print(r)
|
[
"sandhyalalkumar@gmail.com"
] |
sandhyalalkumar@gmail.com
|
9815dae1a781a0753475d33dc8c2dfb696bc31a4
|
97e37192d4a695777c538596086c0be826b721e1
|
/vedastr/lr_schedulers/base.py
|
95d0f2e637e52d41a0e3cddfded57efb211e5a7c
|
[
"Apache-2.0"
] |
permissive
|
Sayyam-Jain/vedastr
|
1b587adc1ff4dc79ab7acc71d7ee08fe600c8933
|
83511a408b68c264561a30daff5154cd0148bebd
|
refs/heads/master
| 2022-12-13T08:06:21.304845
| 2020-09-10T05:05:50
| 2020-09-10T05:05:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,825
|
py
|
import warnings
import weakref
from functools import wraps
import numpy as np
from torch.optim import Optimizer
class _Iter_LRScheduler(object):
def __init__(self, optimizer, niter_per_epoch, last_iter=-1, iter_based=True):
self._iter_based = iter_based
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
self.niter_per_epoch = niter_per_epoch
if last_iter == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
last_iter = 0
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.last_epoch = int(last_iter / niter_per_epoch)
# Following https://github.com/pytorch/pytorch/issues/20124
# We would like to ensure that `lr_scheduler.step()` is called after
# `optimizer.step()`
def with_counter(method):
if getattr(method, '_with_counter', False):
# `optimizer.step()` has already been replaced, return.
return method
# Keep a weak reference to the optimizer instance to prevent
# cyclic references.
instance_ref = weakref.ref(method.__self__)
# Get the unbound method for the same purpose.
func = method.__func__
cls = instance_ref().__class__
del method
@wraps(func)
def wrapper(*args, **kwargs):
instance = instance_ref()
instance._step_count += 1
wrapped = func.__get__(instance, cls)
return wrapped(*args, **kwargs)
# Note that the returned function here is no longer a bound method,
# so attributes like `__func__` and `__self__` no longer exist.
wrapper._with_counter = True
return wrapper
self.optimizer.step = with_counter(self.optimizer.step)
self.optimizer._step_count = 0
self._step_count = 0
self.iter_nums(last_iter)
self.step()
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.__dict__.update(state_dict)
def get_lr(self):
raise NotImplementedError
def iter_nums(self, iter_=None):
# Raise a warning if old pattern is detected
# https://github.com/pytorch/pytorch/issues/20124
if self._step_count == 1:
if not hasattr(self.optimizer.step, "_with_counter"):
warnings.warn("Seems like `optimizer.step()` has been overridden after learning rate scheduler "
"initialization. Please, make sure to call `optimizer.step()` before "
"`lr_scheduler.step()`. See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
# Just check if there were two first lr_scheduler.step() calls before optimizer.step()
elif self.optimizer._step_count < 1:
warnings.warn("Detected call of `lr_scheduler.step()` before `optimizer.step()`. "
"In PyTorch 1.1.0 and later, you should call them in the opposite order: "
"`optimizer.step()` before `lr_scheduler.step()`. Failure to do this "
"will result in PyTorch skipping the first value of the learning rate schedule."
"See more details at "
"https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate", UserWarning)
self._step_count += 1
if iter_ is None:
iter_ = self.last_iter + 1
self.last_iter = iter_
self.last_epoch = np.ceil(iter_ / self.niter_per_epoch)
def step(self):
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
|
[
"jun.sun@media-smart.cn"
] |
jun.sun@media-smart.cn
|
30b8677941dafba525a06bf773593a0852b3b768
|
f6786f5f51c0a71a09213e2f729766d1a04dffa2
|
/두근두근_파이썬/11_File/Labs/328_hangman_game.py
|
50ceef354db69f90bce3e63a80c529b8ec789d86
|
[] |
no_license
|
SuperstarterJaeeun/Learn-Programming-Book
|
4f075fdec386a0449da8d0d08bb8f1b6d6b2f304
|
f768acfffcb20b9fc97946ca491f6ffb20671896
|
refs/heads/master
| 2023-07-24T07:13:24.374240
| 2021-09-06T14:56:02
| 2021-09-06T14:56:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 713
|
py
|
import random

# Hangman: the player has `turns` wrong guesses to uncover a random word.
guesses = ''   # every character/word the player has guessed so far
turns = 10     # remaining wrong guesses

# Read the word list and strip the trailing newline.  Bug fix:
# random.choice(lines) previously kept the '\n', which was displayed as an
# unguessable '_' and made the win condition unreachable (and leaked the
# newline into the final "answer" message).
with open("word.txt", "r", encoding="UTF8") as infile:
    lines = infile.readlines()
word = random.choice(lines).strip()

while turns > 0 :
    failed = 0
    # Show the word: reveal guessed characters, mask the rest
    for char in word :
        if char in guesses :
            print(char, end = "")
        else :
            print("_", end = "")
            failed += 1
    if failed == 0:
        print("사용자 승리")
        break
    print("")
    guess = input("단어를 추측하시오 : ")
    guesses += guess
    if guess not in word :
        # Wrong guess: burn a turn
        turns -= 1
        print("틀렸음!")
        print(str(turns) + "기회가 남았음!")
        if turns == 0:
            print("사용자 패배 정답은 " + word)
|
[
"danghyeona0113@gmail.com"
] |
danghyeona0113@gmail.com
|
c7139cdba6d0bdc5caf556866c895ed914db146f
|
6a96d6c5ba06ef175ebeed773fc925fcad7ddbd2
|
/MaLongHui_Django/apps/users/urls.py
|
0a64e123fc1b9b6346ce505192f5960b68fa429a
|
[] |
no_license
|
AmirHuang/MaLongHui_Django
|
38934c3de34f705a70458ff8c644efce69854435
|
0bcff7f0311d6bddd504d088ad52e8217f5c8c74
|
refs/heads/master
| 2020-05-04T04:48:10.149549
| 2019-04-02T01:24:01
| 2019-04-02T01:24:01
| 178,974,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 868
|
py
|
# _*_ coding: utf-8 _*_
# @time : 2019/03/30
# @Author : Amir
# @Site :
# @File : urls.py
# @Software : PyCharm
from django.urls import path
from rest_framework import routers
from . import views
from rest_framework_jwt.views import obtain_jwt_token
# URL routing for the users app: DRF router-backed viewsets plus explicit
# paths for user detail, uniqueness checks, signup and JWT login.
# NOTE(review): the next line rebinds the imported `routers` module name to
# a DefaultRouter instance (shadows the module); consider renaming to
# `router` -- left unchanged here in case other modules import this name.
routers = routers.DefaultRouter()
routers.register(r'work_experiences', views.UserWorkExperienceViewSet, base_name='work_experiences')
routers.register(r'education_experiences', views.UserEducationExperienceViewSet, base_name='education_experiences')
urlpatterns = [
    path(r'user/', views.UserDetailView.as_view()),
    # Availability checks: how many existing users have this username/mobile
    path(r'usernames/<str:username>/count/', views.UsernameCountView.as_view()),
    path(r'mobiles/<str:mobile>/count/', views.MobileCountView.as_view()),
    path(r'users/', views.UserView.as_view()),
    # JWT token obtain endpoint (login)
    path(r'authorizations/', obtain_jwt_token),
]
urlpatterns += routers.urls
|
[
"429771087@qq.com"
] |
429771087@qq.com
|
c8014a758b3a175f52c3012fcc28b6369c99270a
|
e415e4cdab3d1cd04a4aa587f7ddc59e71977972
|
/builtin/comprehension_syntax.py
|
fa9db9d682556dea663d08cffe6ccd076c54a921
|
[] |
no_license
|
nixawk/hello-python3
|
8c3ebba577b39f545d4a67f3da9b8bb6122d12ea
|
e0680eb49d260c5e3f06f9690c558f95a851f87c
|
refs/heads/master
| 2022-03-31T23:02:30.225702
| 2019-12-02T10:15:55
| 2019-12-02T10:15:55
| 84,066,942
| 5
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Comprehension syntax demo: the same aggregation expressed through a list
# comprehension, a set comprehension and a generator expression -- Python's
# compact way to derive one series of values from another.
#
#   [k * k for k in range(1, n+1)]     -> list comprehension
#   {k * k for k in range(1, n+1)}     -> set comprehension
#   (k * k for k in range(1, n+1))     -> generator comprehension
#   {k: k * k for k in range(1, n+1)}  -> dictionary comprehension

LIST_A = [1, 2, 3, 4]

# Feed the same generator expression through three container builders;
# each line prints 100.
for build in (list, set, tuple):
    print(sum(build(k * 10 for k in LIST_A)))
|
[
"hap.ddup@gmail.com"
] |
hap.ddup@gmail.com
|
338f19efb58b55b42cd987e0e1ddec5ce0c6c3ca
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/cdn/v20200415/get_profile.py
|
1eded7cd80fac2a555ea3ca6f8ca513c8be3e5e2
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773
| 2020-11-06T22:39:06
| 2020-11-06T22:39:06
| 312,993,761
| 0
| 0
|
Apache-2.0
| 2023-06-02T06:47:28
| 2020-11-15T09:04:00
| null |
UTF-8
|
Python
| false
| false
| 4,875
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetProfileResult',
'AwaitableGetProfileResult',
'get_profile',
]
@pulumi.output_type
class GetProfileResult:
"""
CDN profile is a logical grouping of endpoints that share the same settings, such as CDN provider and pricing tier.
"""
def __init__(__self__, location=None, name=None, provisioning_state=None, resource_state=None, sku=None, tags=None, type=None):
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_state and not isinstance(resource_state, str):
raise TypeError("Expected argument 'resource_state' to be a str")
pulumi.set(__self__, "resource_state", resource_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning status of the profile.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceState")
def resource_state(self) -> str:
"""
Resource status of the profile.
"""
return pulumi.get(self, "resource_state")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
The pricing tier (defines a CDN provider, feature list and rate) of the CDN profile.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetProfileResult(GetProfileResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetProfileResult(
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
resource_state=self.resource_state,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_profile(profile_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProfileResult:
"""
Use this data source to access information about an existing resource.
:param str profile_name: Name of the CDN profile which is unique within the resource group.
:param str resource_group_name: Name of the Resource group within the Azure subscription.
"""
__args__ = dict()
__args__['profileName'] = profile_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:cdn/v20200415:getProfile', __args__, opts=opts, typ=GetProfileResult).value
return AwaitableGetProfileResult(
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
resource_state=__ret__.resource_state,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
a74ff399c94d7abbb0737bb769e59ed0db02c535
|
bd93fa910151c278be8249055bc084e5a5c35a6a
|
/Python/itcast/01-Python进阶1/4异常/4抛出自定义异常.py
|
45e5cd62b39f425811af7f9113a9ac4885ab6479
|
[] |
no_license
|
ahojcn/practice-code
|
bd81595b80239cd2550183093566bd536a83ed3f
|
b65f4e76271479269463e92fd3fd41585c2ac792
|
refs/heads/master
| 2021-07-10T14:15:08.036592
| 2020-07-09T11:32:16
| 2020-07-09T11:32:16
| 153,059,349
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
class ShortInputException(Exception):
    """Raised when user input is shorter than a required minimum length.

    The attribute names are part of the public contract -- callers read
    ``e.length`` and ``e.atlieast`` (historical spelling preserved).
    """
    def __init__(self, length, atlieast):
        self.length, self.atlieast = length, atlieast
def main():
    """Read a line of input; inputs shorter than 3 characters raise
    ShortInputException, which is caught and reported."""
    try:
        s = input("请输入 -> ")
        if len(s) < 3:
            # Input too short: raise the custom exception with the
            # actual and required lengths
            raise ShortInputException(len(s), 3)
    except ShortInputException as e:
        print("ShortInputException, 输入长度(%d), 最小长度(%d)" % (e.length, e.atlieast))
if __name__ == "__main__":
    main()
|
[
"hanoi_ahoj@icloud.com"
] |
hanoi_ahoj@icloud.com
|
ce0c9e95fea923683045ab70e0cd201076d5ba46
|
d28a65d23c204a9736b597ae510d9dd54d2ffd0f
|
/tests/testUtils/testSctidGenerator.py
|
7f1015fad1169ccb000f226bcefdd7bb6b7e9825
|
[
"BSD-3-Clause"
] |
permissive
|
cts2/rf2db
|
99ba327611e620fc5533245064afcc1daff7c164
|
985cd7ad84c8907306a0d7d309d4a1c0fb422ba4
|
refs/heads/master
| 2020-05-17T22:37:25.476553
| 2015-08-24T22:18:19
| 2015-08-24T22:18:19
| 15,264,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,482
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from rf2db.utils.sctid_generator import *
from rf2db.utils.sctid import sctid
class GeneratorTestCase(unittest.TestCase):
def test_gen(self):
generator = sctid_generator(MAYO_Namespace, sctid_generator.RELATIONSHIP, 1000)
self.assertEqual(10001000134127, generator.next())
self.assertEqual([(1, 10011000134125), (2, 10021000134121), (3, 10031000134123),
(4, 10041000134129), (5, 10051000134126), (6, 10061000134128),
(7, 10071000134120), (8, 10081000134122), (9, 10091000134124)],
list(zip(range(1, 10), generator)))
self.assertEqual(171000160104, (sctid_generator(CIMI_Namespace, sctid_generator.CONCEPT, 17)).next())
self.assertEqual(911431000160119, (sctid_generator(CIMI_Namespace, sctid_generator.DESCRIPTION, 91143)).next())
self.assertEqual(10101000134126, sctid(generator.next()))
self.assertEqual([(1, 10111000134129), (2, 10121000134120), (3, 10131000134122),
(4, 10141000134128), (5, 10151000134125), (6, 10161000134127),
(7, 10171000134124), (8, 10181000134121), (9, 10191000134123)],
[(a, sctid(generator.next())) for a in range(1, 10)])
self.assertEqual(171000160104, sctid((sctid_generator(CIMI_Namespace, sctid_generator.CONCEPT, 17)).next()))
self.assertEqual(911431000160119, sctid((sctid_generator(CIMI_Namespace, sctid_generator.DESCRIPTION, 91143)).next()))
def test_zero_partition(self):
self.assertEqual(123456001, sctid_generator(0, sctid_generator.CONCEPT, 123456).next())
self.assertEqual(654321026, sctid_generator(0, sctid_generator.RELATIONSHIP, 654321).next())
self.assertEqual(5349010, sctid_generator(0, sctid_generator.DESCRIPTION, 5349).next())
if __name__ == '__main__':
unittest.main()
|
[
"solbrig.harold@mayo.edu"
] |
solbrig.harold@mayo.edu
|
0486fbd83ea6d8e49d1a8483cd10867e4bd01827
|
518d911a66485947c5d336e96a842f162ef9caf1
|
/res/scripts/client/messenger/proto/bw/clanlistener.py
|
39c44a4a5bbb7958948df803005d0fa3f6e79198
|
[] |
no_license
|
wotmods/WOTDecompiled
|
84b8e5d32ee73e1356b4d57318eb76dfac6b5220
|
45fd599666c55cb871f6b84b0ec977b9d4baf469
|
refs/heads/master
| 2020-12-25T21:34:26.096544
| 2014-11-05T13:58:39
| 2014-11-05T13:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,968
|
py
|
# 2014.10.18 14:44:16 Central European Daylight Time
#Embedded file name: scripts/client/messenger/proto/bw/ClanListener.py
import BigWorld
from PlayerEvents import g_playerEvents
from debug_utils import LOG_DEBUG, LOG_ERROR
from messenger.ext.player_helpers import getPlayerDatabaseID
from messenger.proto.bw.entities import BWUserEntity
from messenger.proto.bw.find_criteria import BWClanChannelFindCriteria
from messenger.proto.entities import CurrentUserEntity
from messenger.proto.events import g_messengerEvents
from messenger.storage import storage_getter
class _INIT_STEPS(object):
    # Bit flags tracking clan-list initialisation progress
    CLAN_INFO_RECEIVED = 1
    MEMBERS_LIST_RECEIVED = 2
    # Mask set once both steps have completed
    LIST_INITED = CLAN_INFO_RECEIVED | MEMBERS_LIST_RECEIVED
class ClanListener(object):
    """Keeps the messenger users storage in sync with clan data.

    Combines two sources: the player's clanMembers mapping (names/role
    flags) and the clan chat channel (online status), and re-broadcasts
    g_messengerEvents.users.onClanMembersListChanged when either changes.
    Events are only re-broadcast once both init steps have completed.
    """
    def __init__(self):
        super(ClanListener, self).__init__()
        # Bitmask of _INIT_STEPS flags; see _INIT_STEPS.LIST_INITED.
        self.__initSteps = 0
        # Clan chat channel entity, set once a matching channel is found.
        self.__clanChannel = None
        self.__channelCriteria = BWClanChannelFindCriteria()
    @storage_getter('users')
    def usersStorage(self):
        # Stub body; storage_getter presumably replaces this with a
        # property returning the 'users' storage - TODO confirm.
        return None
    @storage_getter('playerCtx')
    def playerCtx(self):
        # Stub body; replaced by storage_getter with the player context.
        return None
    def start(self):
        """Attach to the clan channel and subscribe to all relevant events."""
        self.__findClanChannel()
        cEvents = g_messengerEvents.channels
        cEvents.onChannelInited += self.__ce_onChannelInited
        cEvents.onChannelDestroyed += self.__ce_onChannelDestroyed
        g_playerEvents.onClanMembersListChanged += self.__pe_onClanMembersListChanged
        self.playerCtx.onClanInfoChanged += self.__pc_onClanInfoChanged
    def stop(self):
        """Unsubscribe from all events and detach from the clan channel."""
        cEvents = g_messengerEvents.channels
        cEvents.onChannelInited -= self.__ce_onChannelInited
        cEvents.onChannelDestroyed -= self.__ce_onChannelDestroyed
        self.__clearClanChannel()
        g_playerEvents.onClanMembersListChanged -= self.__pe_onClanMembersListChanged
        self.playerCtx.onClanInfoChanged -= self.__pc_onClanInfoChanged
    def __findClanChannel(self):
        """Look up an existing clan channel in storage and attach to it."""
        channel = storage_getter('channels')().getChannelByCriteria(self.__channelCriteria)
        if channel is not None:
            self.__initClanChannel(channel)
    def __initClanChannel(self, channel):
        """Bind to *channel* and refresh member online flags from it."""
        if self.__clanChannel is not None:
            # Only one clan channel may be tracked at a time.
            LOG_ERROR('Clan channel is defined', self.__clanChannel, channel)
            return
        self.__clanChannel = channel
        self.__clanChannel.onMembersListChanged += self.__ce_onMembersListChanged
        self.__refreshClanMembers()
    def __clearClanChannel(self):
        """Detach from the channel and mark every clan member offline."""
        if self.__clanChannel is not None:
            self.__clanChannel.onMembersListChanged -= self.__ce_onMembersListChanged
            self.__clanChannel = None
            for user in self.usersStorage.getClanMembersIterator():
                user.update(isOnline=False)
            g_messengerEvents.users.onClanMembersListChanged()
    def __refreshClanMembers(self):
        """Sync each member's isOnline flag with channel membership."""
        getter = self.__clanChannel.getMember
        changed = False
        for user in self.usersStorage.getClanMembersIterator():
            dbID = user.getID()
            isOnline = user.isOnline()
            member = getter(dbID)
            if member is not None:
                if not isOnline:
                    user.update(isOnline=True)
                    changed = True
            elif isOnline:
                user.update(isOnline=False)
                changed = True
        if changed:
            # Notify only when at least one flag actually flipped.
            g_messengerEvents.users.onClanMembersListChanged()
    def __pe_onClanMembersListChanged(self):
        """Rebuild the stored members list from BigWorld.player().clanMembers."""
        clanMembers = getattr(BigWorld.player(), 'clanMembers', {})
        LOG_DEBUG('setClanMembersList', clanMembers)
        if not self.__initSteps & _INIT_STEPS.MEMBERS_LIST_RECEIVED:
            self.__initSteps |= _INIT_STEPS.MEMBERS_LIST_RECEIVED
        clanAbbrev = self.playerCtx.getClanAbbrev()
        members = []
        if self.__clanChannel is not None:
            getter = self.__clanChannel.getMember
        else:
            # No channel yet: treat every member as offline.
            getter = lambda dbID: None
        playerID = getPlayerDatabaseID()
        for dbID, (name, roleFlags) in clanMembers.iteritems():
            isOnline = False if getter(dbID) is None else True
            if playerID == dbID:
                # The local player gets a CurrentUserEntity (no online flag).
                user = CurrentUserEntity(dbID, name=name, clanAbbrev=clanAbbrev, clanRole=roleFlags)
            else:
                user = BWUserEntity(dbID, name=name, clanAbbrev=clanAbbrev, clanRole=roleFlags, isOnline=isOnline)
            members.append(user)
        self.usersStorage._setClanMembersList(members)
        if self.__initSteps & _INIT_STEPS.LIST_INITED != 0:
            g_messengerEvents.users.onClanMembersListChanged()
    def __pc_onClanInfoChanged(self):
        """Propagate clan role/abbreviation changes from the player context."""
        clanInfo = self.playerCtx.clanInfo
        hasClanInfo = clanInfo is not None and len(clanInfo) > 0
        if not self.__initSteps & _INIT_STEPS.CLAN_INFO_RECEIVED and hasClanInfo:
            self.__initSteps |= _INIT_STEPS.CLAN_INFO_RECEIVED
        user = self.usersStorage.getUser(getPlayerDatabaseID())
        if user:
            user.update(clanRole=self.playerCtx.getClanRole())
        clanAbbrev = self.playerCtx.getClanAbbrev()
        for user in self.usersStorage.getClanMembersIterator():
            user.update(clanAbbrev=clanAbbrev)
        if self.__initSteps & _INIT_STEPS.LIST_INITED != 0:
            g_messengerEvents.users.onClanMembersListChanged()
    def __ce_onChannelInited(self, channel):
        # Attach when the newly created channel is the clan channel.
        if self.__channelCriteria.filter(channel):
            self.__initClanChannel(channel)
    def __ce_onChannelDestroyed(self, channel):
        if self.__channelCriteria.filter(channel):
            self.__clearClanChannel()
    def __ce_onMembersListChanged(self):
        self.__refreshClanMembers()
+++ okay decompyling res/scripts/client/messenger/proto/bw/clanlistener.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2014.10.18 14:44:16 Central European Daylight Time
|
[
"chodakk@RWAMWCOE31488.emea.roche.com"
] |
chodakk@RWAMWCOE31488.emea.roche.com
|
199294e0b423494211f9880690d3ca663d5aae1e
|
8d402df39c18eba7e1c86c762f205c944357c5df
|
/www/src/Lib/site-packages/ui/menu.py
|
78483584c32e8801d0303aea19d88121aed69b3a
|
[
"BSD-3-Clause"
] |
permissive
|
brython-dev/brython
|
87cc023e25550dec9ce459ba68774189f33712b6
|
b33958bff0e8c7a280babc30232dc389a2500a7a
|
refs/heads/master
| 2023-09-04T04:49:29.156209
| 2023-09-01T06:36:08
| 2023-09-01T06:36:08
| 24,046,239
| 6,569
| 625
|
BSD-3-Clause
| 2023-07-05T06:13:32
| 2014-09-15T06:58:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,270
|
py
|
from . import widget
from browser import html, document
class Menu(html.UL, widget.Widget):
    """Top-level menu bar: a styled UL whose items open drop-down sublists."""
    def __init__(self, id=None, style={}):
        # Base style first, caller-supplied overrides applied on top.
        merged = {'position': 'relative', 'height': 'auto', 'width': 'auto'}
        merged.update(style)
        html.UL.__init__(self, Class="ui-widget ui-menu", style=merged)
        widget.Widget.__init__(self, self, 'menu', id)
        # Any click in the document closes open sublists.
        document.bind('click', self.leave)
        self.active = False
    def add(self, title):
        """Append a new MenuItem labelled *title* and return it."""
        entry = MenuItem(title, Class="ui-widget ui-menu-item")
        entry.bind('click', entry.activate)
        entry.bind('mouseenter', entry.enter)
        self <= entry
        return entry
    def leave(self, ev):
        """Close every open sublist and deactivate the menu."""
        for entry in self.children:
            if entry.state == 'show':
                document.remove(entry.div)
                entry.state = 'hide'
        self.active = False
class MenuItem(html.LI):
    """One entry in a Menu; may own a drop-down sublist of (label, callback)."""
    def __init__(self, *args, **kw):
        html.LI.__init__(self, *args, **kw)
        # (label, callback) pairs shown in the drop-down; callback may be None.
        self.items = []
        # 'hide' or 'show' - whether this item's sublist is on screen.
        self.state = "hide"
    def activate(self, ev):
        """Click handler: mark the whole menu active and toggle this sublist."""
        self.parent.active = True
        self.show(ev)
        # Keep the click from reaching the document-level close handler.
        ev.stopPropagation()
    def enter(self, ev):
        """Hover handler: open this sublist only if the menu is already active."""
        if self.parent.active:
            self.show(ev)
    def show(self, ev):
        """Toggle this item's sublist, closing any sibling's open sublist first."""
        for item in self.parent.children:
            if item.state == "show":
                if item == self:
                    # Already open: keep it (toggling happens below otherwise).
                    return
                document.remove(item.div)
                item.state = "hide"
        if self.state == "hide":
            # Position the sublist just below the clicked/hovered element.
            left = ev.target.left
            top = ev.target.top+ev.target.height
            self.div = html.DIV(Class="ui-widget ui-menu-sublist",
                style=dict(position='absolute', left=left, top=top, zIndex=99))
            for item in self.items:
                line = html.DIV(item[0], Class="ui-menu-subitem")
                if item[1] is not None:
                    line.bind('click', item[1])
                self.div <= line
            self.state = "show"
            self.div.style.borderWidth = "1px"
            document <= self.div
        else:
            document.remove(self.div)
            self.state = "hide"
    def add(self, label, callback = None):
        """Register a sublist entry; rendered on the next show()."""
        self.items.append((label, callback))
|
[
"pierre.quentel@gmail.com"
] |
pierre.quentel@gmail.com
|
03427bc747dcd4f72ae0ca4206d202efd9bd40fe
|
f55f3cd5f44982260fd1bcf711207e3d952499a3
|
/platform_crawler/spiders/pylib/login_qq_with_cli.py
|
b1dd5448a9cec103c4a23412ffb2782281cd30f0
|
[] |
no_license
|
prynix/save_code
|
c065c79d79cc6a5b9181081fa06deaea32af0d78
|
4556d1ad01ed192f91ae210983010ad45bf4635c
|
refs/heads/master
| 2022-02-20T18:32:44.055007
| 2019-08-19T09:51:18
| 2019-08-19T09:51:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,661
|
py
|
import os
import time
import win32gui
import psutil
import logging
from ctypes import windll
from platform_crawler.settings import join, IMG_PATH, GlobalVal
from platform_crawler.configs.excute_paths import ExecutePaths
ACC, u, pag, logger = None, None, None, None
TIM_IMG_PATH = join(IMG_PATH, 'tim_img')
NEW_ERROR_PATH = join(TIM_IMG_PATH, 'new_error_img')
if not os.path.exists(TIM_IMG_PATH):
os.makedirs(TIM_IMG_PATH)
if not os.path.exists(NEW_ERROR_PATH):
os.makedirs(NEW_ERROR_PATH)
img_path = join(TIM_IMG_PATH, 'qq_cli_vc_cf.png')
err_account_img = join(TIM_IMG_PATH, 'err_account.png')
death_acc_img = join(TIM_IMG_PATH, 'death_acc.png')
find_password_img = join(TIM_IMG_PATH, 'find_password.png')
login_success = join(TIM_IMG_PATH, 'login_succ.png')
after_enter_login_btn = join(NEW_ERROR_PATH, 'after_enter.png')
authentication_img = join(TIM_IMG_PATH, 'need_auth.png')
VERIFY_TIMES = 1
def kill_qq():
    """Terminate every running process whose name contains 'TIM'."""
    for proc in psutil.process_iter():
        if 'TIM' in proc.name():
            proc.kill()
def btn_location(img_name_path, loop_time=2, dur=0):
    """Locate an image on screen and return its (x, y) top-left corner.

    Retries up to *loop_time* times, sleeping *dur* seconds between tries;
    with dur == 0 it gives up on the first miss and returns False.  If all
    retries miss (dur != 0) it falls through and returns None (falsy).
    pag.locateOnScreen returns None on a miss, so unpacking raises
    TypeError - that exception is the retry trigger.
    """
    started = time.time()
    for _ in range(loop_time):
        try:
            x, y, w, h = pag.locateOnScreen(img_name_path)
        except TypeError:
            if dur == 0:
                return False
            time.sleep(dur)
        else:
            logger.info('Find once cost time: %s' % int(time.time() - started))
            return x, y
def handle_login_res(loginid):
    """Inspect the screen after a login attempt and classify the outcome.

    Returns True on success, False on any recognized/unknown failure, and
    delegates to deal_vc() when a captcha page is detected.  *loginid* is
    the tuple from win32gui.GetWindowPlacement; its [4] entry is the login
    window rectangle.  Unrecognized screens are screenshotted into
    NEW_ERROR_PATH for later debugging.
    """
    result = btn_location(img_path) # vc page
    if result:
        logger.info('Verify Code Appeared')
        return deal_vc(loginid)
    elif btn_location(err_account_img): # account error page
        kill_qq()
        logger.info('Wrong account or password!')
        res = False
    elif btn_location(death_acc_img):
        kill_qq()
        logger.info('Frozen account')
        res = False
    elif btn_location(find_password_img):
        kill_qq()
        logger.info('Wrong password! Find and recheck')
        res = False
    elif btn_location(authentication_img):
        kill_qq()
        logger.info('Need to authentication!')
        res = False
    elif btn_location(login_success):
        logger.info('Tim client login success')
        return True
    else:
        logger.info('Unknown situation with account: %s' % ACC)
        res = False
    if not res:
        # Keep a timestamped screenshot of the failure screen.
        pic_name = join(NEW_ERROR_PATH, 'error_%s.png' % (int(time.time())))
        pag.screenshot(pic_name)
    return res
def deal_vc(loginid):
    """Solve the login captcha via the rk OCR service, then re-check the screen.

    Crops the captcha region relative to the login window, submits it to
    u.rc.rk_create, types the recognized text, and recurses into
    handle_login_res to classify the new screen.
    """
    global VERIFY_TIMES
    # cut and deal vc img
    img1_path = join(TIM_IMG_PATH, 'qq_cli_vc.png')
    pag.screenshot(img1_path, region=(loginid[4][0] + 120, loginid[4][1] + 202, 132, 56))
    with open(img1_path, 'br') as f:
        im = f.read()
    res = u.rc.rk_create(im, '2040')  # '2040' presumably the captcha type code - confirm with rk API
    windll.user32.SetCursorPos(loginid[4][0] + 100, loginid[4][1] + 110)
    pag.typewrite(res.get('Result').lower())
    pag.hotkey('enter')
    time.sleep(0.8)
    if VERIFY_TIMES != 1:
        # NOTE(review): on every retry this reports the *current* answer as
        # wrong, not the previous one - confirm intent.
        u.rc.rk_report_error(res.get('Id'))
    VERIFY_TIMES += 1
    return handle_login_res(loginid)
def QQ(qq, pwd):
    """Drive the TIM login window via GUI automation.

    Launches the client, types *qq*/*pwd*, submits, and classifies the
    resulting screen via handle_login_res.  Returns True on success.
    """
    # a = win32gui.FindWindow(None, "QQ")
    # Launch the TIM client
    os.system('"%s"' % ExecutePaths.TimPath)
    time.sleep(5)
    a = win32gui.FindWindow(None, "TIM") # get the window handle; arg 1: class name, arg 2: window title
    loginid = win32gui.GetWindowPlacement(a)
    windll.user32.SetCursorPos(loginid[4][0] + 300, loginid[4][1] + 273)
    pag.click()
    time.sleep(0.2)
    # Type the account number
    pag.typewrite(qq)
    time.sleep(0.2)
    # Tab to the password field
    pag.hotkey('tab')
    pag.typewrite(pwd)
    # Press Enter to log in
    pag.hotkey('enter')
    time.sleep(3)
    pag.screenshot(after_enter_login_btn)
    # Check whether a verification code appeared (90,135)
    res = handle_login_res(loginid)
    if not res:
        return False
    pag.hotkey('enter')
    time.sleep(4)
    a = win32gui.FindWindow(None, "TIM") # re-acquire the handle of the post-login window
    loginid = win32gui.GetWindowPlacement(a)
    pag.click(loginid[4][2]-68, loginid[4][1]+29)
    # print(68, 29)
    return True
def login_cli(acc, pwd, util):
    """Entry point: wire module globals, kill stale clients, log in via the UI."""
    global u, pag, logger, ACC
    # Publish the shared helpers/account for the other module-level functions.
    u, ACC, pag = util, acc, util.pag
    logger = logging.getLogger('%s.login_with_tim' % GlobalVal.CUR_MAIN_LOG_NAME)
    kill_qq()  # make sure no old TIM instance steals the login window
    return QQ(acc, pwd)
if __name__ == '__main__':
from platform_crawler.utils.utils import Util
login_cli('2823259680', 'Hhmt123456', Util())
|
[
"zwbworkmail@163.com"
] |
zwbworkmail@163.com
|
f786367311655515bd413905975a7193b98e5326
|
1d8624b84243107bcc82876a74917dac983ba67d
|
/testing/runtests.py
|
2651b5b061b99b7f34e063c74255164e50b8a21d
|
[
"BSD-3-Clause"
] |
permissive
|
nwp90/djorm-ext-pgarray
|
14f6877f61975b4a64d3dd601dbd0101fb191918
|
1d0d3db7b3539a8840dcbdaf8322a72aef0875d2
|
refs/heads/master
| 2021-01-22T00:10:35.570321
| 2013-12-12T20:01:28
| 2013-12-12T20:01:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
# -*- coding: utf-8 -*-
"""Test runner: ``python runtests.py [app ...]`` runs the Django test suite.

Defaults to the ``pg_array_fields`` app when no labels are given.
"""
import os, sys

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")

from django.core.management import call_command

if __name__ == "__main__":
    args = sys.argv[1:]
    if not args:
        # BUG FIX: was ``argv.append(...)`` - a NameError whenever the
        # script ran without arguments; the default app belongs in *args*.
        args.append("pg_array_fields")
    call_command("test", *args, verbosity=2)
|
[
"niwi@niwi.be"
] |
niwi@niwi.be
|
7957c4004a81dab3bef7da261038849f20e09149
|
7565f8a0b26b97e40494275b90852d2ae1ed0c95
|
/project/app/models.py
|
6989ef06acc164f36bd9a149ada65aa610474539
|
[
"MIT"
] |
permissive
|
iNgredie/advertising-site
|
881a3db8410db5cf776a5cdac8e79bb443c9e925
|
7ce1de769d68d920c36c00df262b3d416d208e4b
|
refs/heads/main
| 2023-01-06T20:22:24.749547
| 2020-11-01T19:00:49
| 2020-11-01T19:00:49
| 307,781,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
from django.db import models
class Ad(models.Model):
    """A classified advertisement listing."""
    title = models.CharField(max_length=200)
    description = models.CharField(max_length=1000)
    price = models.DecimalField(max_digits=8, decimal_places=2)  # up to 999999.99
    # NOTE(review): looks like a delimited list of URLs in one field - confirm format.
    photos_urls = models.CharField(max_length=1000)
    create_at = models.DateTimeField(auto_now_add=True)  # set once on insert
|
[
"sandslash@bk.ru"
] |
sandslash@bk.ru
|
9aa2b79bc01869cd066b241bdaa8f8f51fd66517
|
7097fe9390eb0841951c6e00f4df6df2266c22cc
|
/DYAnalysis/AnalysisCode_8TeV/DimuonAnalysis/DYPackage/test/ShapeR/uncertEE_2D.py
|
c99950559b265c7b123354e0984db972ebde428c
|
[] |
no_license
|
echapon/pA_DY_8TeV
|
35128dc7bcace21197321d01c7a54a67f59a58c9
|
02ead424f33723da7048282dd3a697809ee630b6
|
refs/heads/master
| 2021-07-24T04:48:27.455575
| 2021-02-18T14:57:49
| 2021-02-18T14:57:49
| 84,451,650
| 0
| 2
| null | 2020-06-16T14:09:34
| 2017-03-09T14:33:24
|
C
|
UTF-8
|
Python
| false
| false
| 7,660
|
py
|
#!/usr/bin/env python
from ROOT import *
from math import sqrt, pow
from array import array
import sys, os
from rshape_tools import *
def bambuConverter7_24(h,m_num,m_den):
    """Fill flat 132-bin histogram *h* from two 2D matrices, in percent.

    Layout: 6 rows x 24 columns (row 6 fills only its first 12 columns),
    flattened as bin = col+1 + (row-1)*24.  Each bin gets
    100*sqrt(m_num(row,col))/m_den(row,col) - presumably the relative
    statistical error - with the bin error zeroed.
    """
    for ibin_rows in range(1,7):
        for ibin_cols in range(24):
            if ibin_rows == 6 and ibin_cols > 11: continue
            #FIXME what about the last bin?
            h.SetBinContent(ibin_cols+1+(ibin_rows-1)*24,100.*sqrt(m_num(ibin_rows,ibin_cols))/m_den(ibin_rows,ibin_cols))
            h.SetBinError(ibin_cols+1+(ibin_rows-1)*24,0.)
            #print h.GetBinContent(ibin_cols+1+(ibin_rows-1)*24)
#FIXME accept new inouts from Andrius
fAndrius = TFile('../Inputs/sys/table_2D_frac_nBayes1.root')
#
#New source: MC efficiency and pile up
#
heffMC_syst1 = fAndrius.Get('eff_rnd_err')
heffMC_syst = TH1D("effMC_syst","effMC_syst",132,0,132)
for ix in range(heffMC_syst1.GetNbinsX()):
for iy in range(heffMC_syst1.GetNbinsY()):
heffMC_syst.SetBinContent(iy+1+ix*24,100.*sqrt(pow(heffMC_syst1.GetBinContent(ix+1,iy+1),2)))
heffMC_syst.SetBinError(iy+1+ix*24,0.)
#
#New source: pile up
#
hcollCS_syst1 = fAndrius.Get('pileup_err')
hcollCS_syst = TH1D("collCS_syst","collCS_syst",132,0,132)
for ix in range(hcollCS_syst1.GetNbinsX()):
for iy in range(hcollCS_syst1.GetNbinsY()):
hcollCS_syst.SetBinContent(iy+1+ix*24,100.*sqrt(pow(hcollCS_syst1.GetBinContent(ix+1,iy+1),2)))
hcollCS_syst.SetBinError(iy+1+ix*24,0.)
#
#get statistical uncertainty
#
print "Doing stat uncertainty"
fstat1 = TFile('../Inputs/sys/yields_bg-subtracted2D.root')
mstat_full_den = fstat1.Get('YieldsSignal')
fstat2 = TFile('../Inputs/sys/yields2D.root')
mstat_full_num = fstat2.Get('yields_data')
hstat_full = TH1D('hstat_full','hstat_full',132,0,132)
bambuConverter7_24(hstat_full,mstat_full_num, mstat_full_den)
#
#get FSR systematic uncertainty
#
print "Doing FSR syst uncertainty"
#FSR systematics
hsyst_FSR1 = fAndrius.Get("fsr_rnd_err")
hsyst_FSR2 = fAndrius.Get("fsr_model_err")
hsyst_FSR = TH1D('syst_FSR','syst_FSR',132,0,132)
for ix in range(hsyst_FSR1.GetNbinsX()):
for iy in range(hsyst_FSR1.GetNbinsY()):
hsyst_FSR.SetBinContent(iy+1+ix*24,100.*sqrt(pow(hsyst_FSR1.GetBinContent(ix+1,iy+1),2)+pow(hsyst_FSR2.GetBinContent(ix+1,iy+1),2)))
hsyst_FSR.SetBinError(iy+1+ix*24,0.)
#
#get background systematic uncertainty
#
print "Doing background syst uncertainty"
hsyst_bkg0 = fAndrius.Get("bkgr_est_err")
hsyst_bkg = TH1D("syst_bkg","syst_bkg",132,0,132)
for ix in range(hsyst_bkg0.GetNbinsX()):
for iy in range(hsyst_bkg0.GetNbinsY()):
hsyst_bkg.SetBinContent(iy+1+ix*24,100.*hsyst_bkg0.GetBinContent(ix+1,iy+1))
hsyst_bkg.SetBinError(iy+1+ix*24,0.)
#
#get efficiency correction systematics
#
print "Doing eff corr syst uncertainty"
heffcorr_err0 = fAndrius.Get('rho_err')
heffcorr_err = TH1D("effcorr_err","effcorr_err",132,0,132)
for ix in range(heffcorr_err0.GetNbinsX()):
for iy in range(heffcorr_err0.GetNbinsY()):
heffcorr_err.SetBinContent(iy+1+ix*24,100.*heffcorr_err0.GetBinContent(ix+1,iy+1))
heffcorr_err.SetBinError(iy+1+ix*24,0.)
#
#get PDF uncertainty on acceptance (same as dimuons)
#
print "Doing PDF uncertainty"
f_acc_pdf = ROOT.TFile('../Inputs/sys/pdf_syst2D_7TeV.root')
hsys1 = f_acc_pdf.Get('hslice1')
hsys2 = f_acc_pdf.Get('hslice2')
hsys3 = f_acc_pdf.Get('hslice3')
hsys4 = f_acc_pdf.Get('hslice4')
hsys5 = f_acc_pdf.Get('hslice5')
hsys6 = f_acc_pdf.Get('hslice6')
syst_list = [hsys1,hsys2,hsys3,hsys4,hsys5,hsys6]
hacc_pdf = TH1D('hacc_pdf','hacc_pdf',132,0,132)
for ih in range(len(syst_list)):
for ibin in range(syst_list[ih].GetNbinsX()):
#print ih, " ", ibin, " ", 100.*syst_list[ih].GetBinError(ibin+1), " ", ibin+1+ih*syst_list[ih].GetNbinsX()
hacc_pdf.SetBinContent(ibin+1+ih*24,100.*syst_list[ih].GetBinError(ibin+1))
hacc_pdf.SetBinError(ibin+1+ih*24,0.0)
#for ibin in range(hacc_pdf.GetNbinsX()):
# print hacc_pdf.GetBinContent(ibin+1)
#
#get unfolding systematics
#
print "Doing unfolding uncertainty"
hsyst_unf0 = fAndrius.Get("det_resolution_err")
hsyst_unf = TH1D("syst_unf","syst_unf",132,0,132)
for ix in range(hsyst_unf0.GetNbinsX()):
for iy in range(hsyst_unf0.GetNbinsY()):
hsyst_unf.SetBinContent(iy+1+ix*24,100.*hsyst_unf0.GetBinContent(ix+1,iy+1))
hsyst_unf.SetBinError(iy+1+ix*24,0.)
#save stat uncertainty on unfolding for muons
#fout = ROOT.TFile("muon_unf_stat_2D.root","recreate")
##scaling numerator: ok
#hsyst_unf_mu = TH1D('muon_unf_stat','muon_unf_stat',132,0,132)
##scaling denominator ok
#mele_yield = fstat2.Get('yields_data')
#hsyst_unf_ele = TH1D('ele_unf_stat','ele_unf_stat',132,0,132)
#for ibin_rows in range(1,7):
# for ibin_cols in range(24):
# if ibin_rows == 6 and ibin_cols > 11: continue
# #FIXME what about the last bin?
# hsyst_unf_ele.SetBinContent(ibin_cols+1+(ibin_rows-1)*24,mele_yield(ibin_rows,ibin_cols))
# hsyst_unf_ele.SetBinError(ibin_cols+1+(ibin_rows-1)*24,0.)
# #print "XXX ", ibin_cols+1+(ibin_rows-1)*24, " ", hsyst_unf_ele.GetBinContent(ibin_cols+1+(ibin_rows-1)*24)
#
#fraw = ROOT.TFile("../Inputs/rawYield/DYspectrum_Rap_uncorr_2013_tmp_TRMNov.root")
#hmu_yield = fraw.Get("hdata")
#for ibin in range(hsyst_unf_mu.GetNbinsX()):
# #print ibin, " ",
# hsyst_unf_mu.SetBinContent(ibin+1,hsyst_unf.GetBinContent(ibin+1)/sqrt(hmu_yield.GetBinContent(ibin+1)/hsyst_unf_ele.GetBinContent(ibin+1)))
# hsyst_unf_mu.SetBinError(ibin+1,0.)
# print ibin, " XX ", hsyst_unf_mu.GetBinContent(ibin+1)
#
#fout.cd()
#hsyst_unf_mu.Write("muon_unf_stat")
#fout.Close()
#
#get escale systematics (2011)
#
print "Doing escale uncertainty"
hsyst_escale1 = fAndrius.Get("escale_err")
#hsyst_escale2 = fAndrius.Get("unf_escale_res")
hsyst_escale = TH1D('syst_escale','syst_escale',132,0,132)
for ix in range(hsyst_escale1.GetNbinsX()):
for iy in range(hsyst_escale1.GetNbinsY()):
hsyst_escale.SetBinContent(iy+1+ix*24,100.*sqrt(pow(hsyst_escale1.GetBinContent(ibin+1),2))) #+pow(hsyst_escale2.GetBinContent(ibin+1),2)))
hsyst_escale.SetBinError(iy+1+ix*24,0.)
#
#get total xsection systematics as they are filled !
#
print "Doing total uncertainty"
f = TFile("../Outputs/absex_DET2D_PI_Bayesian.root")
thisx = f.Get('hxsec')
this_err = thisx.Clone()
for ibin in range(thisx.GetNbinsX()):
#alternative
this_err.SetBinContent(ibin+1,sqrt(pow(hcollCS_syst.GetBinContent(ibin+1),2)+pow(heffMC_syst.GetBinContent(ibin+1),2)+pow(hsyst_escale.GetBinContent(ibin+1),2)+pow(hsyst_unf.GetBinContent(ibin+1),2)+pow(heffcorr_err.GetBinContent(ibin+1),2)+pow(hsyst_bkg.GetBinContent(ibin+1),2)+pow(hstat_full.GetBinContent(ibin+1),2)+pow(hsyst_FSR.GetBinContent(ibin+1),2)))
#print ibin+1," ",heffMC_syst.GetBinContent(ibin+1)," ",hsyst_escale.GetBinContent(ibin+1)," ",hsyst_unf.GetBinContent(ibin+1)," ",heffcorr_err.GetBinContent(ibin+1)," ",hsyst_bkg.GetBinContent(ibin+1)," ",hstat_full.GetBinContent(ibin+1)," ",hsyst_FSR.GetBinContent(ibin+1)
this_err.SetBinError(ibin+1,0.)
thisx.SetBinError(ibin+1,this_err.GetBinContent(ibin+1))
#print ibin," ",thisx.GetBinContent(ibin+1)
fout = TFile("uncertaintiesEE_2D.root","recreate")
fout.cd()
this_err.Write()
fout.Close()
printHistoIlyaEE_2D(hstat_full, heffcorr_err, hsyst_unf, hsyst_bkg, hsyst_escale, thisx, hacc_pdf,hsyst_FSR, heffMC_syst,hcollCS_syst)
#for ibin in range(hstat_full.GetNbinsX()):
# print ibin+1, ' ', hstat_full.GetBinContent(ibin+1)
#####systematics table
#printHistoStoyanEE_2D(hstat_full, hsyst_escale, heffcorr_err, hsyst_unf, hsyst_bkg, hsyst_FSR, thisx)
#printHisto_2D(thisx)
|
[
"emilien.chapon@cern.ch"
] |
emilien.chapon@cern.ch
|
d1f2ca2daed0f8996c2359f8c6b24f3eaa5d077e
|
65e7bde414934cdda16a10f4905cf1b011166f31
|
/IntMemo/Parser_test.py
|
76dad187ed6fd9ba5ac4307fda49d97f734f1abc
|
[
"MIT"
] |
permissive
|
Wizmann/IntMemo
|
b996ce9238352d2cd69648fea455a48ba7a35b9e
|
2b1c0cf7895dc02cda9da9e3ec0ddbfcf2305b27
|
refs/heads/master
| 2020-05-30T04:27:57.971130
| 2015-03-29T05:55:44
| 2015-03-29T05:55:44
| 32,198,229
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,325
|
py
|
#coding=utf-8
import unittest
from Parser import parser
from Parser import lexer
memo1 = '''
[Metadata]
date: 2015-03-07
title: 你很重要,打比赛已经不行了。我得去造个轮子
[Tags]
categories: python, angularjs, love
difficulty: 5
[Description]
我觉得我已经没有什么潜力可挖了。突然感到有些腻烦。
对于我来说,并不可以谈生活。因为我见到过巫师的水晶球,我向里面看了一眼。
从此不能自拔。
[Process]
# DO NOT EDIT THE THINGS BELOW UNLESS YOU KNOW EXACTLY WHAT YOU ARE DOING
{"date": "2013-01-04", "comment": "Only work no play, make Jake a dull boy."}
'''
class TestParser(unittest.TestCase):
    """Tests for the memo lexer and section parser."""
    def test_lexer(self):
        # Bracketed headers tokenize as SECTION; the newline between them as CR.
        lexer.input("[maerlyn's]\n[rainbow]")
        self.assertEqual('SECTION', lexer.token().type)
        self.assertEqual('CR', lexer.token().type)
        self.assertEqual('SECTION', lexer.token().type)
    def test_parser(self):
        # A full memo splits into its four sections, in document order.
        result = parser.parse(memo1.strip())
        self.assertEqual(len(result), 4)
        self.assertEqual(result[0]['section'], '[Metadata]')
        self.assertEqual(result[1]['section'], '[Tags]')
        self.assertEqual(result[2]['section'], '[Description]')
        self.assertEqual(result[3]['section'], '[Process]')
if __name__ == '__main__':
unittest.main()
|
[
"mail.kuuy@gmail.com"
] |
mail.kuuy@gmail.com
|
8cba1c54e9b7375b8e3d4b7a6580186abe1b1406
|
1407537a535255e68164d7495786ca2d08f95370
|
/backend/home/migrations/0001_load_initial_data.py
|
37144435244754865ad3e6f940fa8a1ed3c13eba
|
[] |
no_license
|
crowdbotics-apps/lucky-thunder-27398
|
4976a8d6e49618ca7885101a5fb75d600c32ff87
|
83b4c2a91c79a1547b7f3263555f7faf833bf201
|
refs/heads/master
| 2023-05-05T15:20:55.604463
| 2021-05-25T21:36:30
| 2021-05-25T21:36:30
| 370,833,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 544
|
py
|
from django.db import migrations
def create_site(apps, schema_editor):
    """Data migration: create or refresh the default Site record (pk=1)."""
    site_model = apps.get_model("sites", "Site")
    custom_domain = "lucky-thunder-27398.botics.co"
    defaults = {"name": "Lucky Thunder"}
    if custom_domain:
        defaults["domain"] = custom_domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Seeds the default Site after the sites framework's unique-domain migration."""
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]
    operations = [
        migrations.RunPython(create_site),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
61dbfba05b6a975dff34da029db039cadfac6fa9
|
58c976db39b69e3f30f1649e4f2c474f8c59224e
|
/chain/settings.py
|
3365db0713189ec5b57ce943ad2ae5a22e20b586
|
[
"Apache-2.0"
] |
permissive
|
JennyLJY/chain
|
4be485ad72b54bb1c6ec7cda4c3e82e6d33f7797
|
f484019a31a65a02f389f2f3aec1aec1b154dc98
|
refs/heads/master
| 2020-03-09T21:44:29.153850
| 2018-04-10T12:02:23
| 2018-04-10T12:02:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,794
|
py
|
"""
Django settings for chain project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os,sys
import djcelery
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'npn1nb&p-eb%rseya)anzsi4uuvk5+enyt1m$_a8&&uy882ak3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*',]
# Application definition
INSTALLED_APPS = [
'jet.dashboard',
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3',
'asset',
'index',
'tasks',
'rest_framework',
'rest_framework.authtoken',
'djcelery',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chain.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # default
)
ANONYMOUS_USER_ID = -1
WSGI_APPLICATION = 'chain.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'chain',
# 'USER': 'root',
# 'PASSWORD': '111111',
# 'HOST': '127.0.0.1',
# 'PORT': '3306',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
LOGIN_URL = '/login.html'
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = False # note: must stay False so the explicit date formats below take effect
USE_TZ = False # for an internal-only system keep this False, otherwise timezone issues arise
DATETIME_FORMAT = 'Y-m-d H:i:s'
DATE_FORMAT = 'Y-m-d'
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = '/hequan/chain/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
DISPLAY_PER_PAGE = 25
#http://www.django-rest-framework.org/api-guide/permissions/#api-reference
# DRF permission setup: AllowAny is commented out, so the API currently
# requires an admin user (with basic/session/token authentication).
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        # 'rest_framework.permissions.AllowAny',
        'rest_framework.permissions.IsAdminUser',
    ),
}
# webssh
web_ssh = "47.94.252.25"
web_port = 8002
## logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[chain] %(levelname)s %(asctime)s %(module)s %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'verbose'
},
},
'loggers': {
'tasks': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'asset': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
},
}
# celery
djcelery.setup_loader()
BROKER_URL = 'redis://127.0.0.1:6379/0' # broker messages live in redis database 0
CELERY_RESULT_BACKEND = 'djcelery.backends.database:DatabaseBackend' # store task results via the Django ORM
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = 'Asia/Shanghai'
CELERY_IMPORTS = ('tasks.tasks',)
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler' # django-celery's database scheduler: task schedules are stored in the configured ORM database
##jet
JET_DEFAULT_THEME = 'default'
# 主题
JET_THEMES = [
{
'theme': 'default', # theme folder name
'color': '#47bac1', # color of the theme's button in user menu
'title': 'Default' # theme title
},
{
'theme': 'green',
'color': '#44b78b',
'title': 'Green'
},
{
'theme': 'light-green',
'color': '#2faa60',
'title': 'Light Green'
},
{
'theme': 'light-violet',
'color': '#a464c4',
'title': 'Light Violet'
},
{
'theme': 'light-blue',
'color': '#5EADDE',
'title': 'Light Blue'
},
{
'theme': 'light-gray',
'color': '#222',
'title': 'Light Gray'
},
]
# Whether to expand all side-menu sections
JET_SIDE_MENU_COMPACT = True # True is recommended when the menu does not have many items
|
[
"hequan2011@sina.com"
] |
hequan2011@sina.com
|
960fe4703e14455f4c229c585910aada0e8aaa45
|
f09dc121f213f2881df3572288b7ee5b39246d73
|
/aliyun-python-sdk-bssopenapi/aliyunsdkbssopenapi/request/v20171214/SetResellerUserQuotaRequest.py
|
10ffd6b52727da42dc0bc5acd388bea160ba9e09
|
[
"Apache-2.0"
] |
permissive
|
hetw/aliyun-openapi-python-sdk
|
2f31378ad6be0896fb8090423f607e9c7d3ae774
|
7443eacee9fbbaa93c7975c6dbec92d3c364c577
|
refs/heads/master
| 2023-01-19T22:42:36.214770
| 2020-12-04T10:55:14
| 2020-12-04T10:55:14
| 318,689,093
| 1
| 0
|
NOASSERTION
| 2020-12-05T03:03:03
| 2020-12-05T03:03:03
| null |
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbssopenapi.endpoint import endpoint_data
class SetResellerUserQuotaRequest(RpcRequest):
	"""RPC request for BssOpenApi SetResellerUserQuota (API version 2017-12-14).

	Generated-style request wrapper: each get_/set_ pair maps one query
	parameter (Amount, OutBizId, OwnerId, Currency).
	"""
	def __init__(self):
		RpcRequest.__init__(self, 'BssOpenApi', '2017-12-14', 'SetResellerUserQuota')
		self.set_method('POST')
		# Attach endpoint routing data when the SDK core supports it.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_Amount(self):
		return self.get_query_params().get('Amount')
	def set_Amount(self,Amount):
		self.add_query_param('Amount',Amount)
	def get_OutBizId(self):
		return self.get_query_params().get('OutBizId')
	def set_OutBizId(self,OutBizId):
		self.add_query_param('OutBizId',OutBizId)
	def get_OwnerId(self):
		return self.get_query_params().get('OwnerId')
	def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
	def get_Currency(self):
		return self.get_query_params().get('Currency')
	def set_Currency(self,Currency):
		self.add_query_param('Currency',Currency)
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
005534cbb812c1a5259eac995b271e47d1de2375
|
4d74341029f12e7e53b6df9d4e17f8a7b1247305
|
/infra/subsys/frontend/friendlynamed.py
|
5649daca2f2fb0729bcaaa513ac315e620b9aaaa
|
[] |
no_license
|
dr-natetorious/app-FinSurf
|
0a137d1e8dc50b5ba81c2b69e36f89dfb70acdaf
|
799710e046626a6a9e753d37af76fbc421e942e4
|
refs/heads/master
| 2023-02-11T11:47:58.704138
| 2021-01-12T22:30:29
| 2021-01-12T22:30:29
| 315,193,890
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,554
|
py
|
#!/usr/bin/env python3
from infra.reusable.context import InfraContext
from infra.reusable.proxyfrontend import LambdaProxyConstruct
from infra.reusable.pythonlambda import PythonLambda
from aws_cdk import (
core,
aws_s3 as s3,
aws_ec2 as ec2,
aws_apigateway as a,
aws_dynamodb as d,
aws_lambda as lambda_,
aws_iam as iam,
aws_kms as kms,
aws_ssm as ssm,
aws_elasticache as ec,
aws_apigateway as a,
aws_route53 as dns,
aws_route53_targets as dns_t,
aws_certificatemanager as acm,
core
)
class FriendlyNamedLayer(core.Construct):
  """
  Provisions the Friendly-Named service: a security group, a single-node
  Redis (ElastiCache) cluster, a Python Lambda wired to that cluster, and
  an API Gateway proxy exposing the Lambda.
  """
  def __init__(self, scope: core.Construct, id: str, context:InfraContext, **kwargs) -> None:
    super().__init__(scope, id, **kwargs)
    # Shared security group for all service components.
    self.security_group = ec2.SecurityGroup(self,'FriendlyNamedSvc-SG',
      vpc=context.networking.vpc,
      allow_all_outbound=True,
      description='Security group for FriendlyNamed service components')
    # NOTE(review): Redis port 6379 opened to any IPv4 peer - confirm the
    # group is only ever attached inside a private VPC.
    self.security_group.add_ingress_rule(
      peer= ec2.Peer.any_ipv4(),
      connection=ec2.Port(
        protocol=ec2.Protocol.TCP,
        string_representation='RedisInbound',
        from_port=6379, to_port=6379))
    # Cache subnets pulled from the VPC subnet group named 'FriendlyNamed'.
    self.subnet_group = ec.CfnSubnetGroup(self,'CacheSubnets',
      cache_subnet_group_name='FriendlyNamed-Subnets',
      description='Subnet groups for FriendlyNamed service',
      subnet_ids= [net.subnet_id for net in context.networking.vpc._select_subnet_objects(subnet_group_name='FriendlyNamed')]
    )
    # Single-node redis cluster on cache.t2.micro.
    self.cluster = ec.CfnCacheCluster(self,'FriendlyNamedStore',
      cache_node_type= "cache.t2.micro",
      engine='redis',
      cluster_name='friendly-named',
      num_cache_nodes=1,
      auto_minor_version_upgrade=True,
      cache_subnet_group_name=self.subnet_group.cache_subnet_group_name,
      vpc_security_group_ids=[self.security_group.security_group_id])
    # The Lambda handler shares the subnet group / security group above.
    self.python_lambda = PythonLambda(self,'Friendly-Named',
      build_prefix='artifacts/FinSurf-Friendly-Named',
      handler='handler.app',
      subnet_group_name='FriendlyNamed',
      context=context,
      securityGroups= [self.security_group])
    # Hand the Redis endpoint to the function via environment variables.
    self.python_lambda.function.add_environment(
      key='REDIS_HOST', value=self.cluster.attr_redis_endpoint_address)
    self.python_lambda.function.add_environment(
      key='REDIS_PORT', value=self.cluster.attr_redis_endpoint_port)
    # REST facade proxying requests to the Lambda.
    self.frontend_proxy = LambdaProxyConstruct(self,'FriendlyNamedAPI',
      handler=self.python_lambda.function,
      context=context)
    self.url = self.frontend_proxy.rest_api.url
|
[
"nate@bachmeier"
] |
nate@bachmeier
|
8b9590d0cf08be59f1a87430462a08eeb9637b28
|
f9c98f9c127fa1cd9fba17abe17199fb5440b36b
|
/timber_modisette/Python/assn2/assn16.py
|
28dd681c2ccf78458e5ef28ad2ee4a4eb0242aeb
|
[] |
no_license
|
RibRibble/python_april_2017
|
162e543f97afc77d44fcc858106e4730d3f7f760
|
3cc4240d371a8bad8da2ea085e3675272cca2de3
|
refs/heads/master
| 2021-01-19T01:12:34.667828
| 2017-04-27T22:11:53
| 2017-04-27T22:11:53
| 87,233,010
| 1
| 0
| null | 2017-04-04T20:41:44
| 2017-04-04T20:41:44
| null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
import random
def scores_grades():
arr = []
for i in range(0,10):
arr.append(random.randint(60,100))
print arr
for x in arr:
if x >= 90:
print "score: ", x,"; your grade is an a"
if x >= 80 and x <=89:
print "score: ", x,"; your grade is a b"
if x >= 70 and x <=79:
print "score: ", x,"; your grade is a c"
if x >=60 and x <=69:
print "score: ", x,"; your grade is a d"
print "end of program goodbye"
scores_grades()
|
[
"mister.modistette@gmail.com"
] |
mister.modistette@gmail.com
|
5d7b553d8930d9cca8b7c81a61c81612662bdc3c
|
ea52ecee2fbdac38aa03b7e8bafea0aebe0473da
|
/light_sabers.py
|
bd81a61bfc32c6844cfec58328bba06596dce814
|
[] |
no_license
|
cooklee/obiektowka
|
56e7aa27b4aacff6017731419d6ec90a52d17d74
|
6fa7283bb3e2868205e3eea17257611d4a4f528d
|
refs/heads/master
| 2023-02-21T21:05:01.858270
| 2021-01-27T09:08:35
| 2021-01-27T09:08:35
| 332,677,845
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 463
|
py
|
class LightSabres:
types = {'blue':'light', 'green':'light', 'red':'dark'}
power = 20
def __init__(self, color):
if color in LightSabres.types:
self._color = color
a = LightSabres('red')
b = LightSabres('green')
c = LightSabres('blue')
for item in dir(a):
print(item)
print(a.types, b.types,c.types, sep="\n")
a.types= {
1,2,3
}
print(a.types, b.types,c.types, sep="\n")
a.power = 115
print(a.power, b.power, c.power)
|
[
"sbbogus@gmail.com"
] |
sbbogus@gmail.com
|
68ca1ed8ae7d78c949702a6898dced089513f178
|
22279487bee5c983c13887ba11e6a4cd40e8bbe3
|
/PreprocessData/all_class_files/BusinessAudience.py
|
1d69ab09dfb0c3faa6591e9d1545d3de3d294f62
|
[
"MIT"
] |
permissive
|
DylanNEU/Schema
|
018c9f683c683068422ed7b6392dcebd4ab4d4cd
|
4854720a15894dd814691a55e03329ecbbb6f558
|
refs/heads/main
| 2023-08-30T01:50:20.541634
| 2021-11-01T15:30:41
| 2021-11-01T15:30:41
| 425,238,713
| 1
| 0
|
MIT
| 2021-11-06T12:29:12
| 2021-11-06T12:29:11
| null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
from PreprocessData.all_class_files.Audience import Audience
import global_data
class BusinessAudience(Audience):
def __init__(self, additionalType=None, alternateName=None, description=None, disambiguatingDescription=None, identifier=None, image=None, mainEntityOfPage=None, name=None, potentialAction=None, sameAs=None, url=None, audienceType=None, geographicArea=None, numberOfEmployees=None, yearlyRevenue=None, yearsInOperation=None):
Audience.__init__(self, additionalType, alternateName, description, disambiguatingDescription, identifier, image, mainEntityOfPage, name, potentialAction, sameAs, url, audienceType, geographicArea)
self.numberOfEmployees = numberOfEmployees
self.yearlyRevenue = yearlyRevenue
self.yearsInOperation = yearsInOperation
def set_numberOfEmployees(self, numberOfEmployees):
self.numberOfEmployees = numberOfEmployees
def get_numberOfEmployees(self):
return self.numberOfEmployees
def set_yearlyRevenue(self, yearlyRevenue):
self.yearlyRevenue = yearlyRevenue
def get_yearlyRevenue(self):
return self.yearlyRevenue
def set_yearsInOperation(self, yearsInOperation):
self.yearsInOperation = yearsInOperation
def get_yearsInOperation(self):
return self.yearsInOperation
def __setattr__(self, key, value_list):
if type(value_list).__name__ == "NoneType" or key == "node_id":
self.__dict__[key] = value_list
return
for value in value_list:
str_value = type(value).__name__
if str_value not in global_data.get_table()[key]:
raise ValueError("非法类型!")
self.__dict__[key] = value_list
|
[
"2213958880@qq.com"
] |
2213958880@qq.com
|
5b19855578f5d80893bb70ce0fb3811426b5ca2b
|
8526a11efc8f1d2309033011e9af52049986bf1f
|
/angular_dockerfile/generate_angular_dockerfile.py
|
7a7250e61519ce8731c10e3c31f3b4b5b4066b60
|
[] |
no_license
|
Bakushin10/generate-dockerfile
|
3ff4ba872d0ab463f14b7d778c054bb893e14703
|
d932331527a25c9a527e15329c9d18ffff63fd6b
|
refs/heads/main
| 2023-03-31T17:22:25.918207
| 2021-03-24T08:14:12
| 2021-03-24T08:14:12
| 350,233,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,075
|
py
|
from dockerfile_generator_interface import dockerfileGeneratorInterface
from pprint import pprint
from enums import Enum
from PyInquirer import style_from_dict, Token, prompt
from PyInquirer import Validator, ValidationError
class GenerateDockerfileForAngular(dockerfileGeneratorInterface):
def __init__(self):
self.server = ""
self.project_name = ""
self.SPECE = ""
self.questions()
self.APP_HOME = "ENV APP_HOME=/{}".format(self.project_name)
self.ENV_PROJECT_NAME = "ENV PROJECT_NAME={}".format(self.project_name)
self.apache_dockerfile = [
"FROM node:12.14.1-slim AS builder",
self.APP_HOME,
"WORKDIR $APP_HOME",
self.SPECE,
"COPY ./package.json package.json",
"RUN npm install",
"COPY . .",
"RUN npm run build",
self.SPECE,
"FROM ubuntu:18.04",
self.ENV_PROJECT_NAME,
"ENV DEBIAN_FRONTEND=noninteractive",
"RUN apt-get update && apt-get install -y \\",
" apache2 \\",
" apache2-utils \\",
self.SPECE,
"COPY --from=builder /dcp/dist/$PROJECT_NAME /var/www/html/$PROJECT_NAME",
"COPY --from=builder /dcp/config/apache/000-default.conf /etc/apache2/sites-available/000-default.conf",
"RUN a2enmod headers",
"EXPOSE 80",
'CMD ["apache2ctl", "-D", "FOREGROUND"]'
]
self.nginx_dockerfile = [
"FROM node:12.14.1-slim AS builder",
self.APP_HOME,
"WORKDIR $APP_HOME",
"COPY ./package.json package.json",
"RUN npm install",
"COPY . .",
"RUN npm run build",
self.SPECE,
"FROM nginx:1.19.0-alpine",
self.ENV_PROJECT_NAME,
"COPY --from=builder /dcp/dist/$PROJECT_NAME /dcp/$PROJECT_NAME",
"COPY --from=builder /dcp/config/nginx/nginx.conf /etc/nginx/nginx.conf",
"EXPOSE 80",
'CMD ["nginx", "-g", "daemon off;"]'
]
def generate(self):
f = open("Dockerfile", "a")
dockerfile = self.apache_dockerfile if self.server == "apache" else self.nginx_dockerfile
for i in range(len(dockerfile)):
f.write(dockerfile[i] + "\n")
f.close()
def questions(self):
style = Enum.style
questions = [
{
'type': 'list',
'name': 'server',
'message': 'What is your prefer web server?',
'choices': ['Apache', 'Nginx'],
'filter': lambda val: val.lower()
},
{
'type': 'input',
'name': 'project_name',
'message': 'What\'s your project name?',
#'validate': PhoneNumberValidator
}
]
answers = prompt(questions, style=style)
self.server = answers["server"]
self.project_name = answers["project_name"]
|
[
"shnnagai@gmail.com"
] |
shnnagai@gmail.com
|
c72b48a9ab714fdd1c3751bdc762b71fe0395bae
|
3c8856746c2da97abb50571a1883f8da07707633
|
/core/tests/mediawikiversion_tests.py
|
f6a2185faece910080a9ef0488f196cb415650c1
|
[
"MIT"
] |
permissive
|
Tillsa/pywikibot_test_wikidata
|
5bb7630c53e04a96f4da352921a55037e80c1c28
|
c4b9a1618a5e618305f3abdd359a40f01b14fd90
|
refs/heads/master
| 2021-01-01T05:24:08.575795
| 2016-04-11T16:01:54
| 2016-04-11T16:01:54
| 55,986,811
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,771
|
py
|
# -*- coding: utf-8 -*-
"""Tests for the tools.MediaWikiVersion class."""
#
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: c6c3f608d44eb5cd10f06136b4aed6f1c2d1e5a4 $'
from pywikibot.tools import MediaWikiVersion as V
from tests.aspects import unittest, TestCase
class TestMediaWikiVersion(TestCase):
"""Test MediaWikiVersion class comparisons."""
net = False
def _make(self, version):
"""Create a MediaWikiVersion instance and check that the str stays."""
v = V(version)
self.assertEqual(str(v), version)
return v
def test_normal_versions(self):
"""Test comparison between release versions."""
self.assertGreater(self._make('1.23'), self._make('1.22.0'))
self.assertTrue(self._make('1.23') == self._make('1.23'))
self.assertEqual(self._make('1.23'), self._make('1.23'))
def test_wmf_versions(self):
"""Test comparison between wmf versions."""
self.assertGreater(self._make('1.23wmf10'), self._make('1.23wmf9'))
self.assertEqual(self._make('1.23wmf10'), self._make('1.23wmf10'))
def test_combined_versions(self):
"""Test comparison between wmf versions and release versions."""
self.assertGreater(self._make('1.23wmf10'), self._make('1.22.3'))
self.assertGreater(self._make('1.23'), self._make('1.23wmf10'))
def test_non_wmf_scheme(self):
"""Test version numbers not following the wmf-scheme."""
self.assertGreater(self._make('1.23alpha'), self._make('1.22.3'))
self.assertGreater(self._make('1.23alpha'), self._make('1.23wmf1'))
self.assertGreater(self._make('1.23beta1'), self._make('1.23alpha'))
self.assertGreater(self._make('1.23beta2'), self._make('1.23beta1'))
self.assertGreater(self._make('1.23-rc.1'), self._make('1.23beta2'))
self.assertGreater(self._make('1.23-rc.2'), self._make('1.23-rc.1'))
self.assertGreater(self._make('1.23'), self._make('1.23-rc.2'))
self.assertEqual(self._make('1.23rc1'), self._make('1.23-rc.1'))
def _version_check(self, version, digits, dev_version, suffix):
v = self._make(version)
self.assertEqual(v.version, digits)
self.assertEqual(v._dev_version, dev_version)
self.assertEqual(v.suffix, suffix)
def test_interpretation(self):
"""Test if the data is correctly interpreted."""
self._version_check('1.23', (1, 23), (4, ), '')
self._version_check('1.23wmf1', (1, 23), (0, 1), 'wmf1')
self._version_check('1.23alpha', (1, 23), (1, ), 'alpha')
self._version_check('1.27.0-alpha', (1, 27, 0), (1, ), '-alpha')
self._version_check('1.23beta1', (1, 23), (2, 1), 'beta1')
self._version_check('1.23rc1', (1, 23), (3, 1), 'rc1')
self._version_check('1.23-rc1', (1, 23), (3, 1), '-rc1')
self._version_check('1.23-rc.1', (1, 23), (3, 1), '-rc.1')
self._version_check('1.23text', (1, 23), (4, ), 'text')
def test_invalid_versions(self):
"""Verify that insufficient version fail creating."""
self.assertRaises(ValueError, V, 'invalid')
self.assertRaises(ValueError, V, '1number')
self.assertRaises(ValueError, V, '1.missing')
self.assertRaises(AssertionError, V, '1.23wmf-1')
def test_generator(self):
"""Test from_generator classmethod."""
self.assertEqual(V.from_generator('MediaWiki 1.2.3'),
self._make('1.2.3'))
self.assertRaises(ValueError, V.from_generator, 'Invalid 1.2.3')
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
|
[
"till.sauerwein@web.de"
] |
till.sauerwein@web.de
|
52f0cbbc67ed8ce99700dbeab99cb58a07941358
|
bede0bbf055a7cffc62808cd6ee4654c02e2a2c4
|
/PlotConfiguration/ISR/2016/unfolding/unfolding_input/nuisances_muon.py
|
5957ef552ada88a3fcb44bd2ff3c658455907e76
|
[] |
no_license
|
bhoh/MultiUniv
|
ec4d94180971542d8c4d69726c4e26a3f90596ef
|
1105e8203ce650457bb9cbfb985a90323845c8b0
|
refs/heads/master
| 2020-04-24T07:33:41.915446
| 2020-02-13T10:13:40
| 2020-02-13T10:13:40
| 171,802,629
| 0
| 0
| null | 2019-06-03T06:49:59
| 2019-02-21T04:54:20
|
C
|
UTF-8
|
Python
| false
| false
| 5,245
|
py
|
#nuisances['lumi'] = {
# 'name' : 'lumi_13TeV',
# 'samples' : {
# 'DY' : '1.023',
# }
# 'type' : 'lnN',
# }
########## Efficiency and Energy Scale
trg_syst = ['muon_double_trigSFUp_DoubleMuon_POGTightWithTightIso/muon_double_trigSF_DoubleMuon_POGTightWithTightIso', 'muon_double_trigSFDn_DoubleMuon_POGTightWithTightIso/muon_double_trigSF_DoubleMuon_POGTightWithTightIso']
id_syst = ['muon_double_idSFUp_POGTightWithTightIso/muon_double_idSF_POGTightWithTightIso', 'muon_double_idSFDn_POGTightWithTightIso/muon_double_idSF_POGTightWithTightIso']
iso_syst = ['muon_double_isoSFUp_POGTightWithTightIso/muon_double_isoSF_POGTightWithTightIso','muon_double_isoSFDn_POGTightWithTightIso/muon_double_isoSF_POGTightWithTightIso']
l1prefire_syst = ['evt_weight_l1prefire_up/evt_weight_l1prefire','evt_weight_l1prefire_down/evt_weight_l1prefire']
pileup_syst = ['evt_weight_pureweight_up/evt_weight_pureweight','evt_weight_pureweight_down/evt_weight_pureweight']
alphaS_syst = 'PDFWeights_AlphaS'
pdfScale_syst = 'PDFWeights_Scale'
pdfErr_syst = 'PDFWeights_Error'
#id_syst_ele = ['LepSF'+Nlep+'l_ele_'+eleWP+'_Up', 'LepSF'+Nlep+'l_ele_'+eleWP+'_Do']
nuisances['trig_sf'] = {
'name' : 'trgSF',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : trg_syst ,
'DYJets10to50ToMuMu' : trg_syst ,
'DYJetsToTauTau' : trg_syst ,
'DYJets10to50ToTauTau' : trg_syst ,
'TTLL_powheg' : trg_syst ,
'WJets_MG' : trg_syst ,
'WW_pythia' : trg_syst ,
'WZ_pythia' : trg_syst ,
'ZZ_pythia' : trg_syst ,
},
}
nuisances['id_sf'] = {
'name' : 'IdSF',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : id_syst ,
'DYJets10to50ToMuMu' : id_syst ,
'DYJetsToTauTau' : id_syst ,
'DYJets10to50ToTauTau' : id_syst ,
'TTLL_powheg' : id_syst ,
'WJets_MG' : id_syst ,
'WW_pythia' : id_syst ,
'WZ_pythia' : id_syst ,
'ZZ_pythia' : id_syst ,
},
}
nuisances['iso_sf'] = {
'name' : 'IsoSF',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : iso_syst ,
'DYJets10to50ToMuMu' : iso_syst ,
'DYJetsToTauTau' : iso_syst ,
'DYJets10to50ToTauTau' : iso_syst ,
'TTLL_powheg' : iso_syst ,
'WJets_MG' : iso_syst ,
'WW_pythia' : iso_syst ,
'WZ_pythia' : iso_syst ,
'ZZ_pythia' : iso_syst ,
},
}
nuisances['l1prefire'] = {
'name' : 'L1Prefire',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : l1prefire_syst ,
'DYJets10to50ToMuMu' : l1prefire_syst ,
'DYJetsToTauTau' : l1prefire_syst ,
'DYJets10to50ToTauTau' : l1prefire_syst ,
'TTLL_powheg' : l1prefire_syst ,
'WJets_MG' : l1prefire_syst ,
'WW_pythia' : l1prefire_syst ,
'WZ_pythia' : l1prefire_syst ,
'ZZ_pythia' : l1prefire_syst ,
},
}
nuisances['pileup'] = {
'name' : 'PU',
'kind' : 'weight',
'type' : 'shape',
'samples' : {
'DYJetsToMuMu' : pileup_syst ,
'DYJets10to50ToMuMu' : pileup_syst ,
'DYJetsToTauTau' : pileup_syst ,
'DYJets10to50ToTauTau' : pileup_syst ,
'TTLL_powheg' : pileup_syst ,
'WJets_MG' : pileup_syst ,
'WW_pythia' : pileup_syst ,
'WZ_pythia' : pileup_syst ,
'ZZ_pythia' : pileup_syst ,
},
}
nuisances['alphaS'] = {
'name' : 'AlphaS',
'kind' : 'PDF',
'type' : 'alphaS',
'samples' : {
'DYJetsToMuMu' : alphaS_syst ,
'DYJets10to50ToMuMu' : alphaS_syst ,
'DYJetsToTauTau' : alphaS_syst ,
'DYJets10to50ToTauTau' : alphaS_syst ,
'TTLL_powheg' : alphaS_syst ,
'WJets_MG' : alphaS_syst ,
'WW_pythia' : alphaS_syst ,
'WZ_pythia' : alphaS_syst ,
'ZZ_pythia' : alphaS_syst ,
},
}
nuisances['pdfScale'] = {
'name' : 'Scale',
'kind' : 'PDF',
'type' : 'Scale',
'samples' : {
'DYJetsToMuMu' : pdfScale_syst ,
'DYJets10to50ToMuMu' : pdfScale_syst ,
'DYJetsToTauTau' : pdfScale_syst ,
'DYJets10to50ToTauTau' : pdfScale_syst ,
'TTLL_powheg' : pdfScale_syst ,
'WJets_MG' : pdfScale_syst ,
'WW_pythia' : pdfScale_syst ,
'WZ_pythia' : pdfScale_syst ,
'ZZ_pythia' : pdfScale_syst ,
},
}
nuisances['pdfErr'] = {
'name' : 'PDFerror',
'kind' : 'PDF',
'type' : 'HESSIAN',
'samples' : {
'DYJetsToMuMu' : pdfErr_syst ,
'DYJets10to50ToMuMu' : pdfErr_syst ,
'DYJetsToTauTau' : pdfErr_syst ,
'DYJets10to50ToTauTau' : pdfErr_syst ,
'TTLL_powheg' : pdfErr_syst ,
'WJets_MG' : pdfErr_syst ,
'WW_pythia' : pdfErr_syst ,
'WZ_pythia' : pdfErr_syst ,
'ZZ_pythia' : pdfErr_syst ,
},
}
|
[
"jhkim@cern.ch"
] |
jhkim@cern.ch
|
0ccc651bb5faebcb8a57fbc6a17b6476ed21a236
|
b28d13b2e785398f1a8074e0034080539009c837
|
/django-rest-routers/snippets/urls.py
|
f9fbae66282440d82cee06df73164e9686a966eb
|
[] |
no_license
|
sdugaro/django
|
c58f1c290a1cadf90d723083c1bceefbbac99073
|
1704f1796cb3f25cac260c6120becd70e9f1c33f
|
refs/heads/main
| 2023-02-06T22:06:41.872202
| 2020-12-27T09:04:12
| 2020-12-27T09:04:12
| 311,162,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
from django.urls import path, include
from snippets import views
from rest_framework.routers import DefaultRouter
#from rest_framework.urlpatterns import format_suffix_patterns
# Use a conventional Router from the rest_framework
# instead of designing your own URL configuration.
# Create a DefaultRouter and register viewsets
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
# Include conventional urls derived from Router ViewSets
# Note that the DefaultRouter also determines the api_root
# function based view automatically the view class
urlpatterns = [
#path('', views.api_root),
path('', include(router.urls))
]
# allow for suffixed endpoint redirection
#urlpatterns = format_suffix_patterns(urlpatterns)
|
[
"sdugaro@yahoo.com"
] |
sdugaro@yahoo.com
|
bfc1cb8fc68f40887ac4c4db66cfe3d73bc5f6da
|
0d8d794d06827aea3ad460bd7ffc58a63911b21d
|
/Python/Piling Up!.py
|
00ad5a646dd183016adf6b9a008d015313180e08
|
[] |
no_license
|
IamOmaR22/HackerRank-Problems-Solve-and-Programs-Practice-with-Python
|
fe1f70d2f791d15636f7a55419fd006bd952f4f5
|
c3057bd92c75c771877f9f469361a063b8db0915
|
refs/heads/master
| 2023-02-22T05:21:35.292396
| 2021-01-25T11:43:05
| 2021-01-25T11:43:05
| 263,082,392
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ in '__main__':
t = int(input())
for i in range(0, t):
n = int(input())
lst = list(map(int, input().split()))
min_index = lst.index(min(lst))
left = lst[ : min_index]
right = lst[min_index : ]
if left == sorted(left, reverse = True) and right == sorted(right, reverse = False):
print("Yes")
else:
print("No")
|
[
"iamomar022@gmail.com"
] |
iamomar022@gmail.com
|
65fa18f14f9c38a6a56da56ca671eaf719578587
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/compareVersions_20200909134459.py
|
f931ca7d437ba6dcc15d17e76c8b91aeb128789e
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 755
|
py
|
def compare(version1,version2):
# split where there are ,
# then loop through both of them
# if v1 > v2 return 1
# if v1 < v2 return -1
# otherwise return 0
v1 = [int(i) for i in version1.split(".")]
v2= [int(i) for i in version2.split(".")]
# if len(v1) > len(v2):
# while len(v1) !=len(v2):
# v2.append(0)
# else:
# while len(v1) !=len(v2):
# v1.append(0)
for i in range(len(v1)):
if v1[i] > v2[i] or (v1[i] is not None and v2[i] is None):
return 1
elif v1[i] < v2[i] or (v2[i] is not None and v1[i] is None):
return -1
return 0
print(compare("1.0.1","1"))
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
6ae0c683f2f431ac46ead2b62106f95b3dc5e8d8
|
0a46b027e8e610b8784cb35dbad8dd07914573a8
|
/scripts/venv/lib/python2.7/site-packages/cogent/data/molecular_weight.py
|
2f10556a9b24528be62ac1cd7c12065b83fd9cf0
|
[
"MIT"
] |
permissive
|
sauloal/cnidaria
|
bb492fb90a0948751789938d9ec64677052073c3
|
fe6f8c8dfed86d39c80f2804a753c05bb2e485b4
|
refs/heads/master
| 2021-01-17T13:43:17.307182
| 2016-10-05T14:14:46
| 2016-10-05T14:14:46
| 33,726,643
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,884
|
py
|
#!/usr/bin/env Python
"""Data for molecular weight calculations on proteins and nucleotides."""
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Rob Knight"
__email__ = "rob@spot.colorado.edu"
__status__ = "Production"
ProteinWeights = {
'A': 89.09,
'C': 121.16,
'D': 133.10,
'E': 147.13,
'F': 165.19,
'G': 75.07,
'H': 155.16,
'I': 131.18,
'K': 146.19,
'L': 131.18,
'M': 149.21,
'N': 132.12,
'P': 115.13,
'Q': 146.15,
'R': 174.20,
'S': 105.09,
'T': 119.12,
'V': 117.15,
'W': 204.23,
'Y': 181.19,
'U': 168.06,
}
RnaWeights = {
'A': 313.21,
'U': 290.17,
'C': 289.19,
'G': 329.21,
}
DnaWeights = {
'A': 297.21,
'T': 274.17,
'C': 273.19,
'G': 313.21,
}
ProteinWeightCorrection = 18.0 #terminal residues not dehydrated
DnaWeightCorrection = 61.96 #assumes 5' monophosphate, 3' OH
RnaWeightCorrection = DnaWeightCorrection
class WeightCalculator(object):
"""Calculates molecular weight of a non-degenerate sequence."""
def __init__(self, Weights, Correction):
"""Returns a new WeightCalculator object (class, so serializable)."""
self.Weights = Weights
self.Correction = Correction
def __call__(self, seq, correction=None):
"""Returns the molecular weight of a specified sequence."""
if not seq:
return 0
if correction is None:
correction = self.Correction
get_mw = self.Weights.get
return sum([get_mw(i, 0) for i in seq]) + correction
DnaMW = WeightCalculator(DnaWeights, DnaWeightCorrection)
RnaMW = WeightCalculator(RnaWeights, DnaWeightCorrection)
ProteinMW = WeightCalculator(ProteinWeights, ProteinWeightCorrection)
|
[
"sauloal@gmail.com"
] |
sauloal@gmail.com
|
8691a9bc6ee66bfd8d8db9b0642ffaa84c910880
|
51474e20f976b9d2d85c870386ae8e7b74a98a63
|
/mla/fm.py
|
85964a99d83bede4a8b36a499f86ffd837c721f7
|
[
"MIT"
] |
permissive
|
Fage2016/MLAlgorithms
|
d191a579d97438cc593d5c1d883d8bdffe0eea78
|
035e489a879d01a84fffff74885dc6b1bca3c96f
|
refs/heads/master
| 2023-03-07T14:50:51.861322
| 2022-01-31T06:13:40
| 2022-01-31T06:13:40
| 73,798,801
| 0
| 0
|
MIT
| 2023-02-04T23:47:22
| 2016-11-15T09:39:07
|
Python
|
UTF-8
|
Python
| false
| false
| 2,594
|
py
|
# coding:utf-8
import autograd.numpy as np
from autograd import elementwise_grad
from mla.base import BaseEstimator
from mla.metrics import mean_squared_error, binary_crossentropy
np.random.seed(9999)
"""
References:
Factorization Machines http://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf
"""
class BaseFM(BaseEstimator):
def __init__(
self, n_components=10, max_iter=100, init_stdev=0.1, learning_rate=0.01, reg_v=0.1, reg_w=0.5, reg_w0=0.0
):
"""Simplified factorization machines implementation using SGD optimizer."""
self.reg_w0 = reg_w0
self.reg_w = reg_w
self.reg_v = reg_v
self.n_components = n_components
self.lr = learning_rate
self.init_stdev = init_stdev
self.max_iter = max_iter
self.loss = None
self.loss_grad = None
def fit(self, X, y=None):
self._setup_input(X, y)
# bias
self.wo = 0.0
# Feature weights
self.w = np.zeros(self.n_features)
# Factor weights
self.v = np.random.normal(scale=self.init_stdev, size=(self.n_features, self.n_components))
self._train()
def _train(self):
for epoch in range(self.max_iter):
y_pred = self._predict(self.X)
loss = self.loss_grad(self.y, y_pred)
w_grad = np.dot(loss, self.X) / float(self.n_samples)
self.wo -= self.lr * (loss.mean() + 2 * self.reg_w0 * self.wo)
self.w -= self.lr * w_grad + (2 * self.reg_w * self.w)
self._factor_step(loss)
def _factor_step(self, loss):
for ix, x in enumerate(self.X):
for i in range(self.n_features):
v_grad = loss[ix] * (x.dot(self.v).dot(x[i])[0] - self.v[i] * x[i] ** 2)
self.v[i] -= self.lr * v_grad + (2 * self.reg_v * self.v[i])
def _predict(self, X=None):
linear_output = np.dot(X, self.w)
factors_output = np.sum(np.dot(X, self.v) ** 2 - np.dot(X ** 2, self.v ** 2), axis=1) / 2.0
return self.wo + linear_output + factors_output
class FMRegressor(BaseFM):
def fit(self, X, y=None):
super(FMRegressor, self).fit(X, y)
self.loss = mean_squared_error
self.loss_grad = elementwise_grad(mean_squared_error)
class FMClassifier(BaseFM):
def fit(self, X, y=None):
super(FMClassifier, self).fit(X, y)
self.loss = binary_crossentropy
self.loss_grad = elementwise_grad(binary_crossentropy)
def predict(self, X=None):
predictions = self._predict(X)
return np.sign(predictions)
|
[
"me@rushter.com"
] |
me@rushter.com
|
29c5d279f4d670c345637bd7f5d6167924532aa7
|
cdd79cef15bdf6a0b9098e27028bbe38607bc288
|
/深さ優先探索/ABC177_D_Friends_dfs.py
|
943a19f60eeb924be107769dc2e7bfa0f536459a
|
[] |
no_license
|
nord2sudjp/atcoder
|
ee35a3eb35717485dc62627172de24c9dac102fb
|
6b1cc5102a615492cc7ff8a33813bbb954641782
|
refs/heads/master
| 2023-08-25T11:27:14.205593
| 2021-09-27T05:43:04
| 2021-09-27T05:43:04
| 302,855,505
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
import sys
sys.setrecursionlimit(1000000)
f=lambda:map(int,input().split())
N,M=f()
if M==0:
print(1)
exit()
G=[set() for _ in range(N+1)]
for _ in range(M):
a,b=f()
G[a].add(b)
G[b].add(a)
# print(G)
d={}
F=[0]*(N+1)
def dfs(i,n):
if F[i]:return
F[i]=1
for g in G[i]:
if F[g]:continue
t=d.get(n, set())
t.add(g)
d[n]=t
dfs(g,n)
for i in range(1,N+1):
dfs(i,i)
print(max(len(i) for i in d.values())+1)
|
[
"nord2sudjp@gmail.com"
] |
nord2sudjp@gmail.com
|
85b44a6d09829fe02164b0352254195d4a9b9f10
|
2e98deb1931aca5f69434e85010153b8b4b7f76e
|
/tests/json-to-yaml.py
|
509903970d379fc1d097d98af3568ecece39d6ad
|
[
"Apache-2.0"
] |
permissive
|
marklap/taurus
|
8ec1ff80bbfd3f38f620930e88500b9ff7b3528b
|
8a485d05b3890bd842d627e53deccfc2d21eb2b8
|
refs/heads/master
| 2021-01-15T11:19:24.722950
| 2015-03-31T21:19:09
| 2015-03-31T21:19:09
| 33,209,884
| 0
| 0
| null | 2015-03-31T21:05:18
| 2015-03-31T21:05:18
| null |
UTF-8
|
Python
| false
| false
| 268
|
py
|
import os
import sys
import tempfile
from bzt.engine import Configuration
fp, filename = tempfile.mkstemp()
os.write(fp, sys.stdin.read())
conf = Configuration()
conf.load([filename])
conf.dump(filename, Configuration.YAML)
sys.stdout.write(open(filename).read())
|
[
"apc4@ya.ru"
] |
apc4@ya.ru
|
40a59c771348d96bbfa8ef02b04543eb7b724b1d
|
f1d2a86b7dd93f4ddafa8961780775a28e7b4508
|
/GeneralPractice/1606. Find Servers That Handled Most Number of Requests.py
|
879d823415d8ec7fab5bc2919834d48cf4986b64
|
[] |
no_license
|
deepika087/CompetitiveProgramming
|
76f8c1451fce1a8e3c94656f81a5b04363987dc6
|
d40c24736a6fee43b56aa1c80150c5f14be4ff22
|
refs/heads/master
| 2021-06-12T02:26:22.374506
| 2021-02-20T19:27:57
| 2021-02-20T19:27:57
| 70,208,474
| 10
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,097
|
py
|
__author__ = 'deepika'
"""
It is one of those problems in which data structue definition is more challenging than the actual code.
Time complexity : O(n log k)
"""
import heapq
class Solution(object):
def busiestServers(self, k, arrival, load):
"""
:type k: int
:type arrival: List[int]
:type load: List[int]
:rtype: List[int]
"""
avail = list(range(k))
count = [0]*k
pq = []
for i, a in enumerate(arrival):
while pq and pq[0][0] <= a:
_, x = heapq.heappop(pq)
heapq.heappush(avail, i + (x-i)%k)
if avail:
poppedServer = heapq.heappop(avail) % k
heapq.heappush(pq, (a + load[i], poppedServer))
count[poppedServer] += 1
max_count = max(count)
return [i for i in range(k) if count[i] == max_count] # this is important. Finding max again and again will increase the time complexity of this problem.
s=Solution()
print(s.busiestServers(k = 3, arrival = [1,2,3,4,5], load = [5,2,3,3,3] ))
|
[
"deepika_087@yahoo.com"
] |
deepika_087@yahoo.com
|
6b22025736585ebd7fb592f4545cad2786c6e33d
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/models/ms_data/datatypes/facets/ncname/ncname_max_length002_xsd/__init__.py
|
635209195235b0d6fbe5b2195b93e98c31d1ab97
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
from output.models.ms_data.datatypes.facets.ncname.ncname_max_length002_xsd.ncname_max_length002 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
5956b9e2ef9c61de2a591591bdfab2f3667d3ebd
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_meticulous.py
|
4c49fb55ebd4ada7e02a5ab1909db460ca891293
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#calss header
class _METICULOUS():
def __init__(self,):
self.name = "METICULOUS"
self.definitions = [u'very careful and with great attention to every detail: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
d087aa0095c89a0fbc965b19d74bfc369e4e7aae
|
0d0afd1dce972b4748ce8faccd992c019794ad9e
|
/integra/integra_caixa/models/caixa_movimento_base.py
|
1041f410fce8c130cc33437f28d907cb150625b6
|
[] |
no_license
|
danimaribeiro/odoo-erp
|
e2ca2cfe3629fbedf413e85f7c3c0453fd16941e
|
d12577bf7f5266b571cbedeb930720d653320e96
|
refs/heads/master
| 2020-01-23T21:32:16.149716
| 2016-11-05T15:35:40
| 2016-11-05T15:35:40
| 67,892,809
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,754
|
py
|
# -*- coding: utf-8 -*-
# from __future__ import division, print_function, unicode_literals
from osv import orm, fields
from pybrasil.data import parse_datetime, formata_data, data_hora_horario_brasilia
SALVA = True
class caixa_movimento_base(orm.AbstractModel):
_name = 'caixa.movimento_base'
def _get_data(self, cr, uid, ids, nome_campo, args=None, context={}):
res = {}
for mov_obj in self.browse(cr, uid, ids):
if nome_campo in ['data_abertura', 'dia_abertura', 'mes_abertura', 'ano_abertura', 'dia_abertura_display', 'mes_abertura_display']:
if mov_obj.data_hora_abertura:
data = parse_datetime(mov_obj.data_hora_abertura)
data = data_hora_horario_brasilia(data)
if nome_campo == 'dia_abertura_display':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'dia_abertura':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'mes_abertura_display':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'mes_abertura':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'ano_abertura':
data = formata_data(data, '%Y')
else:
data = False
elif nome_campo in ['data_fechamento', 'dia_fechamento', 'mes_fechamento', 'ano_fechamento', 'dia_fechamento_display', 'mes_fechamento_display']:
if mov_obj.data_hora_fechamento:
data = parse_datetime(mov_obj.data_hora_fechamento)
data = data_hora_horario_brasilia(data)
if nome_campo == 'dia_fechamento_display':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'dia_fechamento':
data = formata_data(data, '%d/%m/%Y')
elif nome_campo == 'mes_fechamento_display':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'mes_fechamento':
data = formata_data(data, '%B de %Y')
elif nome_campo == 'ano_fechamento':
data = formata_data(data, '%Y')
else:
data = False
res[mov_obj.id] = data
return res
_columns = {
'data_hora_abertura': fields.datetime(u'Data de abertura', required=True, select=True),
'data_abertura': fields.function(_get_data, type='date', string=u'Data de abertura', store=SALVA, select=True),
'dia_abertura': fields.function(_get_data, type='char', string=u'Dia de abertura', store=SALVA, select=True),
'mes_abertura': fields.function(_get_data, type='char', string=u'Mês de abertura', store=SALVA, select=True),
'ano_abertura': fields.function(_get_data, type='char', string=u'Ano de abertura', store=SALVA, select=True),
'data_hora_fechamento': fields.datetime(u'Data de fechamento', select=True),
'data_fechamento': fields.function(_get_data, type='date', string=u'Data de fechamento', store=SALVA, select=True),
'dia_fechamento': fields.function(_get_data, type='char', string=u'Dia de fechamento', store=SALVA, select=True),
'mes_fechamento': fields.function(_get_data, type='char', string=u'Mês de fechamento', store=SALVA, select=True),
'ano_fechamento': fields.function(_get_data, type='char', string=u'Ano de fechamento', store=SALVA, select=True),
}
_defaults = {
'data_hora_abertura': fields.datetime.now,
}
caixa_movimento_base()
|
[
"danimaribeiro@gmail.com"
] |
danimaribeiro@gmail.com
|
bd2595569bf3a56f1f02f986ee1e36549ef16666
|
62ea331d8da218e65a4aee517f4473110f80c03c
|
/bonus_points/migrations/0012_remove_userbonussummary_bonus_points_added.py
|
3abf58d99c8ead844f6402388f99f4e7c39dbc8f
|
[] |
no_license
|
maddrum/world_cup_results
|
11f47a1b0f9a68a0761c7d83d25cc1efb57c2240
|
282d8f55344ba718ea371a22f34454673f23a615
|
refs/heads/master
| 2020-03-20T05:40:44.173185
| 2018-07-16T13:12:15
| 2018-07-16T13:12:15
| 136,724,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 372
|
py
|
# Generated by Django 2.0.2 on 2018-07-08 12:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bonus_points', '0011_userbonussummary_bonus_points_added'),
]
operations = [
migrations.RemoveField(
model_name='userbonussummary',
name='bonus_points_added',
),
]
|
[
"maddrum9@gmail.com"
] |
maddrum9@gmail.com
|
86b909ea6e240fb015ef52450aafadc2ae431396
|
43d138375c9068b3ea9dc0f75bab9d3f3bc0b8f1
|
/polling_stations/apps/data_collection/management/commands/import_gosport.py
|
5836560a0aa2361284d553d80dc5010b0b82230f
|
[] |
permissive
|
dantagg/UK-Polling-Stations
|
69da767c934dd38d74d7ff632fefacdae82053cb
|
2271b3fbfe5242de659892d24fad4d8851c804ba
|
refs/heads/master
| 2023-01-03T20:19:58.315246
| 2020-09-28T11:09:40
| 2020-09-28T11:10:29
| 300,301,660
| 0
| 0
|
BSD-3-Clause
| 2020-10-01T14:07:00
| 2020-10-01T14:06:59
| null |
UTF-8
|
Python
| false
| false
| 3,585
|
py
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000088"
addresses_name = (
"parl.2019-12-12/Version 1/2019 PGE - Democracy_Club__12December2019gosport.TSV"
)
stations_name = (
"parl.2019-12-12/Version 1/2019 PGE - Democracy_Club__12December2019gosport.TSV"
)
elections = ["parl.2019-12-12"]
csv_delimiter = "\t"
allow_station_point_from_postcode = False
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"37013642", # PO122BY -> PO123BY : Duncan Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Hood Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Onslow Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Ussher Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Beaufort Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Ramsey Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Rodney Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Cunningham Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Inman Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Nelson Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Oates Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Vian Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Yarmouth Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Keyes Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : JRAC Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Esmonde Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Whitworth Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Benbow Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Sommerville Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Phoebe Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Quiberon Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Salisbury Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Jervis Block, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Mountbatten Block, HMS Sultan, Military Road, Gosport, Hampshire
"37042796", # PO122BY -> PO123BG : The Wardroom, HMS Sultan, Military Road, Gosport, Hampshire
"37013642", # PO122BY -> PO123BY : Lefanu Block, HMS Sultan, Military Road, Gosport, Hampshire
]:
rec["accept_suggestion"] = True
return rec
|
[
"chris.shaw480@gmail.com"
] |
chris.shaw480@gmail.com
|
5fce8eb114c61690bccaf214eb1d37748c5e824f
|
d6c059cb3a9d58904464f048f5ea195857bc005d
|
/0730-线程、进程、协程/2、进程/9、封装进程对象/aspiringProcess.py
|
664a50d5be61970e84dcde536681414842c698fe
|
[] |
no_license
|
j415/DjangoBasics
|
19f49f5dad9635c6e12e13ec43e728b2a808216c
|
2d738d655bd996e8755611caf090e3aea8f321ed
|
refs/heads/master
| 2020-03-23T22:03:34.874545
| 2018-09-05T03:31:14
| 2018-09-05T03:31:14
| 142,150,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
from multiprocessing import Process
import os, time
class AspiringProcess(Process):
def __init__(self, name):
Process.__init__(self)
self.name = name
def run(self):
print('子进程(%s-%s)启动' % (self.name, os.getpid()))
# 子进程的功能
time.sleep(3)
print('子进程(%s-%s)结束' % (self.name, os.getpid()))
|
[
"1720049083@qq.com"
] |
1720049083@qq.com
|
2a7acf79a0890d9bf7c03f9a000a6219641ec060
|
3ef12b52ab73d12d0a6ee1ba818f561edbb2e4f8
|
/django_app/music/migrations/0001_initial.py
|
8092eb5b7224fc751c08d76f05dc2a5b338c2eea
|
[] |
no_license
|
juliahwang/WS_practice
|
7978f73fb3813632fba531f27fcb08548ab51dc0
|
70760095eea88b67113676a55ca7e18ec0224afb
|
refs/heads/master
| 2021-01-01T16:53:12.232351
| 2017-08-03T18:23:28
| 2017-08-03T18:23:28
| 97,942,129
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,493
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-02 13:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Music',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img_music', models.ImageField(blank=True, upload_to='img_music')),
('name_music', models.CharField(max_length=100)),
('name_singer', models.CharField(max_length=100)),
('file_music', models.FileField(upload_to='music')),
('name_album', models.CharField(blank=True, max_length=100)),
('date_created', models.DateTimeField(auto_now_add=True)),
('sunny', models.PositiveIntegerField(default=1, verbose_name='맑음')),
('foggy', models.PositiveIntegerField(default=1, verbose_name='안개')),
('rainy', models.PositiveIntegerField(default=1, verbose_name='비')),
('cloudy', models.PositiveIntegerField(default=1, verbose_name='흐림')),
('snowy', models.PositiveIntegerField(default=1, verbose_name='눈')),
('name_author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_playlist', models.CharField(default='playlist', max_length=30)),
],
),
migrations.CreateModel(
name='PlaylistMusics',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_added', models.DateTimeField(auto_now_add=True)),
('music', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Music')),
('name_playlist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Playlist')),
],
),
migrations.CreateModel(
name='Weather',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('latitude', models.FloatField(verbose_name='위도')),
('longitude', models.FloatField(verbose_name='경도')),
('location', models.CharField(max_length=100)),
('time_saved', models.DateTimeField(auto_now_add=True)),
('cur_weather', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='playlist',
name='playlist_musics',
field=models.ManyToManyField(related_name='playlist_musics', through='music.PlaylistMusics', to='music.Music'),
),
migrations.AddField(
model_name='playlist',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
[
"qufskan9396@gmail.com"
] |
qufskan9396@gmail.com
|
c420454c31093dccbd03cc54672bcf97c527f1cd
|
8e2b49a1f570c51c2cd7766387c058cdfadce864
|
/src/services/package_service.py
|
a1ed5c7f92c6084c40feb28b8a93081e052d92dd
|
[
"MIT"
] |
permissive
|
c-w-m/mongo-quickstart
|
c6dc9fdc0232df757f57f00a6d4fd0a4bd10c962
|
dc61993a07bfbdeb1e36c47dfbc210fe9177ed8e
|
refs/heads/master
| 2023-02-25T01:10:11.779036
| 2021-01-31T20:20:13
| 2021-01-31T20:20:13
| 334,733,471
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,326
|
py
|
from typing import Optional, List
from data.downloads import Download
from data.packages import Package
from data.release_history import ReleaseHistory
from data.users import User
class PackageService:
@classmethod
def package_count(cls):
return Package.objects().count()
@classmethod
def release_count(cls):
return ReleaseHistory.objects().count()
@classmethod
def user_count(cls):
return User.objects().count()
@classmethod
def download_count(cls):
return Download.objects().count()
@classmethod
def find_package_by_name(cls, name):
package = Package.objects(name=name).first()
return package
@classmethod
def latest_release(cls, package: Package) -> Optional[ReleaseHistory]:
release = ReleaseHistory \
.objects(package_id=package.id) \
.order_by('-created') \
.first()
return release
@classmethod
def find_maintainers(cls, package: Package) -> List[User]:
users = User.objects(id__in=package.maintainers)
return list(users)
@classmethod
def popular_packages(cls, limit: int) -> List[Package]:
packages = Package.objects()\
.order_by('-total_downloads')\
.limit(limit)
return list(packages)
|
[
"mikeckennedy@gmail.com"
] |
mikeckennedy@gmail.com
|
a84b063eec175fb6c346c4f36e9ac302df8415ab
|
b6c069539ae1caf59686b93aef6a6eb2609b0ac1
|
/hw1/utils.py
|
270002cec77166431781a3c3482f3a0f3840da13
|
[] |
no_license
|
Vivek-23-Titan/CMU-16824-Vision-Learning-and-Recognition
|
2e52ead0ca318190cf4a32ebdbe2c75de19a9a50
|
2b5fca32ca313781d2c137e1f9c5ea77eb332797
|
refs/heads/main
| 2023-08-23T11:14:29.281288
| 2021-10-15T19:48:12
| 2021-10-15T19:48:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,745
|
py
|
# --------------------------------------------------------
# Written by Yufei Ye (https://github.com/JudyYe)
# Modified by Sudeep Dasari
# --------------------------------------------------------
import os
import torch
import numpy as np
import sklearn.metrics
from torch.utils.data import DataLoader
class ARGS(object):
"""
Tracks hyper-parameters for trainer code
- Feel free to add your own hparams below (cannot have __ in name)
- Constructor will automatically support overrding for non-default values
Example::
>>> args = ARGS(batch_size=23, use_cuda=True)
>>> print(args)
args.batch_size = 23
args.device = cuda
args.epochs = 14
args.gamma = 0.7
args.log_every = 100
args.lr = 1.0
args.save_model = False
args.test_batch_size = 1000
args.val_every = 100
"""
# input batch size for training
batch_size = 64
# input batch size for testing
test_batch_size=1000
# number of epochs to train for
epochs = 14
# learning rate
lr = 1.0
# Learning rate step gamma
gamma = 0.7
# how many batches to wait before logging training status
log_every = 100
# how many batches to wait before evaluating model
val_every = 100
# set flag to True if you wish to save the model after training
save_at_end = False
# set this to value >0 if you wish to save every x epochs
save_freq=-1
# set true if using GPU during training
use_cuda = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
assert '__' not in k and hasattr(self, k), "invalid attribute!"
assert k != 'device', "device property cannot be modified"
setattr(self, k, v)
def __repr__(self):
repr_str = ''
for attr in dir(self):
if '__' not in attr and attr !='use_cuda':
repr_str += 'args.{} = {}\n'.format(attr, getattr(self, attr))
return repr_str
@property
def device(self):
return torch.device("cuda" if self.use_cuda else "cpu")
def get_data_loader(name='voc', train=True, batch_size=64, split='train'):
if name == 'voc':
from voc_dataset import VOCDataset
dataset = VOCDataset(split, 224)
else:
raise NotImplementedError
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=train,
num_workers=4,
)
return loader
def compute_ap(gt, pred, valid, average=None):
"""
Compute the multi-label classification accuracy.
Args:
gt (np.ndarray): Shape Nx20, 0 or 1, 1 if the object i is present in that
image.
pred (np.ndarray): Shape Nx20, probability of that object in the image
(output probablitiy).
valid (np.ndarray): Shape Nx20, 0 if you want to ignore that class for that
image. Some objects are labeled as ambiguous.
Returns:
AP (list): average precision for all classes
"""
nclasses = gt.shape[1]
AP = []
for cid in range(nclasses):
gt_cls = gt[:, cid][valid[:, cid] > 0].astype('float32')
pred_cls = pred[:, cid][valid[:, cid] > 0].astype('float32')
# As per PhilK. code:
# https://github.com/philkr/voc-classification/blob/master/src/train_cls.py
pred_cls -= 1e-5 * gt_cls
ap = sklearn.metrics.average_precision_score(
gt_cls, pred_cls, average=average)
AP.append(ap)
return AP
def eval_dataset_map(model, device, test_loader):
"""
Evaluate the model with the given dataset
Args:
model (keras.Model): model to be evaluated
dataset (tf.data.Dataset): evaluation dataset
Returns:
AP (list): Average Precision for all classes
MAP (float): mean average precision
"""
gt, pred, valid = None, None, None
with torch.no_grad():
for data, target, wgt in test_loader:
## TODO insert your code here
data, target, wgt = data.to(device), target.to(device), wgt.to(device)
softmax = torch.nn.Softmax(dim=1)
if gt is None:
gt = target.cpu().detach().numpy()
pred = softmax(model(data)).cpu().detach().numpy()
valid = wgt.cpu().detach().numpy()
else:
gt = np.concatenate((gt, target.cpu().detach().numpy()), axis=0)
pred = np.concatenate((pred, softmax(model(data)).cpu().detach().numpy()), axis=0)
valid = np.concatenate((valid, wgt.cpu().detach().numpy()), axis=0)
AP = compute_ap(gt, pred, valid)
mAP = np.mean(AP)
return AP, mAP
|
[
"jiaqig@umich.edu"
] |
jiaqig@umich.edu
|
48bcd1eda9bf5d5c7cffb68b6c5ce2ff420166b9
|
a958b5eca7ef63695e317fe87144b85b475306cf
|
/hypha/apply/funds/migrations/0001_initial.py
|
b148de45cbacf495758597f423ab37507c2cd2be
|
[
"BSD-3-Clause"
] |
permissive
|
ResetNetwork/apply-app
|
2fe0a4aed05e7609cca9cd8e0202d13bbc0b272f
|
a5f998b8a6c3059f31373f5c4aa700d1a563d844
|
refs/heads/main
| 2022-02-16T22:39:13.099637
| 2021-02-16T07:49:31
| 2022-02-01T13:26:32
| 201,969,914
| 1
| 4
|
BSD-3-Clause
| 2021-02-16T08:03:32
| 2019-08-12T16:38:58
|
Python
|
UTF-8
|
Python
| false
| false
| 802
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-22 09:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('images', '0001_initial'),
('wagtailcore', '0040_page_draft_title'),
]
operations = [
migrations.CreateModel(
name='FundType',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
[
"todd.dembrey@torchbox.com"
] |
todd.dembrey@torchbox.com
|
d9e4848a0179fdd7bc93c029226ed24b09f7e898
|
c6d22cf128819af1d48d02972bb9296a1687b9bb
|
/venv/Lib/site-packages/traitsui/editors/time_editor.py
|
95807375757c299d670c057392cc5ed9fd49bfc8
|
[] |
no_license
|
GenomePhD/Bio1-HIV
|
92808a1e7e6339da6d07190ba3e1a2071f3e8428
|
b5059e7f121e4abb6888893f91f95dd79aed9ca4
|
refs/heads/master
| 2022-10-28T21:55:42.998205
| 2018-04-16T18:52:32
| 2018-04-16T18:52:32
| 129,792,081
| 0
| 1
| null | 2022-10-05T18:36:22
| 2018-04-16T19:03:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,448
|
py
|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: Judah De Paula
# Date: 10/7/2008
#
#------------------------------------------------------------------------------
""" A Traits UI editor that wraps a WX timer control.
"""
from traits.api import Str
from ..editor_factory import EditorFactory
from ..ui_traits import AView
class TimeEditor(EditorFactory):
""" Editor factory for time editors. Generates _TimeEditor()s.
"""
#-------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------
#-- ReadonlyEditor traits ------------------------------------------------
# Message to show when Time is None.
message = Str('Undefined')
# The string representation of the time to show. Uses time.strftime
# format.
strftime = Str('%I:%M:%S %p')
# An optional view to display when a read-only text editor is clicked:
view = AView
|
[
"stevetmat@users.noreply.github.com"
] |
stevetmat@users.noreply.github.com
|
80ce61a7d4adb75a865075dabf522a08ff7a377b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03786/s371328153.py
|
c0a9c347530052541335ff74e3633b64f918869c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
n=int(input())
a=list(map(int,input().split()))
a.sort()
count1=0
count2=0
for i in range(n-1):
count1+=a[i]
if 2*count1<a[i+1]:
count2=i+1
print(n-count2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
6c6a48f3986047a387a97d668a570c3d1a412af3
|
9523ce426577bb38dc1260e4095e735fb817e1fd
|
/utils/misc/logging.py
|
46115961bf5f48aca4f03f1f25534ec7dbc9b002
|
[] |
no_license
|
Amantais/Kyrgyz-Stand-Up-Bot
|
58147557aaf2e7ea714489d3614b0fa09eb943f8
|
028d36493b79f6b02e12d672688ef94e526946dc
|
refs/heads/master
| 2023-03-28T03:28:42.001424
| 2021-03-25T17:27:49
| 2021-03-25T17:27:49
| 349,154,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
import logging
logging.basicConfig(format=u'%(filename)s [LINE:%(lineno)d] #%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
)
|
[
"your@example.com"
] |
your@example.com
|
5304e10f37ba2ce8b19aba3947560874a9d1e44d
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/294/83407/submittedfiles/testes.py
|
3af7ad75e3ebbf1db0fb72a2d6435d6b0eab5d25
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
# -*- coding: utf-8 -*-
#COMECE AQUI ABAIXO
n= int(input('Digite o número: '))
i= 1
while n>0:
f= n*i
print(f)
i += 1
continue
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
8cff9b1d4c1909959d729269cf817583caf4643b
|
964f2882117ff656d7a2757c233c6dd88226d975
|
/services/web/server/setup.py
|
7a71b500f01dcf016b26ad41ba3046e07d2ab0fb
|
[
"MIT"
] |
permissive
|
ignapas/osparc-simcore
|
a002dd47d7689af9c1c650eea33e31add2b182c1
|
cb62e56b194265a907f260f3071c55a65f569823
|
refs/heads/master
| 2023-01-22T08:55:32.580775
| 2022-12-09T15:57:36
| 2022-12-09T15:57:36
| 170,852,656
| 0
| 0
|
MIT
| 2023-01-09T05:03:04
| 2019-02-15T11:12:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,852
|
py
|
import re
import sys
from pathlib import Path
from setuptools import find_packages, setup
def read_reqs(reqs_path: Path) -> set[str]:
return {
r
for r in re.findall(
r"(^[^#\n-][\w\[,\]]+[-~>=<.\w]*)",
reqs_path.read_text(),
re.MULTILINE,
)
if isinstance(r, str)
}
CURRENT_DIR = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve().parent
# Hard requirements on third-parties and latest for in-repo packages
INSTALL_REQUIREMENTS = tuple(
read_reqs(CURRENT_DIR / "requirements" / "_base.txt")
| {
"simcore-models-library",
"simcore-postgres-database",
"simcore-settings-library",
"simcore-service-library[aiohttp]>=1.2.0",
}
)
TEST_REQUIREMENTS = tuple(read_reqs(CURRENT_DIR / "requirements" / "_test.txt"))
SETUP = dict(
name="simcore-service-webserver",
version=Path(CURRENT_DIR / "VERSION").read_text().strip(),
description="Main service with an interface (http-API & websockets) to the web front-end",
author=", ".join(
(
"Pedro Crespo-Valero (pcrespov)",
"Sylvain Anderegg (sanderegg)",
"Andrei Neagu (GitHK)",
)
),
packages=find_packages(where="src"),
package_dir={
"": "src",
},
include_package_data=True,
package_data={
"": [
"api/v0/openapi.yaml",
"api/v0/schemas/*.json",
"templates/**/*.jinja2",
]
},
entry_points={
"console_scripts": [
"simcore-service-webserver=simcore_service_webserver.__main__:main",
]
},
python_requires="~=3.9",
install_requires=INSTALL_REQUIREMENTS,
tests_require=TEST_REQUIREMENTS,
setup_requires=["pytest-runner"],
)
if __name__ == "__main__":
setup(**SETUP)
|
[
"noreply@github.com"
] |
ignapas.noreply@github.com
|
b9f07dccd6e3fb6e5a6a7332aa88808ea237dad9
|
c792b076cdf8c943c344d90b21817dd501c165ab
|
/boj/그래프/2252_줄세우기.py
|
cacd0a05917fbe5ef91f3edac525e6ba96dd809d
|
[] |
no_license
|
Jdoublee/CodingTestPractice
|
d68afa38e64de67aa53ab8c6569e07e7b310a83d
|
83eb2b84f63d55808a5e9b014e023b72bf4a4e9e
|
refs/heads/master
| 2023-06-02T16:48:52.913402
| 2021-06-16T13:34:40
| 2021-06-16T13:34:40
| 290,072,409
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
from collections import deque
import sys
input = sys.stdin.readline
n, m = map(int, input().split())
graph = [[] for _ in range(n+1)]
indegree = [0] * (n+1)
for _ in range(m):
a, b = map(int, input().split())
graph[a].append(b)
indegree[b] += 1
q = deque()
for i in range(1,n+1):
if indegree[i] == 0:
q.append(i)
res = []
while q:
now = q.popleft()
res.append(now)
for i in graph[now]:
indegree[i] -= 1
if indegree[i] == 0:
q.append(i)
for i in res:
print(i, end=' ')
|
[
"hyewon3429@gmail.com"
] |
hyewon3429@gmail.com
|
d25632912251f2ba76809c013cba68857ea833a2
|
4382c60f18aba351a2e7cdab7ce2793c2d27717c
|
/Algorithm 190902/addnumber.py
|
b32d1dbc234824bd231157352bf2adefaa53d744
|
[] |
no_license
|
vxda7/pycharm
|
e550b1db4cabe1a0fa03e140f33b028ef08bd4cb
|
ce29f682a923875b62a8c7c0102790eef11ab156
|
refs/heads/master
| 2020-07-03T11:27:27.807096
| 2019-11-15T08:50:32
| 2019-11-15T08:50:32
| 201,891,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 318
|
py
|
t = int(input())
for tc in range(1, t+1):
N, M, L = map(int, input().split()) # N 수열길이 M 필요한 숫자 L 출력인덱스
res = list(map(int, input().split()))
for i in range(M):
idx, num = map(int, input().split())
res.insert(idx, num)
print("#{} {}".format(tc, res[L]))
|
[
"vxda77@gmail.com"
] |
vxda77@gmail.com
|
744d9d1e4d6ea3d0eacfc2acbca5743b1b6df196
|
5a7abc4537039860c49e9a80219efa759aad1b6f
|
/tests/providers/aws/services/route53/route53domains_service_test.py
|
75d5616d62658b4778bd6f36245dd74ea10459d7
|
[
"Apache-2.0"
] |
permissive
|
sec-js/prowler
|
d5a06c72f5d7e490bade1167966f83f7a5d7ed15
|
f72be9a1e492ad593c9ac267d3ca07f626263ccd
|
refs/heads/master
| 2023-08-31T22:48:33.983360
| 2022-12-22T16:02:28
| 2022-12-22T16:02:28
| 243,866,744
| 0
| 0
|
Apache-2.0
| 2022-12-23T12:23:20
| 2020-02-28T22:37:02
|
Python
|
UTF-8
|
Python
| false
| false
| 4,172
|
py
|
from datetime import datetime
from unittest.mock import patch
import botocore
from boto3 import session
from prowler.providers.aws.lib.audit_info.audit_info import AWS_Audit_Info
from prowler.providers.aws.services.route53.route53_service import Route53Domains
# Mock Test Region
AWS_REGION = "us-east-1"
# Mocking Access Analyzer Calls
make_api_call = botocore.client.BaseClient._make_api_call
def mock_make_api_call(self, operation_name, kwarg):
"""We have to mock every AWS API call using Boto3"""
if operation_name == "ListDomains":
return {
"Domains": [
{
"DomainName": "test.domain.com",
"AutoRenew": True,
"TransferLock": True,
"Expiry": datetime(2015, 1, 1),
},
],
"NextPageMarker": "string",
}
if operation_name == "GetDomainDetail":
return {
"DomainName": "test.domain.com",
"Nameservers": [
{
"Name": "8.8.8.8",
"GlueIps": [],
},
],
"AutoRenew": True,
"AdminContact": {},
"RegistrantContact": {},
"TechContact": {},
"AdminPrivacy": True,
"RegistrantPrivacy": True,
"TechPrivacy": True,
"RegistrarName": "string",
"WhoIsServer": "string",
"RegistrarUrl": "string",
"AbuseContactEmail": "string",
"AbuseContactPhone": "string",
"RegistryDomainId": "string",
"CreationDate": datetime(2015, 1, 1),
"UpdatedDate": datetime(2015, 1, 1),
"ExpirationDate": datetime(2015, 1, 1),
"Reseller": "string",
"DnsSec": "string",
"StatusList": ["clientTransferProhibited"],
}
return make_api_call(self, operation_name, kwarg)
# Patch every AWS call using Boto3 and generate_regional_clients to have 1 client
@patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call)
class Test_Route53_Service:
# Mocked Audit Info
def set_mocked_audit_info(self):
audit_info = AWS_Audit_Info(
original_session=None,
audit_session=session.Session(
profile_name=None,
botocore_session=None,
),
audited_account=None,
audited_user_id=None,
audited_partition="aws",
audited_identity_arn=None,
profile=None,
profile_region=AWS_REGION,
credentials=None,
assumed_role_info=None,
audited_regions=None,
organizations_metadata=None,
)
return audit_info
# Test Route53Domains Client
def test__get_client__(self):
route53domains = Route53Domains(self.set_mocked_audit_info())
assert route53domains.client.__class__.__name__ == "Route53Domains"
# Test Route53Domains Session
def test__get_session__(self):
route53domains = Route53Domains(self.set_mocked_audit_info())
assert route53domains.session.__class__.__name__ == "Session"
# Test Route53Domains Service
def test__get_service__(self):
route53domains = Route53Domains(self.set_mocked_audit_info())
assert route53domains.service == "route53domains"
def test__list_domains__(self):
route53domains = Route53Domains(self.set_mocked_audit_info())
domain_name = "test.domain.com"
assert len(route53domains.domains)
assert route53domains.domains
assert route53domains.domains[domain_name]
assert route53domains.domains[domain_name].name == domain_name
assert route53domains.domains[domain_name].region == AWS_REGION
assert route53domains.domains[domain_name].admin_privacy
assert route53domains.domains[domain_name].status_list
assert len(route53domains.domains[domain_name].status_list) == 1
assert (
"clientTransferProhibited"
in route53domains.domains[domain_name].status_list
)
|
[
"noreply@github.com"
] |
sec-js.noreply@github.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.