blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
419eb9bae959c6198796833cc3263d0e5f9bbba7
|
efe1546fa1f057cbbbe974bd8478309b6176d641
|
/waf/build_system_kit/extpy/runme.py
|
13d511681ff787c2310ae35bb9572b1f65b75163
|
[
"Apache-2.0"
] |
permissive
|
yankee14/reflow-oven-atmega328p
|
2df323aba16ac4f3eac446abc633a5d79a1a55cb
|
e6792143576f13f0a3a49edfd54dbb2ef851d95a
|
refs/heads/master
| 2022-12-02T21:32:39.513878
| 2019-05-30T06:25:12
| 2019-05-30T06:25:12
| 188,760,664
| 0
| 1
|
Apache-2.0
| 2022-11-15T18:22:50
| 2019-05-27T02:52:18
|
Python
|
UTF-8
|
Python
| false
| false
| 716
|
py
|
#! /usr/bin/env python
# encoding: utf-8
"""
Create a waf file able to read wscript files ending in ".py"
execute a small test to show that it works
The waf file includes "extpy.py" which performs the required modifications
"""
import os, subprocess
up = os.path.dirname
join = os.path.join
cwd = os.getcwd()
extpy = join(cwd, 'extpy.py')
args = 'python waf-light --tools=compat15,%s --prelude=$"\tfrom waflib.extras import extpy\n" ' % extpy
root = up(up(cwd))
subprocess.Popen(args, cwd=root, shell=True).wait()
os.rename(join(root, 'waf'), join(cwd, 'waf.py'))
env = dict(os.environ)
if 'WAFDIR' in env:
del env['WAFDIR']
subprocess.Popen('python waf.py configure', cwd=cwd, shell=True, env=env).wait()
|
[
"yankee14.ed@gmail.com"
] |
yankee14.ed@gmail.com
|
0808681e06b79d03a13f5c3cc9d25a5416e84eb7
|
36725b6d5af16e1bb6431f401dee76e1d6a68f80
|
/homework_06/fill_tables.py
|
137df0dbff717f05e6497f05f676c48467cc7d0e
|
[] |
no_license
|
ivan985/learning
|
09c15de7a06e2503694cc6ab740e3da40e51818a
|
aa92da68906d000bca136ce3cc9022e479aeb2b7
|
refs/heads/master
| 2021-03-29T10:22:55.060195
| 2020-10-28T14:22:24
| 2020-10-28T14:22:24
| 247,945,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
from homework_06.models import Session, Author, Post
from homework_06.models.texts import text_dict
# заполнение базы данных
def create_users_posts():
session = Session()
author_1 = Author(author='А.И. Тургенев')
session.add(author_1)
author_2 = Author(author='Л.Н. Андреев')
session.add(author_2)
author_3 = Author(author='А.М. Горький')
session.add(author_3)
session.flush()
post_1 = Post(
author_id=author_1.id,
title='Отцы и дети',
text=text_dict['Отцы и дети'],
year='1862'
)
post_2 = Post(
author_id=author_2.id,
title='Красный смех',
text=text_dict['Красный смех'],
year='1904'
)
post_3 = Post(
author_id=author_2.id,
title='Елеазар',
text=text_dict['Елеазар'],
year='1906'
)
post_4 = Post(
author_id=author_3.id,
title='Хозяева жизни',
text=text_dict['Хозяева жизни'],
year='1906'
)
post_5 = Post(
author_id=author_3.id,
title='Городок Окуров',
text=text_dict['Городок Окуров'],
year='1909'
)
session.add(post_1)
session.add(post_2)
session.add(post_3)
session.add(post_4)
session.add(post_5)
session.commit()
session.close()
if __name__ == "__main__":
create_users_posts()
|
[
"noreply@github.com"
] |
ivan985.noreply@github.com
|
20dc1cd814e3f45b22a8b8dd0502c5eeef9b00cd
|
232fb4da8a69088ce7df987992ed7d9db175c97f
|
/SW_academy/Intermediate/01.List1_summary.py
|
64c0b7ee23167c2f21f6a4f0995c2b1e43cb30cb
|
[] |
no_license
|
miniii222/Algorithm_Study
|
2d23b955340fb851d8030a91c0205101d71ff366
|
71e46ecd3b607b30734d99b0f56016c6c9c66acc
|
refs/heads/master
| 2020-04-10T08:34:57.984730
| 2019-10-28T12:41:36
| 2019-10-28T12:41:36
| 160,909,012
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,880
|
py
|
# -*- coding: utf-8 -*-
# Exhaustive Search - 모든 경우를 탐색. 속도는 느리지만 틀리지 않음
## 1,2,3을 포함하는 모든 순열을 생성하는 코드
for i1 in range(1,4) :
for i2 in range(1,4) :
if i2 != i1 :
for i3 in range(1,4) :
if i3 != i1 and i3 != i2 :
print(i1, i2, i3)
# Greedy Algorithm - 그 순간에 최적이라고 생각되는 것을 선택해 나가는 방식. 전체 최적이라는 보장 없음
# 해 선택 -> 실행 가능성 검사 -> 해 검사
## Baby-gin 문제
num = 456789
c = [0] * 12
for i in range(6) :
c[num % 10] += 1
num //= 10
i = 0
tri = run = 0
while i < 10 :
if c[i] >= 3 : #triplet 확인
c[i] -=3
tri +=1
continue;
if c[i] >= 1 and c[i+1] >= 1 and c[i+2] >= 1 :
c[i] -=1
c[i+1] -=1
c[i+2] -=1
run +=1
continue
i +=1
if run + tri ==2 :
print('Baby Gin')
# 답을 찾지 못하는 경우도 있음
# Bubble Sort -> 시간 복잡도 O(n^2)
# Counting Sort -> 식나 복잡도 O(n + k) n : 리스트의 개수 / k : 정수의 최댓값
## 시간 복잡도가 1차이지만, n이 너무 커지면 메모리 에러 가능성이 있음
def CountingSort(A, B, k) :
#A(n) : 입력 리스트 사용된 숫자(1 ~ k)
#B(n) : 정렬된 리스트
#C(k) : 카운트 리스트
C = [0] * k
for i in range(len(B)) :
C[A[i]] += 1 #각 숫자들이 몇 개인지 센다
for i in range(1, len(C)) :
C[i] += C[i-1] #누적합을 구함으로써 각 숫자들의 정렬 후 마지막 위치를 저장
for i in range(len(B) -1, -1, -1) :
B[C[A[i]]-1] = A[i]
C[A[i]]-=1
|
[
"noreply@github.com"
] |
miniii222.noreply@github.com
|
e1b94d3981a5d64c9f55976f5877cadf98be2286
|
f230cb90c20400eb3ba99282da265f8ec9dcf913
|
/main.py
|
b0ef89a877bd48b67c607b2e4c6b95249287a219
|
[] |
no_license
|
OrionApplePie/tec_analysis
|
a36058fa1a197c0199ddaca32556149a68c11c0d
|
1fb7eb33338fe61a244a02bbf8975f553246b68a
|
refs/heads/master
| 2023-05-06T04:07:31.218461
| 2021-05-20T03:10:50
| 2021-05-20T03:26:41
| 352,912,440
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
import argparse
import errno
import os
from pathlib import Path
import pandas as pd
# Параметры которые нужны
NEEDED_PARAMETERS = (
"hmax",
"FiltWindow",
"Time Range",
"Minimum duration of series",
"time step"
)
def get_params(filename=""):
"""Функция для сбора параметров из файла .dat
из строк в начале файла начинающихся с символа #.
Возвращает словарь.
"""
if not filename:
raise ValueError("Filename is needed.")
if not os.path.isfile(filename):
raise FileNotFoundError(
errno.ENOENT,
os.strerror(errno.ENOENT),
filename
)
params = []
with open(filename, 'r') as tec_file:
for line in tec_file.readlines():
if line.strip().startswith("#"): # это параметр
param = line.strip()[1:]
param = param.split("=")
if len(param) != 2: # заголовки пропускаем
continue
param = tuple(el.strip() for el in param)
params.append(param)
params = dict(params)
# оставляем только нужные параметры
return {
key: params[key]
for key
in NEEDED_PARAMETERS
}
def main():
parser = argparse.ArgumentParser(
description="""Утилита для работы с файлами TEC.
Загрузка данных их файла в структуру данных."""
)
parser.add_argument(
'-tec_file',
type=Path, action='store',
dest='tec_file', default=Path(''),
help='Файл с данными TEC (.dat).'
)
args = parser.parse_args()
params = get_params(args.tec_file)
print(params)
headers = (
"tsn", "time", "el", "az",
"latp", "lonp", "tec",
"tec_filtered", "validity"
)
dataframe = pd.read_csv(
filepath_or_buffer=args.tec_file,
delim_whitespace=True, # разделитель - пробел(ы)
comment="#", # пропускаем комменты
names=headers, # задание заголовков из списка
)
print(dataframe)
if __name__ == '__main__':
main()
|
[
"alexander.s.ten@yandex.ru"
] |
alexander.s.ten@yandex.ru
|
689c76e4cb927a279338046a277abb8d87d3326c
|
9d74705bb064cb75d3e02c2222a6e0ad62c1909a
|
/python/1_two_sum/sum.py
|
04e175291a23d9ef9149ad29067bcb3c41fe673b
|
[] |
no_license
|
caoxin1988/leetcode
|
7efcd5fb156c644c2ea84038aba8c672c73d6d23
|
22be1c1904e247bb3a248c87284c650e68ddb328
|
refs/heads/master
| 2020-04-01T19:40:12.119223
| 2019-02-21T14:33:06
| 2019-02-21T14:33:06
| 153,565,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d = {}
for i in range(len(nums)):
d[nums[i]] = i
for i in range(len(nums)):
data1 = nums[i]
data2 = target - data1
if data2 in d.keys() and d[data2] != i:
return [i, d[data2]]
return None
|
[
"caoxin1988s@gmail.com"
] |
caoxin1988s@gmail.com
|
72564fd3a93db7e844c4f2063124a0ab78d8810b
|
0de720edab2c0dc52e5940ef77c3956d186c9cdf
|
/pyzipcode/pyzipcode.py
|
1b71ec461f62e53e30f9656bb2d820f8162495b6
|
[
"MIT",
"Python-2.0"
] |
permissive
|
ankur09/pyzipcode-cli
|
5febbf325d31ee17442b2746a85186009a955b4b
|
bd3cd1008fe9ef44fd632e1aab20b7b7d9b12198
|
refs/heads/master
| 2021-01-19T07:15:17.728056
| 2016-04-08T20:15:27
| 2016-04-08T20:15:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import requests
import json
__version__ = '0.1.3'
__author__ = "Tasdik Rahman"
class Pyzipcode(object):
BASE_URL_ZIPTASTIC_API = 'http://zip.getziptastic.com/v2/{code}/{pin}'
BASE_URL_GOOGLE_API = 'http://maps.googleapis.com/maps/api/geocode/json?address={pin}'
@staticmethod
def query_ziptastic_api(pincode, country_code="IN", return_json=False):
"""
appends the pincode to the base API url and queries the API.
country_code defaults to 'IN'
:param pincode: The pincode
:param country_code: The country for the pincode. Defaults to "IN"
:returns: returns the JSON data returned by the API
"""
url = Pyzipcode.BASE_URL_ZIPTASTIC_API.format(code=country_code, pin=pincode)
response = requests.get(url)
if response.status_code == 200:
json_obj = response.json()
if return_json == True:
return json.dumps(json_obj)
else:
return json_obj
else:
return False
@staticmethod
def query_google_api(pincode, return_json=False):
"""
queries the Google maps API for getting the longitude and latitude
for a given pincode
:param pincode: The pincode
:returns: returns the latitude and longitude
"""
url = Pyzipcode.BASE_URL_GOOGLE_API.format(pin=pincode)
response = requests.get(url)
if response.status_code == 200:
json_obj = response.json()
if json_obj["status"] == "OK":
results = json_obj["results"][0]["geometry"]
'''Storing the JSON data'''
data = {
"location": results["location"],
"location_type": results["location_type"],
"bounds": results["bounds"]
}
if return_json == True:
return json.dumps(data)
else:
return data
else:
return False
else:
return False
@staticmethod
def get(pincode, country_code="IN",return_json=False):
"""
Unifies the JSON data from different API's into a single one
:param pincode: pincode for the place
:param country_code: country code for the pincode. You can find the list of country codes in
"https://github.com/prodicus/pyzipcode-cli/blob/master/pyzipcode_cli/countries.json"
:returns: A unified result of both ziptastic and google maps API
"""
data_API_1 = Pyzipcode.query_ziptastic_api(pincode, country_code)
data_API_2 = Pyzipcode.query_google_api(pincode)
if data_API_2 is not False and data_API_1 is not False:
# final_dictionary = {
# "ziptastic": data_API_1,
# "google_maps": data_API_2
# }
data_API_1.update(data_API_2) ## merges the two dictionaries
if return_json == True:
return json.dumps(data_API_1)
else:
return data_API_1
else:
return False
|
[
"tasdik95@gmail.com"
] |
tasdik95@gmail.com
|
c1de6af02db7e340e15418e1cabcb0d12c2f6d29
|
b584b389adf9b74b612d99e841a1b3bcb0c44171
|
/spring2017/hw3/submission/dqn.py
|
7554bb329bd88df945d50f740845bc5b58792b5a
|
[] |
no_license
|
prasannakumar2012/deep_reinforcement_learning
|
add06e01cf88698f7b4959f8f2fec24faf05f022
|
37e299017d2481de6471fa366426b2e9152ea531
|
refs/heads/master
| 2021-05-14T04:44:03.482266
| 2019-01-08T04:33:19
| 2019-01-08T04:33:19
| 116,651,145
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,290
|
py
|
import sys
import gym.spaces
import itertools
import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.layers as layers
from collections import namedtuple
from dqn_utils import *
import pickle
OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs", "lr_schedule"])
def learn(env,
q_func,
optimizer_spec,
session,
exploration=LinearSchedule(1000000, 0.1),
stopping_criterion=None,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=10000,
grad_norm_clipping=10,
lr_multiplier=1.0):
"""Run Deep Q-learning algorithm.
You can specify your own convnet using q_func.
All schedules are w.r.t. total number of steps taken in the environment.
Parameters
----------
env: gym.Env
gym environment to train on.
q_func: function
Model to use for computing the q function. It should accept the
following named arguments:
img_in: tf.Tensor
tensorflow tensor representing the input image
num_actions: int
number of actions
scope: str
scope in which all the model related variables
should be created
reuse: bool
whether previously created variables should be reused.
optimizer_spec: OptimizerSpec
Specifying the constructor and kwargs, as well as learning rate schedule
for the optimizer
session: tf.Session
tensorflow session to use.
exploration: rl_algs.deepq.utils.schedules.Schedule
schedule for probability of chosing random action.
stopping_criterion: (env, t) -> bool
should return true when it's ok for the RL algorithm to stop.
takes in env and the number of steps executed so far.
replay_buffer_size: int
How many memories to store in the replay buffer.
batch_size: int
How many transitions to sample each time experience is replayed.
gamma: float
Discount Factor
learning_starts: int
After how many environment steps to start replaying experiences
learning_freq: int
How many steps of environment to take between every experience replay
frame_history_len: int
How many past frames to include as input to the model.
target_update_freq: int
How many experience replay rounds (not steps!) to perform between
each update to the target Q network
grad_norm_clipping: float or None
If not None gradients' norms are clipped to this value.
"""
assert type(env.observation_space) == gym.spaces.Box
assert type(env.action_space) == gym.spaces.Discrete
###############
# BUILD MODEL #
###############
if len(env.observation_space.shape) == 1:
# This means we are running on low-dimensional observations (e.g. RAM)
input_shape = env.observation_space.shape
else:
img_h, img_w, img_c = env.observation_space.shape
input_shape = (img_h, img_w, frame_history_len * img_c)
num_actions = env.action_space.n
# set up placeholders
# placeholder for current observation (or state)
obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
# placeholder for current action
act_t_ph = tf.placeholder(tf.int32, [None])
# placeholder for current reward
rew_t_ph = tf.placeholder(tf.float32, [None])
# placeholder for next observation (or state)
obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
# placeholder for end of episode mask
# this value is 1 if the next state corresponds to the end of an episode,
# in which case there is no Q-value at the next state; at the end of an
# episode, only the current state reward contributes to the target, not the
# next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
done_mask_ph = tf.placeholder(tf.float32, [None])
# casting to float on GPU ensures lower data transfer times.
obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0
obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0
# Here, you should fill in your own code to compute the Bellman error. This requires
# evaluating the current and next Q-values and constructing the corresponding error.
# TensorFlow will differentiate this error for you, you just need to pass it to the
# optimizer. See assignment text for details.
# Your code should produce one scalar-valued tensor: total_error
# This will be passed to the optimizer in the provided code below.
# Your code should also produce two collections of variables:
# q_func_vars
# target_q_func_vars
# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
# <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
# q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
######
# YOUR CODE HERE
######
# Declare variables for logging
t_log = []
mean_reward_log = []
best_mean_log = []
episodes_log = []
exploration_log = []
learning_rate_log = []
current_q_func = q_func(obs_t_float, num_actions, scope="q_func", reuse=False) # Current Q-Value Function
q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
target_q_func = q_func(obs_tp1_float, num_actions, scope="target_q_func", reuse=False) # Target Q-Value Function
target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')
act_t = tf.one_hot(act_t_ph, depth=num_actions, dtype=tf.float32, name="action_one_hot")
q_act_t = tf.reduce_sum(act_t*current_q_func, axis=1)
y = rew_t_ph + gamma * tf.reduce_max(target_q_func, reduction_indices=[1]) #which axis for max?
total_error = tf.square(tf.subtract(y, q_act_t)) #(reward + gamma*V(s') - Q(s, a))**2
# construct optimization op (with gradient clipping)
learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)
train_fn = minimize_and_clip(optimizer, total_error,
var_list=q_func_vars, clip_val=grad_norm_clipping)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_fn = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_fn.append(var_target.assign(var))
update_target_fn = tf.group(*update_target_fn)
# construct the replay buffer
replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)
###############
# RUN ENV #
###############
model_initialized = False
num_param_updates = 0
mean_episode_reward = -float('nan')
best_mean_episode_reward = -float('inf')
last_obs = env.reset()
LOG_EVERY_N_STEPS = 10000
SAVE_EVERY_N_STEPS = 200000
for t in itertools.count():
### 1. Check stopping criterion
if stopping_criterion is not None and stopping_criterion(env, t):
break
### 2. Step the env and store the transition
# At this point, "last_obs" contains the latest observation that was
# recorded from the simulator. Here, your code needs to store this
# observation and its outcome (reward, next observation, etc.) into
# the replay buffer while stepping the simulator forward one step.
# At the end of this block of code, the simulator should have been
# advanced one step, and the replay buffer should contain one more
# transition.
# Specifically, last_obs must point to the new latest observation.
# Useful functions you'll need to call:
# obs, reward, done, info = env.step(action)
# this steps the environment forward one step
# obs = env.reset()
# this resets the environment if you reached an episode boundary.
# Don't forget to call env.reset() to get a new observation if done
# is true!!
# Note that you cannot use "last_obs" directly as input
# into your network, since it needs to be processed to include context
# from previous frames. You should check out the replay buffer
# implementation in dqn_utils.py to see what functionality the replay
# buffer exposes. The replay buffer has a function called
# encode_recent_observation that will take the latest observation
# that you pushed into the buffer and compute the corresponding
# input that should be given to a Q network by appending some
# previous frames.
# Don't forget to include epsilon greedy exploration!
# And remember that the first time you enter this loop, the model
# may not yet have been initialized (but of course, the first step
# might as well be random, since you haven't trained your net...)
#####
# YOUR CODE HERE
# Store last_obs into replay buffer
idx = replay_buffer.store_frame(last_obs)
if t == 0:
act, reward, done = env.action_space.sample(), 0, False
# Choose action
epsilon = exploration.value(t)
if not model_initialized or random.random() < epsilon:
# With probability epsilon OR if model hasn't been initialized, choose a random action
act = env.action_space.sample()
else:
# With probability 1 - epsilon, choose the best action from Q
input_batch = replay_buffer.encode_recent_observation()
q_vals = session.run(current_q_func, {obs_t_ph: input_batch[None, :]})
act = np.argmax(q_vals)
# Step simulator forward one step
last_obs, reward, done, info = env.step(act)
replay_buffer.store_effect(idx, act, reward, done) # Store action taken after last_obs and corresponding reward
if done == True: # done was True in latest transition; we have already stored that
last_obs = env.reset() # Reset observation
done = False
#####
# at this point, the environment should have been advanced one step (and
# reset if done was true), and last_obs should point to the new latest
# observation
### 3. Perform experience replay and train the network.
# note that this is only done if the replay buffer contains enough samples
# for us to learn something useful -- until then, the model will not be
# initialized and random actions should be taken
if (t > learning_starts and
t % learning_freq == 0 and
replay_buffer.can_sample(batch_size)):
# Here, you should perform training. Training consists of four steps:
# 3.a: use the replay buffer to sample a batch of transitions (see the
# replay buffer code for function definition, each batch that you sample
# should consist of current observations, current actions, rewards,
# next observations, and done indicator).
# 3.b: initialize the model if it has not been initialized yet; to do
# that, call
# initialize_interdependent_variables(session, tf.global_variables(), {
# obs_t_ph: obs_t_batch,
# obs_tp1_ph: obs_tp1_batch,
# })
# where obs_t_batch and obs_tp1_batch are the batches of observations at
# the current and next time step. The boolean variable model_initialized
# indicates whether or not the model has been initialized.
# Remember that you have to update the target network too (see 3.d)!
# 3.c: train the model. To do this, you'll need to use the train_fn and
# total_error ops that were created earlier: total_error is what you
# created to compute the total Bellman error in a batch, and train_fn
# will actually perform a gradient step and update the network parameters
# to reduce total_error. When calling session.run on these you'll need to
# populate the following placeholders:
# obs_t_ph
# act_t_ph
# rew_t_ph
# obs_tp1_ph
# done_mask_ph
# (this is needed for computing total_error)
# learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t)
# (this is needed by the optimizer to choose the learning rate)
# 3.d: periodically update the target network by calling
# session.run(update_target_fn)
# you should update every target_update_freq steps, and you may find the
# variable num_param_updates useful for this (it was initialized to 0)
#####
# YOUR CODE HERE
# 3.a Sample a batch of transitions
obs_t_batch, act_batch, rew_batch, obs_tp1_batch, done_mask = replay_buffer.sample(batch_size)
# 3.b Initialize model if not initialized yet
if not model_initialized:
initialize_interdependent_variables(session, tf.global_variables(), {
obs_t_ph: obs_t_batch,
obs_tp1_ph: obs_tp1_batch,
})
session.run(update_target_fn)
model_initialized = True
# 3.c Train the model using train_fn and total_error
session.run(train_fn, {obs_t_ph: obs_t_batch, act_t_ph: act_batch, rew_t_ph: rew_batch, obs_tp1_ph: obs_tp1_batch,
done_mask_ph: done_mask, learning_rate: optimizer_spec.lr_schedule.value(t)})
# 3.d Update target network every taret_update_freq steps
if t % target_update_freq == 0:
session.run(update_target_fn)
num_param_updates += 1
#####
### 4. Log progress
episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
if len(episode_rewards) > 0:
mean_episode_reward = np.mean(episode_rewards[-100:])
if len(episode_rewards) > 100:
best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)
if t % LOG_EVERY_N_STEPS == 0 and model_initialized:
print("Timestep %d" % (t,))
t_log.append(t)
print("mean reward (100 episodes) %f" % mean_episode_reward)
mean_reward_log.append(mean_episode_reward)
print("best mean reward %f" % best_mean_episode_reward)
best_mean_log.append(best_mean_episode_reward)
print("episodes %d" % len(episode_rewards))
episodes_log.append(len(episode_rewards))
print("exploration %f" % exploration.value(t))
exploration_log.append(exploration.value(t))
print("learning_rate %f" % optimizer_spec.lr_schedule.value(t))
learning_rate_log.append(optimizer_spec.lr_schedule.value(t))
sys.stdout.flush()
if t % SAVE_EVERY_N_STEPS == 0 and model_initialized:
training_log = ({'t_log': t_log, 'mean_reward_log': mean_reward_log, 'best_mean_log': best_mean_log, 'episodes_log': episodes_log,
'exploration_log': exploration_log, 'learning_rate_log': learning_rate_log})
output_file_name = 'ram_lr'+str(lr_multiplier)+'_' + str(t) + '_data.pkl'
with open(output_file_name, 'wb') as f:
pickle.dump(training_log, f)
|
[
"prasannakumar2012@gmail.com"
] |
prasannakumar2012@gmail.com
|
f8b6e8b882f5111b756f94c4418c914092c1c939
|
ef2d31ec0634c469310774d112669789c10cd859
|
/day04/test5.py
|
c7c853d9ee3e83f659bdc9fba3214b2c0a181a91
|
[] |
no_license
|
susan025/myproj01
|
c8cb38c7c6d26944ec0ad7ff59f9816a6c73af93
|
e2d43f9b9204f386343b06337787ae26075474ab
|
refs/heads/master
| 2020-04-29T06:44:57.537851
| 2019-03-16T05:17:41
| 2019-03-16T05:17:41
| 175,928,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,956
|
py
|
# 对一字符串进行翻转操作
def myStr():
str = "nameisgod"
mlist = []
for i in str:
mlist.append(i)
print(mlist)
mlist.reverse()
print(mlist)
mstr = ""
for j in mlist:
mstr += j
print(mstr)
# 创建一个列表,存储公司10个名单,对这些名单进行排序,要求按姓名的字符多少来排
def compName():
clist = ["john","susan","backham","roy","乔","William","karry"]
llist = []
mdict = dict()
for i in clist:
v = len(i)
llist.append(v)
print(llist)
for j in clist:
mdict[j] = llist[clist.index(j)]
print(mdict)
mdict.values()
for n in mdict.keys():
pass
#输入用户名密码进行注册,要求用户名允许数字字母6-16位,密码6-16位,不允许出现*#!
def regist():
username = str(input("请输入用户名:"))
password = str(input("请输入密码:"))
namebool1 = len(username) >= 6 and len(username)<=16
namebool2 = "*" not in username and "#" not in username and "!" not in username
pwdbool1 = len(password) >= 6 and len(password)<=16
pwdbool2 = "*" not in password and "#" not in password and "!" not in password
if namebool1:
if namebool2:
if pwdbool1:
if pwdbool2:
print("注册成功!")
else:
print("密码中不能含有*#!")
else:
print("密码必须6-16位")
else:
print("用户名中不能含有*#!")
else:
print("用户名必须6-16位")
def sstr():
#输入一个字符串为社会主义核心价值观的全拼,每个词用空格进行分隔,将这个字符串,转成列表,遍历输出
str = "she hui zhu yi he xin jia zhi guan"
mlist = list(str.split(" "))
print(mlist)
if __name__ == '__main__':
myStr()
compName()
regist()
sstr()
|
[
"542711570@qq.com"
] |
542711570@qq.com
|
2733e95a1dfb9747f95994652fd61c54f3b53b49
|
43eb4b71987a11905dfacfd684c20247f171b689
|
/solution/141/answer.py
|
4171aeceb61b530d7ab9cdc36752b6cc1f2e135c
|
[] |
no_license
|
youjiajia/learn-leetcode
|
a44cd994df1f103cd284e561ac82580739dcdeaa
|
f87b018f7abcdc6e7ae7532810688f9a9d8cbb41
|
refs/heads/master
| 2021-09-14T23:39:26.459338
| 2018-05-22T12:11:13
| 2018-05-22T12:11:13
| 104,706,976
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 783
|
py
|
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def printval(self):
print self.val
class Solution(object):
def hasCycle(self, head):
fastindex = head
slowindex = head
while fastindex and slowindex:
fastindex = fastindex.next
slowindex = slowindex.next
if not fastindex:
return False
fastindex = fastindex.next
if fastindex is slowindex:
return True
return False
if __name__ == "__main__":
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
a.next = b
b.next = c
c.next = d
d.next = b
s = Solution()
result = s.hasCycle(a)
print result
|
[
"jasonyou.info@gmail.com"
] |
jasonyou.info@gmail.com
|
a40bc1b5b7298acf3dc92a84ffab6f60e8a25e03
|
d4d9338389252eaa2ce58c4f3ae953b4239b1e8f
|
/week5_7.py
|
60044e7fdfbbe24f81d54171c5564484da8d0413
|
[] |
no_license
|
Sokiryanskaya/learn_python
|
7081149ceeb0570a6f4e83bf1ffbf21a682ec6ad
|
f6308e5f1afc15c74fea48a05ddda97f93d9d61c
|
refs/heads/master
| 2021-05-14T08:33:05.233204
| 2018-01-05T18:36:25
| 2018-01-05T18:36:25
| 116,301,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
for i in range(10, 100):
if i == 2 * (i // 10) * (i % 10):
print(i)
|
[
"sokiryanskaya@gmail.com"
] |
sokiryanskaya@gmail.com
|
7fb5fe40e39e71f3445a4c13335e48e4814501df
|
e3f7ef69e0869a089dde133d2655d213429dcaa6
|
/master/app_url.py
|
e0b432c1b9ebc9193a631cdefef62587cfeaabb2
|
[] |
no_license
|
Chunshan-Theta/worker-sample
|
cdb66e5f9e9e455cba9d092690f53c4a10f82470
|
df92e99cf26d69526f946f4ae24b81e261cabeab
|
refs/heads/master
| 2023-08-11T09:30:23.077020
| 2021-10-04T09:03:28
| 2021-10-04T09:03:28
| 413,346,403
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,494
|
py
|
import os
import tornado.web
from tornado_swagger.setup import setup_swagger
from handler.api.query.testHandler import testHandler
# jaeger
# from jaeger_client import Config
# import tornado_opentracing
# def init_jaeger_tracer(service_name):
# config = Config(
# config={ # usually read from some yaml config
# 'sampler': {
# 'type': 'probabilistic',
# 'param': 1,
# },
# 'local_agent': {
# 'reporting_host': '10.205.48.66',
# },
# },
# service_name=service_name,
# validate=True,
# )
# # Create your opentracing tracer using TornadoScopeManager for active Span handling.
# return config.initialize_tracer()
# urls initial
urls = [
]
class Application(tornado.web.Application):
_routes = [
tornado.web.url(r"/status/Test", testHandler)
]
def __init__(self, **settings):
setup_swagger(
self._routes,
swagger_url="/docs",
api_base_url="/",
description="",
api_version="1.0.0",
title="Journal API",
contact="name@domain",
schemes=["http"],
security_definitions={
"ApiKeyAuth": {"type": "apiKey", "in": "header", "name": "X-API-Key"}
},
)
super(Application, self).__init__(self._routes, **settings)
# tornado_opentracing.init_tracing()
app = Application()
|
[
"gavin19950511@gmail.com"
] |
gavin19950511@gmail.com
|
e9fab81e0de067d36417344ae4412685aa624ed5
|
7edcf3ccddc42c82ec71409cbf4df6e2aa3512c1
|
/backend/medicar/medicos/serializers.py
|
cb1306af30b0a7228c9f38d4e690480b9953a737
|
[] |
no_license
|
FabriciaDiniz/desafio-drf
|
5b3d097c42aec26783aff540e04270b6a077b47d
|
ce1ad4bcfb1a549114b1e073f67cccdcbf27c6e0
|
refs/heads/master
| 2023-08-11T00:49:52.209838
| 2021-07-10T14:46:14
| 2021-07-10T14:46:14
| 246,699,845
| 1
| 0
| null | 2021-09-22T18:47:49
| 2020-03-11T23:22:42
|
Python
|
UTF-8
|
Python
| false
| false
| 351
|
py
|
from rest_framework import serializers
from medicar.medicos.models import Medico
from medicar.especialidades.serializers import EspecialidadeSerializer
class MedicoSerializer(serializers.ModelSerializer):
especialidade = EspecialidadeSerializer()
class Meta:
model = Medico
fields = ['id', 'crm', 'nome', 'especialidade']
|
[
"afabriciadiniz@gmail.com"
] |
afabriciadiniz@gmail.com
|
9b60afbff6906ef3ee3df2f31b5bc23cea3cf2c5
|
04e080a00f37a3501c5060380d65c5a6cd669d90
|
/thonnycontrib/m5stack/esp32_api_stubs/flashbdev.py
|
58a2c79a1bb97df766a35f8c9f2d127bf84ae80a
|
[
"MIT"
] |
permissive
|
thonny/thonny-m5stack
|
473a2876e72b88d283d8b9d64189028ef7fea111
|
a502579ad5e264342ae0bc2c554c78527053693b
|
refs/heads/master
| 2020-04-20T14:57:15.605699
| 2019-11-18T22:28:36
| 2019-11-18T22:28:36
| 168,914,658
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
class FlashBdev:
''
SEC_SIZE = 4096
START_SEC = 512
def ioctl():
pass
def readblocks():
pass
def writeblocks():
pass
bdev = None
esp = None
size = 4194304
|
[
"aivar.annamaa@gmail.com"
] |
aivar.annamaa@gmail.com
|
b578e28f68973854aaec311d53c36343622ad11e
|
7a3e0fa361b97c87229bf12ecaaf6c187cf2115f
|
/run_all_case.py
|
a1fd9bb90aaed07ec0fc71e058cf5da315f2a3fc
|
[] |
no_license
|
wing1king/test_app_api
|
fac36ff9e72b74d3ad182f35cacea9920c77e79b
|
7c11763edbac545c30dec3e46c1f1777f7318cdd
|
refs/heads/master
| 2020-04-25T22:25:01.423592
| 2019-03-19T06:33:30
| 2019-03-19T06:33:30
| 173,079,842
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 792
|
py
|
import sys
import time
import unittest
from imp import reload
from HTMLTestRunner import HTMLTestRunner
reload(sys)
# 定义测试用例的目录为当前目录
test_dir = 'E:\\test_api\\test_case'
discover = unittest.defaultTestLoader.discover(test_dir, pattern="test*.py")
if __name__ == "__main__":
# 按照一定的格式获取当前的时间
now = time.strftime("%Y-%m-%d %H-%M-%S")
# 定义报告存放路径
filename = r'D:' + now + 'test_teacher_result.html'
fp = open(filename, "wb")
# 定义测试报告
runner = HTMLTestRunner(stream=fp,
title="接口测试报告",
description="测试用例执行情况:")
# 运行测试
runner.run(discover)
# 关闭报告文件
fp.close()
|
[
"zhang4475153@126.com"
] |
zhang4475153@126.com
|
08c3401e154025c6a50d54dee15177148e7f3139
|
d66993b0383ee7a97c9d5fe761269a3cb8e67e22
|
/Ejercicios/Tupla.py
|
8757b1345bab5aa430ee8da723ecea3804a04e16
|
[] |
no_license
|
rramosaveros/CursoPythonCisco
|
09828e3d8190490c0dc30861ae241f5222e108d6
|
1508e67873adfcf31b8c78d3f5cb2a0572dfeb1c
|
refs/heads/master
| 2023-06-27T12:07:52.652780
| 2021-08-01T14:19:19
| 2021-08-01T14:19:19
| 391,647,026
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 146
|
py
|
miTupla = (1, 10, 100, 1000)
print(miTupla[0])
print(miTupla[-1])
print(miTupla[1:])
print(miTupla[:-2])
for elem in miTupla:
print(elem)
|
[
"ramoslenin32@gmail.com"
] |
ramoslenin32@gmail.com
|
6bedb5a9d61f5d22a8e0acab5e5d79d7a4cc82ee
|
b76211ae410ba39c641f55d4ae756473ea93aa27
|
/posts/migrations/0007_post_category.py
|
4befa64eb9ae739b4391b1cc5a6d8bc866fb2fe4
|
[] |
no_license
|
mabutotalent/tah_cms
|
ebfabbbbe6ee490a7f2e84408b5889a67f13b71e
|
886b16b84f147cbc09f83bbeb4ad464394a08b62
|
refs/heads/master
| 2022-12-28T18:58:08.224622
| 2020-10-10T19:08:43
| 2020-10-10T19:08:43
| 302,965,633
| 0
| 0
| null | 2020-10-10T19:08:44
| 2020-10-10T18:28:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
# Generated by Django 3.1 on 2020-09-07 23:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('categories', '0001_initial'),
('posts', '0006_auto_20200907_1041'),
]
operations = [
migrations.AddField(
model_name='post',
name='category',
field=models.ForeignKey(default=True, on_delete=django.db.models.deletion.CASCADE, to='categories.category'),
),
]
|
[
"noreply@github.com"
] |
mabutotalent.noreply@github.com
|
fe40a6666372cb5bcf915e200311decfb99c1142
|
31f2493364589800ef2b821ee6b121f3b22d1b34
|
/Assignment4/view.py
|
71b32ed4204eb2120399c3689c02c3af15938be9
|
[] |
no_license
|
Kskulski/Software-Engineering
|
89ab78eb7ba6c0f386009ae085fc6c57bceb05e4
|
7e71da9b3bd326daca97085d8654c433d2a68c49
|
refs/heads/master
| 2020-09-09T05:52:42.756023
| 2019-12-08T22:47:10
| 2019-12-08T22:47:10
| 221,366,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,448
|
py
|
import tkinter as Tk
class View():
'''View in the MVC pattern assumes role of rendering user
interface to the user, and maintaining an up to date view as
it handles user interaction it receives from Controller.
'''
def _add_numbers_keypad(self, frame):
# calculator display
self.display = Tk.Label(frame, text=0, width=12, height=1)
self.display.grid(row=0, column=0, columnspan=10, pady=5)
#calculator numbers pad
self.one = Tk.Button(frame, text="1")
self.one.grid(row=1, column=0)
self.two = Tk.Button(frame, text="2")
self.two.grid(row=1, column=1)
self.three = Tk.Button(frame, text="3")
self.three.grid(row=1, column=2)
self.four = Tk.Button(frame, text="4")
self.four.grid(row=2, column=0)
self.five = Tk.Button(frame, text="5")
self.five.grid(row=2, column=1)
self.six = Tk.Button(frame, text="6")
self.six.grid(row=2, column=2)
self.seven = Tk.Button(frame, text="7")
self.seven.grid(row=3, column=0)
self.eight = Tk.Button(frame, text="8")
self.eight.grid(row=3, column=1)
self.nine = Tk.Button(frame, text="9")
self.nine.grid(row=3, column=2)
self.zero = Tk.Button(frame, text="0")
self.zero.grid(row=4, column=1)
def _add_operations_keypad(self, frame):
#operations pad
self.clear = Tk.Button(frame, text="C")
self.clear.grid(row=4, column=0)
self.equal = Tk.Button(frame, text="=")
self.equal.grid(row=4, column=2)
self.add = Tk.Button(frame, text="+")
self.add.grid(row=2, column=5)
self.sub = Tk.Button(frame, text="-")
self.sub.grid(row=3, column=5)
self.mul = Tk.Button(frame, text="*")
self.mul.grid(row=2, column=6)
self.div = Tk.Button(frame, text="/")
self.div.grid(row=3, column=6)
def __init__(self):
self.root = Tk.Tk()
self.root.title("MVC example: Calculator")
self.root.geometry()
self._frame = Tk.Frame(self.root)
self._frame.pack()
self._add_numbers_keypad(self._frame)
self._add_operations_keypad(self._frame)
def refresh(self, value):
self.display.config(text= value)
def attach_keyboard(self, callback):
self.root.bind("<Key>", callback)
def start(self):
self.root.mainloop()
|
[
"kevinpskulski@lewisu.edu"
] |
kevinpskulski@lewisu.edu
|
7d3509680519445f4111b9208c32d64a387c8b62
|
811576ca7dfadafee7f187854b8c3c62df533ca0
|
/scripts/PrefixScanSVG/steps-to-svg.py
|
ec036d241249d92860d485ed29ffd8344df331b6
|
[
"Apache-2.0"
] |
permissive
|
s-ueno/Insights
|
84a33253c9c44c299ffaac68d77d5b7e074e8f8e
|
3d72a80ee26b08da7114c3526a97fc3c27efa14e
|
refs/heads/main
| 2023-08-13T15:36:15.828432
| 2021-09-27T21:44:59
| 2021-09-27T21:44:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,344
|
py
|
import json
from graphviz import Digraph
def remove_prefix(prefix, value):
if len(prefix) == 0 or len(prefix) == len(value):
return value
if not value.startswith(prefix):
raise "{value} does not start with {prefix}".format(value, prefix)
suffix = value[len(prefix):]
if len(suffix) == 0:
print(repr(prefix))
print(repr(value))
raise 'oops!'
return "..." + suffix
def get_package_id(value):
return value.split('$')[-1]
def get_package_url(value, version=None):
id = get_package_id(value)
url = "https://www.nuget.org/packages/{id}".format(id=id)
if version:
url += "/" + version
return url
bfs_data = json.load(open('steps-example1-BFS.json', 'rb'))
dfs_data = json.load(open('steps-example1-DFS.json', 'rb'))
is_nuget = False
dfs = Digraph(comment='DFS', filename='dfs', format='svg')
dfs.attr('graph', rankdir='LR')
id_to_node = {node['Id']: node for node in dfs_data}
id_to_children = {}
for node in dfs_data:
if node['ParentId']:
if node['ParentId'] not in id_to_children:
id_to_children[node['ParentId']] = []
id_to_children[node['ParentId']].append(node)
query_count = 0
for step in dfs_data:
if step['ParentId']:
prefix_parent = id_to_node[step['ParentId']]
while 'PartitionKeyPrefix' not in prefix_parent['Data']:
prefix_parent = id_to_node[prefix_parent['ParentId']]
prefix = prefix_parent['Data']['PartitionKeyPrefix']
else:
prefix = ''
if step['Data']['Type'] == 'Start':
if not is_nuget:
continue
dfs.attr('node', shape='box', style='rounded')
label = '<<b>Start</b>>'
elif step['Data']['Type'] == 'EntitySegment':
dfs.attr('node', shape='box', style='filled', fillcolor='darkseagreen1')
if not is_nuget:
if step['Data']['Count'] == 1:
label = "<<b>1 result</b>: {0}, {1}>".format(
step['Data']['First']['PartitionKey'],
step['Data']['First']['RowKey'],
step['Data']['Count']
)
else:
label = "<<b>{0} results</b>: {1}, {2} - {3}, {4}>".format(
step['Data']['Count'],
step['Data']['First']['PartitionKey'],
step['Data']['First']['RowKey'],
step['Data']['Last']['PartitionKey'],
step['Data']['Last']['RowKey']
)
elif (step['Data']['First']['PartitionKey'] == step['Data']['Last']['PartitionKey']):
label = """<
<table border="0" cellspacing="0" cellpadding="5">
<tr>
<td href="{url}" title="{id}"><u><font color="blue">{pk}</font></u></td>
<td>({count} total)</td>
</tr>
</table>
>""".format(
id=get_package_id(step['Data']['First']['PartitionKey']),
url=get_package_url(step['Data']['First']['PartitionKey']),
pk=remove_prefix(prefix, step['Data']['First']['PartitionKey']),
count=step['Data']['Count'])
else:
label = """<
<table border="0" cellspacing="0" cellpadding="5">
<tr>
<td title="{first_id}" href="{first_url}"><u><font color="blue">{first_pk}</font></u></td>
<td>-</td>
<td title="{last_id}" href="{last_url}"><u><font color="blue">{last_pk}</font></u></td>
<td> ({count} total)</td>
</tr>
</table>
>""".format(
first_id=get_package_id(step['Data']['First']['PartitionKey']),
first_url=get_package_url(step['Data']['First']['PartitionKey'], step['Data']['First']['RowKey']),
first_pk=remove_prefix(prefix, step['Data']['First']['PartitionKey']),
last_id=get_package_id(step['Data']['Last']['PartitionKey']),
last_url=get_package_url(step['Data']['Last']['PartitionKey'], step['Data']['Last']['RowKey']),
last_pk=remove_prefix(prefix, step['Data']['Last']['PartitionKey']),
count=step['Data']['Count'])
elif step['Data']['Type'] == 'PartitionKeyQuery':
query_count += 1
dfs.attr('node', shape='box', style='filled', fillcolor='beige')
if not is_nuget:
label = "<<b>Query {0}</b>: PK = '{1}' and RK > '{2}'>".format(
query_count,
step['Data']['PartitionKey'],
step['Data']['RowKeySkip'],
)
elif step['Id'] in id_to_children:
label = """<
<table border="0" cellspacing="0" cellpadding="5">
<tr>
<td title="{id}" href="{url}"><u><font color="blue">{pk}</font></u></td>
</tr>
</table>
>""".format(
id=get_package_id(step['Data']['PartitionKey']),
url=get_package_url(step['Data']['PartitionKey']),
pk=remove_prefix(prefix, step['Data']['PartitionKey']))
else:
label = "pk: " + remove_prefix(prefix, step['Data']['PartitionKey'])
elif step['Data']['Type'] == 'PrefixQuery':
query_count += 1
dfs.attr('node', shape='box', style='filled', fillcolor='darkslategray1')
if not is_nuget:
if len(step['Data']['PartitionKeyLowerBound']) == 0:
label = "<<b>Query {0}</b>: PK = '{1}*'>".format(
query_count,
step['Data']['PartitionKeyPrefix'])
else:
label = "<<b>Query {0}</b>: PK = '{1}*' and PK > '{2}'>".format(
query_count,
step['Data']['PartitionKeyPrefix'],
step['Data']['PartitionKeyLowerBound'])
else:
label = remove_prefix(prefix, step['Data']['PartitionKeyPrefix']) + "*"
else:
raise 'Unknown node type'
# dfs.attr('node', rank=str(step['Data']['Depth']))
id = str(step['Id'])
dfs.node(id, label=label)
if step['ParentId'] and step['ParentId'] > 1:
dfs.edge(str(step['ParentId']), id)
dfs.save(filename="dfs.dot")
dfs.view()
|
[
"joel.verhagen@gmail.com"
] |
joel.verhagen@gmail.com
|
a7cd83c394ab81e6d72c0af8cf0570e883c36b37
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/ZhimaMerchantContractOnofferQueryResponse.py
|
2a52ac0db39df098f3235a07b7272a0fd3afd509
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 9,906
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZhimaMerchantContractOnofferQueryResponse(AlipayResponse):
def __init__(self):
super(ZhimaMerchantContractOnofferQueryResponse, self).__init__()
self._cancel_operator = None
self._cancel_supported = None
self._contract_content = None
self._contract_no = None
self._contract_principal_desc = None
self._contract_principal_logo = None
self._contract_status = None
self._ext_info = None
self._fufilment_callback_url = None
self._fufilment_cnt = None
self._fufilment_desc = None
self._fufilment_end_time = None
self._fufilment_period_type = None
self._fufilment_start_time = None
self._gmt_accept = None
self._gmt_cancel = None
self._gmt_due = None
self._gmt_end = None
self._item_end_time = None
self._item_no = None
self._item_start_time = None
self._offer_creater_id = None
self._offer_creater_name = None
self._offer_creater_type = None
self._out_biz_no = None
self._out_content_no = None
self._sign_principal_id = None
self._sign_principal_type = None
self._subjects = None
@property
def cancel_operator(self):
return self._cancel_operator
@cancel_operator.setter
def cancel_operator(self, value):
self._cancel_operator = value
@property
def cancel_supported(self):
return self._cancel_supported
@cancel_supported.setter
def cancel_supported(self, value):
self._cancel_supported = value
@property
def contract_content(self):
return self._contract_content
@contract_content.setter
def contract_content(self, value):
self._contract_content = value
@property
def contract_no(self):
return self._contract_no
@contract_no.setter
def contract_no(self, value):
self._contract_no = value
@property
def contract_principal_desc(self):
return self._contract_principal_desc
@contract_principal_desc.setter
def contract_principal_desc(self, value):
self._contract_principal_desc = value
@property
def contract_principal_logo(self):
return self._contract_principal_logo
@contract_principal_logo.setter
def contract_principal_logo(self, value):
self._contract_principal_logo = value
@property
def contract_status(self):
return self._contract_status
@contract_status.setter
def contract_status(self, value):
self._contract_status = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
@property
def fufilment_callback_url(self):
return self._fufilment_callback_url
@fufilment_callback_url.setter
def fufilment_callback_url(self, value):
self._fufilment_callback_url = value
@property
def fufilment_cnt(self):
return self._fufilment_cnt
@fufilment_cnt.setter
def fufilment_cnt(self, value):
self._fufilment_cnt = value
@property
def fufilment_desc(self):
return self._fufilment_desc
@fufilment_desc.setter
def fufilment_desc(self, value):
self._fufilment_desc = value
@property
def fufilment_end_time(self):
return self._fufilment_end_time
@fufilment_end_time.setter
def fufilment_end_time(self, value):
self._fufilment_end_time = value
@property
def fufilment_period_type(self):
return self._fufilment_period_type
@fufilment_period_type.setter
def fufilment_period_type(self, value):
self._fufilment_period_type = value
@property
def fufilment_start_time(self):
return self._fufilment_start_time
@fufilment_start_time.setter
def fufilment_start_time(self, value):
self._fufilment_start_time = value
@property
def gmt_accept(self):
return self._gmt_accept
@gmt_accept.setter
def gmt_accept(self, value):
self._gmt_accept = value
@property
def gmt_cancel(self):
return self._gmt_cancel
@gmt_cancel.setter
def gmt_cancel(self, value):
self._gmt_cancel = value
@property
def gmt_due(self):
return self._gmt_due
@gmt_due.setter
def gmt_due(self, value):
self._gmt_due = value
@property
def gmt_end(self):
return self._gmt_end
@gmt_end.setter
def gmt_end(self, value):
self._gmt_end = value
@property
def item_end_time(self):
return self._item_end_time
@item_end_time.setter
def item_end_time(self, value):
self._item_end_time = value
@property
def item_no(self):
return self._item_no
@item_no.setter
def item_no(self, value):
self._item_no = value
@property
def item_start_time(self):
return self._item_start_time
@item_start_time.setter
def item_start_time(self, value):
self._item_start_time = value
@property
def offer_creater_id(self):
return self._offer_creater_id
@offer_creater_id.setter
def offer_creater_id(self, value):
self._offer_creater_id = value
@property
def offer_creater_name(self):
return self._offer_creater_name
@offer_creater_name.setter
def offer_creater_name(self, value):
self._offer_creater_name = value
@property
def offer_creater_type(self):
return self._offer_creater_type
@offer_creater_type.setter
def offer_creater_type(self, value):
self._offer_creater_type = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def out_content_no(self):
return self._out_content_no
@out_content_no.setter
def out_content_no(self, value):
self._out_content_no = value
@property
def sign_principal_id(self):
return self._sign_principal_id
@sign_principal_id.setter
def sign_principal_id(self, value):
self._sign_principal_id = value
@property
def sign_principal_type(self):
return self._sign_principal_type
@sign_principal_type.setter
def sign_principal_type(self, value):
self._sign_principal_type = value
@property
def subjects(self):
return self._subjects
@subjects.setter
def subjects(self, value):
self._subjects = value
def parse_response_content(self, response_content):
response = super(ZhimaMerchantContractOnofferQueryResponse, self).parse_response_content(response_content)
if 'cancel_operator' in response:
self.cancel_operator = response['cancel_operator']
if 'cancel_supported' in response:
self.cancel_supported = response['cancel_supported']
if 'contract_content' in response:
self.contract_content = response['contract_content']
if 'contract_no' in response:
self.contract_no = response['contract_no']
if 'contract_principal_desc' in response:
self.contract_principal_desc = response['contract_principal_desc']
if 'contract_principal_logo' in response:
self.contract_principal_logo = response['contract_principal_logo']
if 'contract_status' in response:
self.contract_status = response['contract_status']
if 'ext_info' in response:
self.ext_info = response['ext_info']
if 'fufilment_callback_url' in response:
self.fufilment_callback_url = response['fufilment_callback_url']
if 'fufilment_cnt' in response:
self.fufilment_cnt = response['fufilment_cnt']
if 'fufilment_desc' in response:
self.fufilment_desc = response['fufilment_desc']
if 'fufilment_end_time' in response:
self.fufilment_end_time = response['fufilment_end_time']
if 'fufilment_period_type' in response:
self.fufilment_period_type = response['fufilment_period_type']
if 'fufilment_start_time' in response:
self.fufilment_start_time = response['fufilment_start_time']
if 'gmt_accept' in response:
self.gmt_accept = response['gmt_accept']
if 'gmt_cancel' in response:
self.gmt_cancel = response['gmt_cancel']
if 'gmt_due' in response:
self.gmt_due = response['gmt_due']
if 'gmt_end' in response:
self.gmt_end = response['gmt_end']
if 'item_end_time' in response:
self.item_end_time = response['item_end_time']
if 'item_no' in response:
self.item_no = response['item_no']
if 'item_start_time' in response:
self.item_start_time = response['item_start_time']
if 'offer_creater_id' in response:
self.offer_creater_id = response['offer_creater_id']
if 'offer_creater_name' in response:
self.offer_creater_name = response['offer_creater_name']
if 'offer_creater_type' in response:
self.offer_creater_type = response['offer_creater_type']
if 'out_biz_no' in response:
self.out_biz_no = response['out_biz_no']
if 'out_content_no' in response:
self.out_content_no = response['out_content_no']
if 'sign_principal_id' in response:
self.sign_principal_id = response['sign_principal_id']
if 'sign_principal_type' in response:
self.sign_principal_type = response['sign_principal_type']
if 'subjects' in response:
self.subjects = response['subjects']
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
40dbde0013cb65e7a43ae769c10ad23ad832acc2
|
9897526c0a108f1d0d423ca7290286de3ed85727
|
/parser_django/breuninger_shop/models.py
|
742d1d8ba624d092e122c2bf747548eed1556d94
|
[] |
no_license
|
NedelkoA/shop_parser
|
faf7064ede5612748664beea6fb8bd75b1f3682a
|
0d0c7cb7e924d6cb5c999cdc82a392f6171010a8
|
refs/heads/master
| 2020-03-25T08:34:12.928353
| 2018-08-10T15:43:22
| 2018-08-10T15:43:22
| 143,620,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
from django.db import models
class ItemModel(models.Model):
name = models.CharField(max_length=60)
brand = models.CharField(max_length=60)
price = models.FloatField()
description = models.TextField()
size = models.TextField(null=True)
currency = models.CharField(max_length=4, null=True)
image = models.TextField()
def __str__(self):
return self.name
|
[
"nedartem@mail.ru"
] |
nedartem@mail.ru
|
f3536713520f35a420c9733cf76fdefc9270f359
|
d9d44028fe7fdc5c5d8a9cf8af423b1bb46ebff0
|
/data_api/api/tests.py
|
cb85397057f2a4945b2bdda92c1b8b01d10556f7
|
[] |
no_license
|
xkmato/data_api
|
b19f7d19706bd354740d960be66038d36106676d
|
732c1c147a8346e22b744699d3ce449e3c5a70f2
|
refs/heads/master
| 2023-01-03T03:51:18.764841
| 2016-09-22T09:54:25
| 2016-09-22T09:54:25
| 40,356,292
| 0
| 1
| null | 2022-12-26T19:50:57
| 2015-08-07T11:01:15
|
Python
|
UTF-8
|
Python
| false
| false
| 6,237
|
py
|
from datetime import datetime
from django.utils import unittest
from data_api.api.models import Org, Urn, Group, Contact, Broadcast, Campaign, Flow, Event, Label, Message, Run, \
Boundary, Result
__author__ = 'kenneth'
class FakeTemba(object):
__dict__ = {}
def __init__(self, **kwargs):
for k, v in kwargs.items():
self.__dict__ = kwargs
setattr(self, k, v)
class TestModels(unittest.TestCase):
def setUp(self):
org = Org.objects.first()
self.org = org
self.urns = ['tel:1234', 'twitter:5678', '876565']
self.temba_group = FakeTemba(uuid='090IOU98', name='test_group', size=1)
self.temba_contact = FakeTemba(uuid='97976768', name='test_contact', urns=self.urns,
groups=[self.temba_group.uuid], fields={'name': 'test_field'}, language='en',
modified_on=datetime.now())
self.temba_broadcast = FakeTemba(id=1, urns=self.urns, contacts=[self.temba_contact.uuid],
groups=[self.temba_group.uuid], text='test test message', status='S',
created_on=datetime.now())
self.temba_campaign = FakeTemba(uuid='IOUIU8908', name='test_campaign', group=self.temba_group.uuid,
created_on=datetime.now())
self.rule_set = FakeTemba(uuid='iteueiot', label='some label', response_type='I')
self.temba_flow = FakeTemba(uuid='89077897897', name='test_flow', archived='T', labels=[], participants=3,
runs=3, completed_runs=2, rulesets=[self.rule_set], created_on=datetime.now())
self.temba_event = FakeTemba(uuid='79079079078', campaign=self.temba_campaign.uuid, relative_to='yuyyer',
offset=5, unit='something', delivery_hour=4, message='Some message',
flow=self.temba_flow.uuid, created_on=datetime.now())
self.temba_label = FakeTemba(uuid='0789089789', name='test_label', count=5)
self.temba_message = FakeTemba(id=242, broadcast=self.temba_broadcast.id, contact=self.temba_contact.uuid,
urn=self.urns[0], status='S', type='F', labels=[self.temba_label.name],
direction='I', archived='F', text='Hi There', created_on=datetime.now(),
delivered_on=datetime.now(), sent_on=datetime.now())
self.temba_run_value_set = FakeTemba(node='90890', category='SC', text='Some Text', rule_value='Y', value='yes',
label='some', time=datetime.now())
self.temba_flow_step = FakeTemba(node='Some Node', text='Yo yo', value='youngh', type='I',
arrived_on=datetime.now(), left_on=datetime.now())
self.temba_run = FakeTemba(id=43, flow=self.temba_flow.uuid, contact=self.temba_contact.uuid,
steps=[self.temba_flow_step], values=[self.temba_run_value_set],
create_on=datetime.now(), completed='y')
self.temba_geometry = FakeTemba(type='some geo type', coordinates='gulu lango')
self.temba_boundary = FakeTemba(boundary='some boundary', name='test_boundary', level='U', parent='b',
geometry=[self.temba_geometry])
self.temba_category_stats = FakeTemba(count=10, label='stats')
self.temba_result = FakeTemba(boundary=None, set=4, unset=5, open_ended='open ended?', label='result1',
categories=[self.temba_category_stats])
def test_create_from_temba(self):
urn = Urn.create_from_temba(self.urns[0])
self.assertEqual((urn.type, urn.identity), tuple(self.urns[0].split(':')))
group_count = Group.objects.count()
Group.create_from_temba(self.org, self.temba_group)
self.assertEqual(group_count+1, Group.objects.count())
contact_count = Contact.objects.count()
Contact.create_from_temba(self.org, self.temba_contact)
self.assertEqual(contact_count+1, Contact.objects.count())
broadcast_count = Broadcast.objects.count()
Broadcast.create_from_temba(self.org, self.temba_broadcast)
self.assertEqual(broadcast_count+1, Broadcast.objects.count())
campaign_count = Campaign.objects.count()
Campaign.create_from_temba(self.org, self.temba_campaign)
self.assertEqual(campaign_count+1, Campaign.objects.count())
flow_count = Flow.objects.count()
flow = Flow.create_from_temba(self.org, self.temba_flow)
self.assertEqual(flow_count+1, Flow.objects.count())
self.assertEqual(flow.rulesets[0].uuid, self.rule_set.uuid)
event_count = Event.objects.count()
Event.create_from_temba(self.org, self.temba_event)
self.assertEqual(event_count+1, Event.objects.count())
label_count = Label.objects.count()
Label.create_from_temba(self.org, self.temba_label)
self.assertEqual(label_count+1, Label.objects.count())
message_count = Message.objects.count()
Message.create_from_temba(self.org, self.temba_message)
self.assertEqual(message_count+1, Message.objects.count())
run_count = Run.objects.count()
run = Run.create_from_temba(self.org, self.temba_run)
self.assertEqual(run_count+1, Run.objects.count())
self.assertEqual(run.values[0].text, self.temba_run_value_set.text)
self.assertEqual(run.steps[0].text, self.temba_flow_step.text)
boundary_count = Boundary.objects.count()
boundary = Boundary.create_from_temba(self.org, self.temba_boundary)
self.assertEqual(boundary_count+1, Boundary.objects.count())
self.assertEqual(boundary.geometry[0].coordinates, self.temba_geometry.coordinates)
result_count = Result.objects.count()
result = Result.create_from_temba(self.org, self.temba_result)
self.assertEqual(result_count+1, Result.objects.count())
self.assertEqual(result.categories[0].label, self.temba_category_stats.label)
|
[
"kbonky@gmail.com"
] |
kbonky@gmail.com
|
29f41c2d015defff1c1e2476495e93db48b05ffd
|
53d030c44cff0a7da956f406c434fe1b2a00589a
|
/token_preds_evaluate.py
|
d3880daa0b39fd139eaaf195521d5a13ed39ef9f
|
[] |
no_license
|
bujol12/bert-seq-interpretability
|
ef68716f69482cac334679867849618f1eb53bc2
|
eed39dceff3560ea3311bc2540671783414e3cce
|
refs/heads/master
| 2023-05-11T07:15:23.339979
| 2021-06-06T12:42:07
| 2021-06-06T12:42:34
| 294,078,771
| 10
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,668
|
py
|
import sys
import os
from utils.tsv_dataset import (
TSVClassificationDataset,
Split,
get_labels,
compute_seq_classification_metrics,
)
from sklearn.metrics import average_precision_score
import logging
from math import sqrt
from utils.arguments import (
datasets,
DataTrainingArguments,
ModelArguments,
parse_config,
)
from utils.model import SeqClassModel
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def choose_top_and_threshold(
examples, importance_threshold, top_count, pos_label, default_label
):
y_pred = []
# print(importance_threshold)
for example in examples:
predictions = []
scores = [
(idx, float(example.labels[idx])) for idx in range(0, len(example.labels))
]
scores.sort(key=lambda x: x[1])
labels = list(map(lambda x: x[0], scores[-top_count:]))
count = 0
for idx in range(0, len(example.labels)):
label = example.labels[idx]
# label = 1.0 - float(label)
# print(label, importance_threshold, idx)
# print(float(label) >= importance_threshold, idx in labels)
if float(label) >= importance_threshold and idx in labels:
predictions.append(pos_label)
count += 1
else:
predictions.append(default_label)
example.predictions = predictions
y_pred.append(predictions)
return y_pred
def pred_stats(y_true, y_pred, label):
predicted_cnt = 0
correct_cnt = 0
total_cnt = 0
for i in range(0, len(y_true)):
# print(i, len(y_true[i]), len(y_pred[i]))
for j in range(0, len(y_true[i])):
if y_pred[i][j] == label:
predicted_cnt += 1
if y_pred[i][j] == label and y_pred[i][j] == y_true[i][j]:
correct_cnt += 1
if y_true[i][j] == label:
total_cnt += 1
return {
"predicted_cnt": predicted_cnt,
"correct_cnt": correct_cnt,
"total_cnt": total_cnt,
}
def get_pred_scores(y_true, y_pred, label):
pred_stats_res = pred_stats(y_true, y_pred, label)
print(pred_stats_res)
res = {}
res["precision"] = (
pred_stats_res["correct_cnt"] / pred_stats_res["predicted_cnt"]
if pred_stats_res["predicted_cnt"] > 0
else 0.0
)
res["recall"] = (
pred_stats_res["correct_cnt"] / pred_stats_res["total_cnt"]
if pred_stats_res["total_cnt"] > 0
else 0.0
)
res["f1"] = (
(2.0 * res["precision"] * res["recall"] / (res["precision"] + res["recall"]))
if (res["precision"] + res["recall"]) > 0
else 0.0
)
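    # F-beta with beta = 0.5 weights precision more heavily than recall:
    #   F_beta = (1 + beta^2) * P * R / (beta^2 * P + R)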
res["f0.5"] = (
((1 + 0.5 * 0.5) * res["precision"] * res["recall"])
/ (0.5 * 0.5 * res["precision"] + res["recall"])
if (0.5 * 0.5 * res["precision"] + res["recall"]) > 0
else 0.0
)
return res
def get_corr(y_target, y_pred):
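    # Pearson correlation computed over token positions whose target value
    # is non-negative; positions with negative targets are skipped.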
sum_pred = 0.0
sum_target = 0.0
count = 0.0
assert len(y_pred) == len(y_target)
for i in range(0, len(y_target)):
assert len(y_pred[i]) == len(y_target[i])
for j in range(0, len(y_target[i])):
if y_target[i][j] >= 0:
# print(y_target[i][j], y_pred[i][j])
count += 1.0
sum_pred += y_pred[i][j]
sum_target += y_target[i][j]
sq_diff_pred = 0.0
sq_diff_target = 0.0
diff_sum = 0.0
mean_pred = sum_pred / count
mean_target = sum_target / count
for i in range(0, len(y_target)):
assert len(y_pred[i]) == len(y_target[i])
for j in range(0, len(y_target[i])):
if y_target[i][j] >= 0:
sq_diff_pred += (y_pred[i][j] - mean_pred) ** 2
sq_diff_target += (y_target[i][j] - mean_target) ** 2
diff_sum += (y_pred[i][j] - mean_pred) * (y_target[i][j] - mean_target)
# print (diff_sum, sq_diff_pred, sq_diff_target, mean_pred, mean_target)
corr = (
diff_sum / (sqrt(sq_diff_pred) * sqrt(sq_diff_target))
if sq_diff_pred != 0 and sq_diff_target != 0
else 0.0
)
return corr
def get_map(y_true, y_pred, label):
sum_val = 0.0
assert len(y_true) == len(y_pred)
cnt = 0
for i in range(len(y_true)):
if (
max(y_true[i]) > 0.0
): # only calculate MAP over sentences with positive tokens
# logger.info("Results:")
# logger.info(y_true[i])
# logger.info(y_pred[i])
ap = average_precision_score(y_true[i], y_pred[i])
# logger.info(ap)
sum_val += ap
cnt += 1
return sum_val / cnt # mean AP
if __name__ == "__main__":
if len(sys.argv) < 2:
logger.error("Required args: [config_path]")
exit()
logger.info("Parsing Config.")
config_dict = parse_config(sys.argv[1])
dataset = datasets[config_dict["dataset"]]
labels = get_labels(dataset.labels)
positive_label = dataset.positive_label
attn_head_id = None
attn_layer_id = None
if config_dict["method"] == "model_attention":
if len(sys.argv) != 4:
logger.error("Required args: [config_path] [layer_id] [head_id]")
exit()
attn_head_id = int(sys.argv[3])
attn_layer_id = int(sys.argv[2])
input_dir = config_dict["results_input_dir"].format(
method=config_dict["method"],
experiment_name=config_dict["experiment_name"].format(
attn_layer_id, attn_head_id
),
model_name=config_dict["model_name"],
dataset_name=config_dict["dataset"],
datetime=config_dict.get("datetime", ""),
)
str2mode = {"dev": Split.dev, "train": Split.train, "test": Split.test}
mode = str2mode[config_dict["dataset_split"]]
data_config = dict(
labels=labels,
max_seq_length=config_dict["max_seq_length"],
overwrite_cache=dataset.overwrite_cache,
make_all_labels_equal_max=False,
default_label=config_dict["test_label_dummy"],
is_seq_class=False,
lowercase=config_dict["lowercase"],
mode=mode,
model_type="token_eval",
)
logger.info("Reading Token Results.")
results_dataset = TSVClassificationDataset(
input_dir,
tokenizer=None,
file_name=config_dict["results_input_filename"],
normalise_labels=config_dict.get("normalise_preds", False),
**data_config,
)
logger.info("Reading gold labels.")
eval_dataset = TSVClassificationDataset(
dataset.data_dir,
tokenizer=None,
file_name=dataset.file_name_token,
**data_config,
)
print(len(eval_dataset.examples))
print(len(results_dataset.examples))
logger.info("Apply threshold and top count")
y_pred = choose_top_and_threshold(
results_dataset.examples,
config_dict["importance_threshold"],
int(config_dict["top_count"]),
default_label=labels[0] if labels[0] != positive_label else labels[1],
pos_label=positive_label,
)
y_true = []
for example in eval_dataset.examples:
y_true.append(example.labels)
logger.info("Get pred scores")
res = get_pred_scores(y_true, y_pred, label=positive_label)
y_pred_values = list(
map(
lambda ex: list(map(lambda x: max(float(x), 0.0), ex.labels)),
results_dataset.examples,
) # make labels be within 0 and 1
)
y_true_values = list(
map(
lambda ex: list(
map(lambda l: (1.0 if l == positive_label else 0.0), ex.labels)
),
eval_dataset.examples,
)
)
for i in range(0, len(y_true)):
if max(y_true_values[i]) > 0:
logger.info(y_true[i])
logger.info(y_true_values[i])
break
logger.info("Get MAP and Correlation metrics")
res["MAP"] = get_map(y_true_values, y_pred_values, positive_label)
res["corr"] = get_corr(y_true_values, y_pred_values)
logger.info("RESULTS:")
logger.info(str(res))
if config_dict.get("eval_results_filename", None) is not None:
logger.info("saving eval results")
filename = os.path.join(input_dir, config_dict["eval_results_filename"])
with open(filename, "w") as fhand:
fhand.write(str(res))
|
[
"kamil.bujel19@imperial.ac.uk"
] |
kamil.bujel19@imperial.ac.uk
|
320808f49265a4c45a70b217ea9fb74e72f5643f
|
3465b5932ef610742ea75e4b7935ef8b2bd0c84a
|
/Programmers_Spicier.py
|
041e713c299f068de4f84d8c15de7b793c57e3a3
|
[] |
no_license
|
taehopark0/algorithm_study
|
25f4c4b9b5b3622ef38314d883b19e0c930f0855
|
8485234db6b0f93e99abe29124a4f9c5bcbfd36f
|
refs/heads/main
| 2023-07-08T18:23:01.759851
| 2021-08-03T14:46:40
| 2021-08-03T14:46:40
| 363,688,854
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,644
|
py
|
#Leo, who loves spicy food, wants every dish to have a Scoville score of at least K.
#To achieve this, Leo repeatedly mixes the two least spicy dishes into a new dish:
#  mixed dish's score = mildest dish's score + (second-mildest dish's score * 2)
#Leo keeps mixing until every dish has a Scoville score of at least K.
#Given the array scoville holding the scores of Leo's dishes and the target score K,
#write a solution function that returns the minimum number of mixes needed
#to make every dish's score at least K.
# - The length of scoville is between 2 and 1,000,000.
# - K is between 0 and 1,000,000,000.
# - Each element of scoville is between 0 and 1,000,000.
# - Return -1 if it is impossible to make every score at least K.
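# Worked example with the sample input below (K = 7):
#   [1, 2, 3, 9, 10, 12] -> mix 1 and 2: 1 + 2*2 = 5  -> [3, 5, 9, 10, 12]
#   min is 3 < 7         -> mix 3 and 5: 3 + 5*2 = 13 -> [9, 10, 12, 13]
#   min is 9 >= 7        -> answer: 2 mixes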
import heapq

scoville = [1, 2, 3, 9, 10, 12]
K = 7

def solution(scoville, K):
    # A min-heap gives O(log n) access to the two mildest dishes; the
    # original list-based version double-removed by value and could never
    # reach its -1 branch.
    heapq.heapify(scoville)
    answer = 0
    while scoville[0] < K:
        if len(scoville) < 2:
            return -1  # fewer than two dishes left: impossible
        mildest = heapq.heappop(scoville)
        second_mildest = heapq.heappop(scoville)
        heapq.heappush(scoville, mildest + second_mildest * 2)
        answer += 1
    return answer
print(solution(scoville,K))
|
[
"taehojj@gmail.com"
] |
taehojj@gmail.com
|
395f94d1dc84e406686d748d046cf456b3ed5e25
|
70d5cfec4410f4d90d87988aa5c935387c7df32a
|
/depth4.py
|
a3793fbd1a753f46fbe5ee086907b595a2038292
|
[] |
no_license
|
csd-robocon-nitk/Robocon-2020
|
6c190961a8502d4d7f626b7e2657b9145cae8512
|
09a10f655137cfbf2d89883098561d96441c16ec
|
refs/heads/master
| 2022-04-09T08:08:46.674327
| 2020-02-18T18:51:02
| 2020-02-18T18:51:02
| 219,488,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,740
|
py
|
import pygame
import numpy as np
import sys
import cv2 as cv
from freenect import sync_get_depth as get_depth
def make_gamma():
"""
Create a gamma table
"""
num_pix = 2048 # there's 2048 different possible depth values
npf = float(num_pix)
_gamma = np.empty((num_pix, 3), dtype=np.uint16)
    for i in range(num_pix):  # range, not Python 2's xrange
v = i / npf
v = pow(v, 3) * 6
pval = int(v * 6 * 256)
lb = pval & 0xff
pval >>= 8
if pval == 0:
a = np.array([255, 255 - lb, 255 - lb], dtype=np.uint8)
elif pval == 1:
a = np.array([255, lb, 0], dtype=np.uint8)
elif pval == 2:
a = np.array([255 - lb, lb, 0], dtype=np.uint8)
elif pval == 3:
a = np.array([255 - lb, 255, 0], dtype=np.uint8)
elif pval == 4:
a = np.array([0, 255 - lb, 255], dtype=np.uint8)
elif pval == 5:
a = np.array([0, 0, 255 - lb], dtype=np.uint8)
else:
a = np.array([0, 0, 0], dtype=np.uint8)
_gamma[i] = a
return _gamma
gamma = make_gamma()
if __name__ == "__main__":
fpsClock = pygame.time.Clock()
FPS = 30 # kinect only outputs 30 fps
disp_size = (640, 480)
pygame.init()
screen = pygame.display.set_mode(disp_size)
font = pygame.font.SysFont('comicsans', 32) # provide your own font
#np.set_printoptions(threshold=sys.maxsize)
while True:
events = pygame.event.get()
for e in events:
if e.type == pygame.QUIT:
sys.exit()
fps_text = "FPS: {0:.2f}".format(fpsClock.get_fps())
# draw the pixels
        depth = np.rot90(get_depth()[0])  # get the depth readings from the camera
        pixels = gamma[depth]  # the colour pixels are the depth readings overlaid onto the gamma table
        depth = 100.0 / (depth * -0.0030711016 + 3.3309495161)
        depth = depth - (0.0003 * depth * depth - 0.09633 * depth - 0.6032) - 0.04 * depth + 0.511
# sum = 0;
# im2, contours, hierarchy = cv.findContours(depth, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
# cv.drawContours(img, contours, -1, (0,255,0), 3)
#for i in range(640):
# if(depth[i][0] < 0):
# sum = sum + depth[i-100][j]
#
#
# print(sum/240)
        b1 = b2 = 0  # guard against frames where no edge is detected
        for i in range(639):  # stop at 638 so depth[i + 1] stays in range
            if depth[i][240] - depth[i + 1][240] > 50:  # compare scalars, not whole rows
                b1 = i
            elif depth[i][240] - depth[i + 1][240] < 50:
                b2 = i
        b3 = (b1 + b2) // 2  # integer midpoint between the two detected edges
        print(depth[b3][240])
#print(depth.shape)
#print(pixels)
temp_surface = pygame.Surface(disp_size)
pygame.surfarray.blit_array(temp_surface, pixels)
pygame.transform.scale(temp_surface, disp_size, screen)
screen.blit(font.render(fps_text, 1, (255, 255, 255)), (30, 30))
pygame.display.flip()
fpsClock.tick(FPS)
|
[
"noreply@github.com"
] |
csd-robocon-nitk.noreply@github.com
|
60cea08e118b99d3f6c68406ec0b5024aed7ff9f
|
a8dcbfc5986747eb56c26b4157481849a55359b3
|
/component/io/mspa_io.py
|
d1211d6efb9465ac30813611b5328e094bf4a26e
|
[
"MIT"
] |
permissive
|
dfguerrerom/gwb
|
39c2c0163aeceea14d55421b2c9958c210fa839e
|
162f7e1a885945814c584a188e78d571a32a9d80
|
refs/heads/master
| 2023-04-19T21:06:54.497659
| 2021-05-19T19:30:24
| 2021-05-19T19:30:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
import json
from .gwb_io import GWBIo
from component import parameter as cp
class MspaIo(GWBIo):
def __init__(self):
# the init file
self.file = None
# all the bytes values
self.background = []
self.foreground = []
# the process
self.connectivity = cp.connectivity[0]['value']
self.edge_width = 1
self.transition = 1
self.int_ext = 1
super().__init__(process = 'mspa')
def update_byte_list(self):
"""manually update the byte_list"""
return super().update_byte_list([
self.background,
self.foreground
])
def update_params_list(self):
"""manually update the params list"""
return super().update_params_list([
self.connectivity,
self.edge_width,
self.transition,
self.int_ext,
])
def get_params_list(self):
"""get the params list for naming purposes (_ and no spaces)"""
self.update_params_list()
return super().get_params_list(self.params_list)
|
[
"pierrick.rambaud49@gmail.com"
] |
pierrick.rambaud49@gmail.com
|
769d8d41af18f2039f539e85f24c3803bc0ea6d4
|
1e74c6b35a4cf9d9b56a7172433c532abd3e5955
|
/LFs/__init__.py
|
d506dde046d84ed649de970d5fef5de11507bdb2
|
[] |
no_license
|
dsteam2021/snorkel_app
|
fbc9ddce7562953fdcc090cda4e76ef5f351b717
|
2dec23a9bb3064720216752edc35f222fcb50ffb
|
refs/heads/master
| 2023-07-15T10:55:42.821224
| 2021-08-27T04:34:08
| 2021-08-27T04:34:08
| 393,338,167
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 614
|
py
|
import os
import regex as re
import numpy as np
import pandas as pd
from unidecode import unidecode
from snorkel.labeling.apply.dask import PandasParallelLFApplier
from snorkel.labeling import labeling_function, PandasLFApplier, LFAnalysis
from snorkel.analysis import get_label_buckets
lfs = []
# n0_bat_dong_san.py -> n0_bat_dong_san
# filter the LF files so they can be auto-imported
list_lfs = [i.split('.')[0] for i in os.listdir('LFs') if (i != 'util.py' and i[0] != '_' and i.split('.')[-1] == 'py')]
for i in list_lfs:
file_lfs = __import__('LFs.' + i, fromlist=['get_lfs'])
lfs.extend(file_lfs.get_lfs())
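
# Each auto-imported module under LFs/ is expected to expose a get_lfs()
# function returning its labeling functions. A minimal sketch of such a
# module (hypothetical file name and rule):
#
#   # LFs/n0_example.py
#   from snorkel.labeling import labeling_function
#
#   @labeling_function()
#   def lf_example(x):
#       return -1  # ABSTAIN
#
#   def get_lfs():
#       return [lf_example]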
|
[
"tranhuuhuy297@gmail.com"
] |
tranhuuhuy297@gmail.com
|
a3f5430553d660e901a3ca13a353ab5b2be852d6
|
422dd5d3c48a608b093cbfa92085e95a105a5752
|
/students/stefanjp1/lesson06/calculator/divider.py
|
98c8637ce4e5f4c2efa63587991b974a17b30611
|
[] |
no_license
|
UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018
|
a2052fdecd187d7dd6dbe6f1387b4f7341623e93
|
b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1
|
refs/heads/master
| 2021-06-07T09:06:21.100330
| 2019-11-08T23:42:42
| 2019-11-08T23:42:42
| 130,731,872
| 4
| 70
| null | 2021-06-01T22:29:19
| 2018-04-23T17:24:22
|
Python
|
UTF-8
|
Python
| false
| false
| 229
|
py
|
'''
Module to provide division functionality
'''
class Divider(object):
''' Division Operator Class '''
@staticmethod
def calc(operand_1, operand_2):
''' return a / b '''
return operand_1/operand_2
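
# Example: Divider.calc(9, 4) returns 2.25 (true division in Python 3).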
|
[
"stefanjp@gmail.com"
] |
stefanjp@gmail.com
|
20c499deff02d5afe014d4e8b4070e7cd1c9e86f
|
cac359ca1bc562ea58cfcc9550b9c5317567b028
|
/src/posts/forms.py
|
4bfa8f7ebb5aa4c8b729c7e68abbaff64f00794b
|
[] |
no_license
|
Ekluv/Social-Media-WebApp-Django
|
1b8f17ade1c55b191cf249470451326904fb9124
|
1194a8fd013d1e832ef02be7a24a6074fbbde210
|
refs/heads/master
| 2021-01-10T17:07:40.569535
| 2015-12-12T07:53:54
| 2015-12-12T07:54:09
| 47,636,962
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 383
|
py
|
from django import forms
from django.contrib.auth.models import User
from .models import Post,Like
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = ['title']
def clean_title(self):
title = self.cleaned_data.get('title')
return title
class LikeForm(forms.ModelForm):
class Meta:
model = Like
fields = ['voter','post']
exclude = ['voter','post']
|
[
"ekluv@live.com"
] |
ekluv@live.com
|
e7b7e896cef558deb4016b44df60bcae04522288
|
f66faf8003b73157b45e96cceffa9036bfb9bfc3
|
/chapter1/transposingVector.py
|
8f07fb43d041f30efccc128bc8e0b4fb1e33e4bc
|
[] |
no_license
|
anupkumarsahu/Machine_Learning_with_python_cookbook
|
eb5db923c005bf991143cfb8071077ebc676be90
|
1c67325280aa324b599941fefdaf82203caf8538
|
refs/heads/master
| 2020-03-22T10:07:14.635555
| 2018-08-02T17:16:21
| 2018-08-02T17:16:21
| 139,879,409
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 235
|
py
|
# Load library
import numpy as np
# Create matrix
matrix = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
# Transpose matrix
print(matrix.T)
# Transpose vector
print(np.array([1, 2, 3, 4, 5, 6]).T)
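
# Note: .T on a 1-D array is a no-op, since there is no second axis to swap.
# To obtain an actual column vector, add a dimension first:
print(np.array([1, 2, 3, 4, 5, 6]).reshape(-1, 1))  # 6x1 column vector
print(np.array([[1, 2, 3, 4, 5, 6]]).T)             # same result via a 2-D transpose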
|
[
"anup-kumar.sahu@hpe.com"
] |
anup-kumar.sahu@hpe.com
|
0c3524ccd8f2582c4b9f9d5dac08f4c41406858a
|
cfee610219adff6e997893a760252b7a9b1a3b00
|
/fac_staff.py
|
6f33763e8674badd86424544ab815210d5f5d982
|
[] |
no_license
|
elylib/patron_uploads
|
8c2a98e23633920dc534208b23b4a7a2e9292a07
|
2f0c4bcd5213731b1bcc2f8739abee4274709148
|
refs/heads/master
| 2020-03-28T14:48:21.605055
| 2018-09-14T18:17:12
| 2018-09-14T18:17:12
| 148,524,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,285
|
py
|
"""
We get Faculty/Staff data from payroll to load into Worldshare Management Services.
Currently this file comes from Melissa Cody.
This code reads that file and transforms the data into the format WMS expects.
It also constructs university IDs and lets the user know if any conflicts exist,
i.e. John Smith and Jane Smith both would have university IDs of jsmith, so the
user can check the university directory to clear up these situations.
This script allows for a less error-prone, more efficient, and more consistent method of loading
Fac/Staff accounts in WMS each year.
"""
import csv
from pprint import pprint
import string
import openpyxl
from constants import OCLC_FIELDS
def make_wsu_username(first, last):
"""
Combine first and last names to make a WSU campus ID
WSU uses the first letter of your first name and your last name to construct IDs.
This gives us most of the names correctly, but you will have to manually check
some against the directory because sometimes there is overlap (i.e. Steve and Suzanne Adams)
and sometimes there is a space or punctuation in the last name that makes it reflect
differently. Also, some emails will be wrong because some groups, like IT, have
special email addresses. This has not traditionally been a problem, can revisit
if it becomes one.
"""
return first[0] + sanitize_last_name(last)
translator = str.maketrans('', '', string.punctuation)
def sanitize_last_name(last):
"""Get rid of spaces and punctuation to fit our typical user id rules"""
out = last.translate(translator)
return out.replace(' ', '')
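
# Illustrative example: make_wsu_username('john', "o'brien") returns 'jobrien'
# (main() lower-cases the names before calling; sanitize_last_name strips the
# apostrophe and any spaces).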
def make_email(user):
return '{0}@westfield.ma.edu'.format(user)
def main(file_location, today):
year, month, day = today.year, today.month, today.day
sheet = openpyxl.load_workbook(file_location).active
with open(f'WEX_{year}_{month}_{day}_StaffPatrons_1_Tab_1.1.txt', 'w', newline="") as w:
"""
This is not necessarily the ideal way to go about this (lining up two lists
for field names and data rows), but this iteration is working. Some information
that is constant among users is hardcoded.
The WEX_2018_09_07_StaffPatrons_1_Tab_1.1.txt that is generated will need to be gone over to double check the rows with
user ids in double_check that are printed out at the end. These are accounts that are
likely to have errors because either a duplicate user id or a space in the last
name.
"""
writer = csv.writer(w, delimiter='\t')
writer.writerow(OCLC_FIELDS)
usernames = set()
barcodes = set()
double_check = []
for row in sheet.iter_rows(min_row=2, max_row=sheet.max_row):
row = [cell.value for cell in row]
if row[6] in barcodes:
# The file from payroll includes some people multiple times, i.e. if they teach in the
# day and CGCE units, they are listed as employees in two areas. This will skip them
# if we've already seen them.
continue
username = make_wsu_username(row[3].lower(), row[2].lower())
# All these empty strings are to make sure the format of the tsv is just so
# Should probably make the facstaff upload more like the student upload
expiration_date = today.replace(year=today.year + 3).strftime('%Y-%m-%d')
new_row = ['', row[3], '', row[2], '', '', '', '', '', '1410', row[6], username, 'urn:mace:oclc:idm:westfieldstatecollege:ldap',
'Faculty and Staff', '', expiration_date, '134347', row[4],
'577 Western Avenue', 'Westfield', 'MA', '01086', '', '', '', '', '', '', '', '', '',
make_email(username), '', '', '', '', '', '', '', '', '', '', '', '', '', '']
writer.writerow(new_row)
# This makes it so we can double check people who may be given an incorrect or duplicate
# idAtSource value, so we can double check them in the directory
if username in usernames or ' ' in row[2]:
double_check.append(username)
usernames.add(username)
barcodes.add(row[6])
pprint(double_check)
|
[
"hill.charles2@gmail.com"
] |
hill.charles2@gmail.com
|
9bffa2243c34caecd45580e6277415e8e49a06d4
|
a57a7604bf34f1d6bda29f797c275a441d2192f9
|
/request.py
|
0aa60f48cf20f557db5814b3fd52d477757a5538
|
[] |
no_license
|
ibaguio/magpy
|
289ad063278124b3ccef3ed5af238fa59d28e258
|
f34f6ead7f1415e001ea3a902d7e969f06f9ecbb
|
refs/heads/master
| 2020-03-11T07:48:59.924728
| 2018-04-17T07:52:19
| 2018-04-17T07:52:19
| 129,867,451
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,642
|
py
|
import os
import json
import logging
import requests
SANDBOX_URL = 'https://sandbox.api.magpie.im/v1'
PRODUCTION_URL = 'https://api.magpie.im/v1'
class MagpieRequest(object):
def __init__(self, is_sandbox=False, **kwargs):
self.is_sandbox = is_sandbox
self.pk = os.environ.get('MAGPIE_PUBLIC_KEY', kwargs.get('pk'))
self.sk = os.environ.get('MAGPIE_SECRET_KEY', kwargs.get('sk'))
if not self.pk or not self.sk:
raise Exception('Public or Secret Key not set in Environment')
elif 'test' in self.pk:
self.is_sandbox = True
self.url = SANDBOX_URL if is_sandbox else PRODUCTION_URL
self.session = requests.Session()
self.session.auth = (self.sk, '')
self.session.headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
self.logger = logging.getLogger('magpy.request')
    def _process_response(self, response):
        if response.status_code in [200, 201]:
            return json.loads(response.content)
        elif response.status_code == 401:
            self.logger.error('Authorization error. Check your token')
            print('Authorization error. Check your token')
        elif response.status_code == 402:
            self.logger.error('Invalid card!')
            print('Invalid card!')
        elif response.status_code == 404:
            self.logger.error('Authorization error. Token not found')
            print('Authorization error. Token not found')
        else:
            self.logger.error('Invalid request')
            print('Invalid request')
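
# Illustrative usage (assumes MAGPIE_PUBLIC_KEY / MAGPIE_SECRET_KEY are set in
# the environment; '/charges' is a hypothetical endpoint, not confirmed here):
#   client = MagpieRequest(is_sandbox=True)
#   resp = client.session.get(client.url + '/charges')
#   data = client._process_response(resp)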
|
[
"baguio.ivan@gmail.com"
] |
baguio.ivan@gmail.com
|
d358b439bce6a1153439fe7b5d3a260db780a5ed
|
e7ff78f98fb4fc8a61df797a1904ba800d8b812b
|
/day7/gameProcessor.py
|
6f18a1545f7fe89bf4117a4f099430700b306163
|
[] |
no_license
|
EvanFCMoses/adventofcode2020
|
13e6f0d6a552f12d9c177713db23bc327eff2e0b
|
25f9dcd0011d49320b2d3f06b1ab9a8506ce0ab4
|
refs/heads/master
| 2023-01-22T12:00:18.505814
| 2020-12-10T01:30:29
| 2020-12-10T01:30:29
| 318,029,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,745
|
py
|
class GameProcessor:
def __init__(self):
self.instructions = self.readInstructions('input.txt')
self.accumulator = 0
def readInstructions(self, fileName):
file = open(fileName, 'r')
array = []
for row in file:
array.append(Instruction(row))
return array
def executeInstruction(self, position):
print(position)
inst = self.instructions[position]
if inst.executed != 0:
return -1
if inst.instruction == "acc":
self.accumulator = self.accumulator + inst.number
inst.executed = inst.executed + 1
return position + 1
elif inst.instruction == "nop":
inst.executed = inst.executed + 1
return position + 1
elif inst.instruction == "jmp":
inst.executed = inst.executed + 1
return position + inst.number
else:
print("halt and catch fire")
def runInstructionsOnce(self, startingLocation):
execResult = self.executeInstruction(startingLocation)
while execResult != -1:
execResult = self.executeInstruction(execResult)
return execResult
    def changeAnInstructionAndRun(self):
        # Note: left unfinished in the original. This flips the first "jmp"
        # to "nop" on the Instruction object itself (the original called
        # str.replace on an Instruction and indexed range(len(0, ...)),
        # both of which raise at runtime).
        for x in range(len(self.instructions)):
            if self.instructions[x].instruction == "jmp":
                self.instructions[x].instruction = "nop"
                return ""
class Instruction:
def __init__(self, inputString):
self.instruction = inputString[0:3]
self.number = self.processRowNumber(inputString[4:])
self.executed = 0
def processRowNumber(self, rowNumber):
if rowNumber[0] == "+":
return int(rowNumber[1:])
else:
return int(rowNumber)
gameProcessor = GameProcessor()
gameProcessor.runInstructionsOnce(0)
# for inst in gameProcessor.instructions:
# print(inst.instruction)
# print(inst.number)
# print(inst.executed)
print(gameProcessor.accumulator)
|
[
"evanfcmoses@gmail.com"
] |
evanfcmoses@gmail.com
|
0e6bcee8fa2cbf16435bd7c5031d1e777fb2bd60
|
b8849355e1b4adabc6054477ec087745fd979c6d
|
/tools/DokuWatch/setup.py
|
764eedc80caf78e4e8e0f7a1913b777858c21dc5
|
[] |
no_license
|
LittleBuster/DokuMail
|
85a4a558f1a4b4fa2b6eedba48d6e466f709b8df
|
574d4ebf3b2ea77b9b874c640bf463ab20533974
|
refs/heads/master
| 2021-01-24T06:07:23.082725
| 2015-05-08T13:52:12
| 2015-05-08T13:52:12
| 21,331,417
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
import sys
from cx_Freeze import setup, Executable
base = None
if sys.platform.startswith('linux'):  # 'linux2' on Python 2, 'linux' on Python 3
base = 'Console'
setup(name = 'DokuMail',
version = '0.0.1',
executables = [Executable('main.py', base=base)],
options = {'build_exe': {'includes': ['sip']}})
|
[
"denisov172@gmail.com"
] |
denisov172@gmail.com
|
f6be638237f9d4aeddf2bd844e81bbe67ff1f719
|
5f89f95fabb7a9d41d023ac1e7451fbf74c57c15
|
/Fonctionnels/Générateurs et Itérateurs/generateur_close() (5).py
|
54c2a50edabfe3c2aca3e939f6f211f465ca4e9c
|
[] |
no_license
|
MINCARELLI13/Fonctionnels
|
4ed5f439dae3032d5941eb0a61706ca472dd6a3c
|
d074c56bac0beebd118c525777e53be7035fd09a
|
refs/heads/master
| 2023-03-08T21:43:02.290575
| 2021-02-28T21:49:38
| 2021-02-28T21:49:38
| 343,183,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 619
|
py
|
#!/usr/bin/env python
print()
def generateur(debut, fin):
count = debut
while count < fin:
yield count
count += 1
intervalle = generateur(5, 20) # create the generator "intervalle"
for element in intervalle:
print(element, end=" ")
if element >= 17:
        intervalle.close() # stops the iteration
# To call the generator's methods, it has to be stored in a variable ("intervalle") before the loop.
# Had we written "for nombre in generateur(5, 20)" directly, we could not have called the generator's close() method.
|
[
"sancho.poncho@laposte.net"
] |
sancho.poncho@laposte.net
|
f2cb6896b36c104fa99aebec1b5b6f318daaeca3
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/network/azure-mgmt-dns/generated_samples/delete_ptr_recordset.py
|
8e4ce27f33bed7dfe65428aac9d804dfa85506a6
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,566
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dns import DnsManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dns
# USAGE
python delete_ptr_recordset.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = DnsManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.record_sets.delete(
resource_group_name="rg1",
zone_name="0.0.127.in-addr.arpa",
relative_record_set_name="1",
record_type="PTR",
)
print(response)
# x-ms-original-file: specification/dns/resource-manager/Microsoft.Network/stable/2018-05-01/examples/DeletePTRRecordset.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
994d32bf85b9f805c0e419fe5ccaa4d5e1bfce35
|
439d2d30b61514bd6a9072c175b124c63f124834
|
/data_statistics/base/index.py
|
e42fe210e0b49d548bb2b143113f01362df60b73
|
[] |
no_license
|
meteor-gogogo/python
|
6f92e3a680c653f92a8d9b04a2d09be6d6ea126a
|
7d03e2f57d4dfb7c7aeb15bf836a6e0d4af9b89d
|
refs/heads/master
| 2022-12-10T07:45:31.085951
| 2019-09-04T06:01:50
| 2019-09-04T06:01:50
| 197,333,904
| 0
| 1
| null | 2022-12-08T05:19:07
| 2019-07-17T07:01:55
|
Python
|
UTF-8
|
Python
| false
| false
| 190
|
py
|
#!/usr/bin/env python
# coding=utf-8
from .baselist import BaseList
class IndexList(BaseList):
@classmethod
def is_registrar_for(cls, listtype):
return listtype == 'index'
|
[
"liuhang@aplum.com.cn"
] |
liuhang@aplum.com.cn
|
af55d24daf0a5c4aebe8237d7030069a16e8052d
|
01548099ec20976d31cca7a720102c11c56fc9be
|
/scripts/handle_request.py
|
d1e6caf1c8ee8b338b8fd6985490f517befa6a56
|
[] |
no_license
|
OMEN001/Lemon_Api_Test
|
6a27a9a9ccf28623006b465a107d53b17ad30404
|
373c9f1a1f1f3160bbe8edcc4b5740f9779947ae
|
refs/heads/master
| 2023-02-25T22:31:39.824908
| 2021-01-24T14:22:52
| 2021-01-24T14:22:52
| 329,324,658
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,664
|
py
|
# -*- coding: utf-8 -*-
# @Time : BaLiang
# @Author : 86187
import requests
import json
class HandleRequest:
    """
    Plain requests calls cannot mimic the way a browser maintains cookies for
    session-based authentication; creating a Session object solves this.
    """
    def __init__(self):
        # Create a session object; all requests to the server go through it
        self.one_session = requests.Session()
    # Add request headers
    def add_headers(self, header):
        # Merge the given headers into the session headers
        return self.one_session.headers.update(header)
    def send(self, url, method="post", data=None, is_json=True, **kwargs):
        """
        Send a request
        :param url: the url to request
        :param method: request method, usually get, post, put, delete or patch
        :param data: the parameters to pass; may be a dict, a JSON-formatted string,
                     or a string containing a dict literal; defaults to None
        :param is_json: whether to pass the parameters as json; if True they are sent
                        as json, if False as www-form data; defaults to True
        :param kwargs: extra keyword arguments, e.g. headers, params, files
        :return: None or a Response object
        """
        # data may take one of the following three forms:
        # data = {"name": '可优', 'gender': True} # a dict
        # data = '{"name": "可优", "gender": true}' # a JSON-formatted string
        # data = "{'name': '优优', 'gender': True}" # a string containing a dict literal
        if isinstance(data, str):  # check whether data is a str; True if so, False otherwise
            try:
                # assume it is a JSON string and convert it to a dict with json.loads
                data = json.loads(data)
            except Exception as e:
                # not a JSON string, so fall back to eval (unsafe on untrusted input)
                print("TODO: use logging here")
                data = eval(data)
        # Normalize the method name to lower case
        method = method.lower()
        if method == "get":
            # for a get request, pass data as query-string parameters
            # res = self.one_session.get(url, params=data, **kwargs)
            res = self.one_session.request(method, url, params=data, **kwargs)
        elif method in ("post", "delete", "patch", "put"):
            if is_json:  # if is_json is True, pass the parameters as json
                # res = self.one_session.post(url, json=data, **kwargs)
                res = self.one_session.request(method, url, json=data, **kwargs)
            else:  # if is_json is False, pass the parameters as www-form data
                # res = self.one_session.post(url, data=data, **kwargs)
                res = self.one_session.request(method, url, data=data, **kwargs)
        else:
            res = None
            print(f"Unsupported request method: {method}")
        return res
    def close(self):
        # Close the session object
        self.one_session.close()
# do_request = HandleRequest()
# if __name__ == '__main__':
#     # 1. Build the request url
#     login_url = "http://api.lemonban.com/futureloan/member/login"
#
#     # 2. Create the request parameters
#     headers = {
#         "User-Agent": "Mozilla/5.0 BaLiang",
#         "X-Lemonban-Media-Type": "lemonban.v2"
#     }
#
#     login_params = {
#         "mobile_phone": "18244446667",
#         "pwd": "12345678",
#     }
#
#     # 3. Perform the login
#     do_request = HandleRequest()  # create a HandleRequest object
#     do_request.add_headers(headers)  # add the common request headers
#     login_res = do_request.send(login_url, data=login_params)
|
[
"1668317403@qq.com"
] |
1668317403@qq.com
|
a6c44e285c3f0befd0a72c4a0e406c390f91eb8b
|
27e4de1b3c2693c9dc6de4dca2db5d9f8f569ec2
|
/Interfaces/AI/Inventory.py
|
ee072e20a3508a6f4a70f27ac98541befdde4edb
|
[] |
no_license
|
Ipukema/PokeStats
|
2d2becf48af285cbae1ed5e054c2ff5bc3ad08a5
|
727ca7b72dcadc6c15f2afd51459312181a6fd2a
|
refs/heads/master
| 2021-01-22T15:01:12.892842
| 2016-08-05T10:35:51
| 2016-08-05T10:35:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,183
|
py
|
# -*- coding: utf-8 -*-
import logging
from enum import IntEnum
log = logging.getLogger(__name__)
# IntEnum (rather than Enum) so members compare equal to the raw integer
# item ids from the API; plain Enum members never equal ints, which made
# the item_id comparisons in update() always False.
class InventoryItem(IntEnum):
ITEM_UNKNOWN = 0
ITEM_POKE_BALL = 1
ITEM_GREAT_BALL = 2
ITEM_ULTRA_BALL = 3
ITEM_MASTER_BALL = 4
ITEM_POTION = 101
ITEM_SUPER_POTION = 102
ITEM_HYPER_POTION = 103
ITEM_MAX_POTION = 104
ITEM_REVIVE = 201
ITEM_MAX_REVIVE = 202
ITEM_LUCKY_EGG = 301
ITEM_INCENSE_ORDINARY = 401
ITEM_INCENSE_SPICY = 402
ITEM_INCENSE_COOL = 403
ITEM_INCENSE_FLORAL = 404
ITEM_TROY_DISK = 501
ITEM_X_ATTACK = 602
ITEM_X_DEFENSE = 603
ITEM_X_MIRACLE = 604
ITEM_RAZZ_BERRY = 701
ITEM_BLUK_BERRY = 702
ITEM_NANAB_BERRY = 703
ITEM_WEPAR_BERRY = 704
ITEM_PINAP_BERRY = 705
ITEM_SPECIAL_CAMERA = 801
ITEM_INCUBATOR_BASIC_UNLIMITED = 901
ITEM_INCUBATOR_BASIC = 902
ITEM_POKEMON_STORAGE_UPGRADE = 1001
ITEM_ITEM_STORAGE_UPGRADE = 1002
class Inventory:
def __init__(self, thread):
self.scanner = thread.scanner
self.session = thread.session
self.api = thread.api
self.inventory = list()
def update(self):
log.info("Обновляем данные сундука")
self.api.get_player().get_inventory()
response_dict = self.api.call()
if response_dict and 'status_code' in response_dict:
            if response_dict['status_code'] == 1:
if 'responses' in response_dict:
if 'GET_INVENTORY' in response_dict['responses']:
if 'inventory_delta' in response_dict['responses']['GET_INVENTORY']:
inventory_res = response_dict['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
pokecount = 0
itemcount = 1
for item in inventory_res:
try:
if 'inventory_item_data' in item:
if 'pokemon_data' in item['inventory_item_data']:
pokecount = pokecount + 1
if 'item' in item['inventory_item_data']:
if 'count' in item['inventory_item_data']['item']:
itemcount = itemcount + item['inventory_item_data']['item']['count']
except Exception as e:
log.error("Ошибка:{0}".format(e))
try:
if 'inventory_item_data' in item:
if 'player_stats' in item['inventory_item_data']:
playerdata = item['inventory_item_data']['player_stats']
if 'level' in playerdata: self.scanner.account.statistic.level = playerdata['level']
if 'experience' in playerdata: self.scanner.account.statistic.experience = playerdata['experience']
if 'next_level_xp' in playerdata and 'experience' in playerdata: self.scanner.account.statistic.experience_to_level = (int(playerdata.get('next_level_xp', 0)) -int(playerdata.get('experience', 0)))
if 'pokemons_captured' in playerdata: self.scanner.account.statistic.catched_pokemons = playerdata['pokemons_captured']
if 'poke_stop_visits' in playerdata: self.scanner.account.statistic.visited_pokestops = playerdata['poke_stop_visits']
#if 'km_walked' in playerdata: self.scanner.account.statistic.walked = playerdata['km_walked']
except Exception as e:
log.error("Ошибка:{0}".format(e))
try:
if 'inventory_item_data' in item:
if 'item' in item['inventory_item_data']:
try:
self.inventory.append(item['inventory_item_data']['item'])
except Exception as e:
log.error("Ошибка:{0}".format(e))
if 'item_id' in item['inventory_item_data']['item'] and 'count' in item['inventory_item_data']['item']:
item_id = item['inventory_item_data']['item']['item_id']
item_count = item['inventory_item_data']['item']['count']
if item_id == InventoryItem.ITEM_POKE_BALL: self.scanner.account.statistic.item_ball_poke = item_count
if item_id == InventoryItem.ITEM_GREAT_BALL: self.scanner.account.statistic.item_ball_great = item_count
if item_id == InventoryItem.ITEM_ULTRA_BALL: self.scanner.account.statistic.item_ball_ultra = item_count
if item_id == InventoryItem.ITEM_MASTER_BALL: self.scanner.account.statistic.item_ball_master = item_count
if item_id == InventoryItem.ITEM_POTION: self.scanner.account.statistic.item_potion = item_count
if item_id == InventoryItem.ITEM_SUPER_POTION: self.scanner.account.statistic.item_potion_super = item_count
if item_id == InventoryItem.ITEM_HYPER_POTION: self.scanner.account.statistic.item_potion_hyper = item_count
if item_id == InventoryItem.ITEM_MAX_POTION: self.scanner.account.statistic.item_potion_master = item_count
if item_id == InventoryItem.ITEM_REVIVE: self.scanner.account.statistic.item_revive = item_count
if item_id == InventoryItem.ITEM_MAX_REVIVE: self.scanner.account.statistic.item_revive_master = item_count
if item_id == InventoryItem.ITEM_RAZZ_BERRY: self.scanner.account.statistic.item_berry_razz = item_count
if item_id == InventoryItem.ITEM_BLUK_BERRY: self.scanner.account.statistic.item_berry_bluk = item_count
if item_id == InventoryItem.ITEM_NANAB_BERRY: self.scanner.account.statistic.item_berry_nanab = item_count
if item_id == InventoryItem.ITEM_WEPAR_BERRY: self.scanner.account.statistic.item_berry_wepar = item_count
if item_id == InventoryItem.ITEM_PINAP_BERRY: self.scanner.account.statistic.item_berry_pinap = item_count
except Exception as e:
log.error("Ошибка:{0}".format(e))
self.scanner.account.statistic.bag_pokemons = pokecount
self.scanner.account.statistic.bag_items = itemcount
self.session.commit()
else:
log.warning("Получен неверный статус: {0}".format(response_dict['status_code']))
def pokeball(self):
self.update()
balls_stock = {1: 0, 2: 0, 3: 0, 4: 0}
for item in self.inventory:
# print(item['inventory_item_data']['item'])
item_id = int(item['item_id'])
item_count = int(item['count'])
if item_id == 1:
# print('Poke Ball count: ' + str(item_count))
balls_stock[1] = item_count
if item_id == 2:
# print('Great Ball count: ' + str(item_count))
balls_stock[2] = item_count
if item_id == 3:
# print('Ultra Ball count: ' + str(item_count))
balls_stock[3] = item_count
if item_id == 4:
# print('Ultra Ball count: ' + str(item_count))
balls_stock[4] = item_count
return balls_stock
def drop_item(self, item_id, count):
self.api.recycle_inventory_item(item_id=item_id, count=count)
response_dict = self.api.call()
if response_dict and 'status_code' in response_dict:
            if response_dict['status_code'] == 1:
if 'responses' in response_dict:
if 'RECYCLE_INVENTORY_ITEM' in response_dict['responses']:
if 'status' in response_dict['responses']['RECYCLE_INVENTORY_ITEM']:
                            if response_dict['responses']['RECYCLE_INVENTORY_ITEM']['status'] == 1:
return True
else:
log.warning("Получен неверный статус: {0}".format(response_dict['responses']['RECYCLE_INVENTORY_ITEM']['status']))
else:
log.warning("Получен неверный статус: {0}".format(response_dict['status_code']))
return False
def recycle(self):
for item in self.inventory:
if "item_id" in item:
item_db = self.scanner.account.statistic.get_by_item_id(int(item["item_id"]))
if 'count' in item:
if item['count'] > item_db[1]:
log.info("Membership {0} is overdraft, drop {1} items".format(item["item_id"], (item['count']-item_db[1])))
if not self.drop_item(item["item_id"], (item['count']-item_db[1])):
log.warning("Неудалось удалить обьекты из инвентаря")
self.update()
|
[
"viktor@tatarnikov.org"
] |
viktor@tatarnikov.org
|
38737226b6765f186c820e52fbc945e19418cc57
|
06f14e666306a99daa7fa0b3fb5ea1487521c2ad
|
/Project UAS/PyGame/CodeMentah.py
|
362c423043cd3f92ff6564b5e240edcd3bbec819
|
[] |
no_license
|
iluljr/CatatanPBO
|
58d193d13da89b9aa46eb6b8a0e0dbba261628da
|
d9933a49369ff56ac3067631a6ef027e7036ce23
|
refs/heads/master
| 2020-04-02T04:54:00.881756
| 2018-12-22T13:35:17
| 2018-12-22T13:35:17
| 154,041,727
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,579
|
py
|
import pygame
# Global constants
# Colors
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
BLUE = (0, 0, 255)
# Screen dimensions
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
class Player(pygame.sprite.Sprite):
""" This class represents the bar at the bottom that the player
controls. """
# -- Methods
def __init__(self):
""" Constructor function """
# Call the parent's constructor
super().__init__()
# Create an image of the block, and fill it with a color.
# This could also be an image loaded from the disk.
width = 40
height = 60
self.image = pygame.Surface([width, height])
self.image.fill(RED)
        # Set a reference to the image rect.
self.rect = self.image.get_rect()
# Set speed vector of player
self.change_x = 0
self.change_y = 0
# List of sprites we can bump against
self.level = None
def update(self):
""" Move the player. """
# Gravity
self.calc_grav()
# Move left/right
self.rect.x += self.change_x
# See if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# If we are moving right,
# set our right side to the left side of the item we hit
if self.change_x > 0:
self.rect.right = block.rect.left
elif self.change_x < 0:
# Otherwise if we are moving left, do the opposite.
self.rect.left = block.rect.right
# Move up/down
self.rect.y += self.change_y
# Check and see if we hit anything
block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
for block in block_hit_list:
# Reset our position based on the top/bottom of the object.
if self.change_y > 0:
self.rect.bottom = block.rect.top
elif self.change_y < 0:
self.rect.top = block.rect.bottom
# Stop our vertical movement
self.change_y = 0
def calc_grav(self):
""" Calculate effect of gravity. """
if self.change_y == 0:
self.change_y = 1
else:
self.change_y += .35
# See if we are on the ground.
if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:
self.change_y = 0
self.rect.y = SCREEN_HEIGHT - self.rect.height
def jump(self):
""" Called when user hits 'jump' button. """
# move down a bit and see if there is a platform below us.
# Move down 2 pixels because it doesn't work well if we only move down
# 1 when working with a platform moving down.
self.rect.y += 2
platform_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)
self.rect.y -= 2
# If it is ok to jump, set our speed upwards
if len(platform_hit_list) > 0 or self.rect.bottom >= SCREEN_HEIGHT:
self.change_y = -10
# Player-controlled movement:
def go_left(self):
""" Called when the user hits the left arrow. """
self.change_x = -6
def go_right(self):
""" Called when the user hits the right arrow. """
self.change_x = 6
def stop(self):
""" Called when the user lets off the keyboard. """
self.change_x = 0
class Platform(pygame.sprite.Sprite):
""" Platform the user can jump on """
def __init__(self, width, height):
""" Platform constructor. Assumes constructed with user passing in
an array of 5 numbers like what's defined at the top of this
code. """
super().__init__()
self.image = pygame.Surface([width, height])
self.image.fill(GREEN)
self.rect = self.image.get_rect()
class Level(object):
""" This is a generic super-class used to define a level.
Create a child class for each level with level-specific
info. """
def __init__(self, player):
""" Constructor. Pass in a handle to player. Needed for when moving platforms
collide with the player. """
self.platform_list = pygame.sprite.Group()
self.enemy_list = pygame.sprite.Group()
self.player = player
# Background image
self.background = None
    # Update everything on this level
def update(self):
""" Update everything in this level."""
self.platform_list.update()
self.enemy_list.update()
def draw(self, screen):
""" Draw everything on this level. """
# Draw the background
screen.fill(BLUE)
# Draw all the sprite lists that we have
self.platform_list.draw(screen)
self.enemy_list.draw(screen)
# Create platforms for the level
class Level_01(Level):
""" Definition for level 1. """
def __init__(self, player):
""" Create level 1. """
# Call the parent constructor
Level.__init__(self, player)
# Array with width, height, x, and y of platform
level = [[210, 70, 500, 500],
[210, 70, 200, 400],
[210, 70, 600, 300],
[210, 70, 200, 50],
[70, 20, 400, 150],
]
# Go through the array above and add platforms
for platform in level:
block = Platform(platform[0], platform[1])
block.rect.x = platform[2]
block.rect.y = platform[3]
block.player = self.player
self.platform_list.add(block)
def main():
""" Main Program """
pygame.init()
# Set the height and width of the screen
size = [SCREEN_WIDTH, SCREEN_HEIGHT]
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Platformer Jumper")
# Create the player
player = Player()
# Create all the levels
level_list = []
level_list.append( Level_01(player) )
# Set the current level
current_level_no = 0
current_level = level_list[current_level_no]
active_sprite_list = pygame.sprite.Group()
player.level = current_level
player.rect.x = 340
player.rect.y = SCREEN_HEIGHT - player.rect.height
active_sprite_list.add(player)
# Loop until the user clicks the close button.
done = False
# Used to manage how fast the screen updates
clock = pygame.time.Clock()
# -------- Main Program Loop -----------
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
player.go_left()
if event.key == pygame.K_RIGHT:
player.go_right()
if event.key == pygame.K_UP:
player.jump()
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT and player.change_x < 0:
player.stop()
if event.key == pygame.K_RIGHT and player.change_x > 0:
player.stop()
# Update the player.
active_sprite_list.update()
# Update items in the level
current_level.update()
# If the player gets near the right side, shift the world left (-x)
if player.rect.right > SCREEN_WIDTH:
player.rect.right = SCREEN_WIDTH
# If the player gets near the left side, shift the world right (+x)
if player.rect.left < 0:
player.rect.left = 0
# ALL CODE TO DRAW SHOULD GO BELOW THIS COMMENT
current_level.draw(screen)
active_sprite_list.draw(screen)
# ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT
# Limit to 60 frames per second
clock.tick(60)
# Go ahead and update the screen with what we've drawn.
pygame.display.flip()
# Be IDLE friendly. If you forget this line, the program will 'hang'
# on exit.
pygame.quit()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
iluljr.noreply@github.com
|
ea2054eb337966fb0493a467059ba157e75ea7e1
|
cc2ddb1220f185658b58e7ac705b794576e1ecc1
|
/avantTrainee/src/test3/scripts/listener.py
|
ff613dde8a2e0d5bfd22ca431a723963f05cd4a5
|
[] |
no_license
|
BielLopes/myROSTeaching
|
cfc23c2986bf4fe162720ac5899a48b8aef37e6b
|
bd89f573461397d3d8ebd6f76aaa3e419a7cfd54
|
refs/heads/master
| 2023-02-28T20:07:23.435810
| 2021-02-07T17:40:56
| 2021-02-07T17:40:56
| 336,846,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,411
|
py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
## Simple talker demo that listens to std_msgs/Strings published
## to the 'chatter' topic
import rospy
from test3.msg import Num
def callback(data):
rospy.loginfo(rospy.get_caller_id() + ' I heard the number: %s', data.num)
def listener():
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. Passing
    # anonymous=True would make rospy choose a unique name so that
    # multiple listeners could run simultaneously; here anonymous=False
    # keeps the fixed name 'listener', so only one instance runs at a time.
    rospy.init_node('listener', anonymous=False)
rospy.Subscriber('chatter', Num, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener()
|
[
"gabriel.galoma@gmail.com"
] |
gabriel.galoma@gmail.com
|
c6089ccaa1b6c92062efbfe48de53c909a29e87f
|
b9f97b3457100ffa978e5b9c659ff5b4a0189a7c
|
/django_summernote/migrations/0001_initial.py
|
bb4366437ddb498ed0c11524070afbc9688d6316
|
[
"MIT"
] |
permissive
|
yuong1979/django-summernote
|
e1cf974daa2b4cdffd251e9f82bd50049a864ab9
|
cf9b2d838310211738162c2acbc7b9145d3970b2
|
refs/heads/master
| 2020-03-19T09:02:22.831819
| 2018-06-06T02:15:09
| 2018-06-06T02:15:09
| 136,255,223
| 0
| 0
|
MIT
| 2018-06-06T02:15:10
| 2018-06-06T01:34:28
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 821
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_summernote.settings
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255, null=True, blank=True)),
('file', models.FileField(upload_to=django_summernote.settings.uploaded_filepath)),
('uploaded', models.DateTimeField(auto_now_add=True)),
],
options={
'abstract': False,
},
bases=(models.Model,),
),
]
|
[
"ez@smartstudy.co.kr"
] |
ez@smartstudy.co.kr
|
730329994488dcd29e2316dd9aa2c78c5f1252ac
|
386b7c61cd8a8c981fd32e73b64352b0fba7d53e
|
/Problem-2-search-in-a-rotated-sorted-array.py
|
003770bd11cc341b039ed1787db705e4edcdf3e9
|
[] |
no_license
|
negacy/problems-vs-aglorithms
|
9c13ba51a514617ce21b3493f625f78a93df2340
|
b7af3b499e4261db67dcf6adfd34920cd6ff6503
|
refs/heads/master
| 2020-07-10T15:30:36.875058
| 2019-09-13T22:17:45
| 2019-09-13T22:17:45
| 204,299,421
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
def rotated_array_search(input_list, number):
"""
Find the index by searching in a rotated sorted array
Args:
input_list(array), number(int): Input array to search and the target
Returns:
int: Index or -1
"""
    start_index = 0
    end_index = len(input_list) - 1
    while start_index <= end_index:
        mid_idx = (start_index + end_index) // 2
        if input_list[mid_idx] == number:
            return mid_idx
        # In a rotated sorted array at least one half around mid is sorted;
        # keep the half that could contain the target and discard the other.
        if input_list[start_index] <= input_list[mid_idx]:  # left half sorted
            if input_list[start_index] <= number < input_list[mid_idx]:
                end_index = mid_idx - 1
            else:
                start_index = mid_idx + 1
        else:  # right half sorted
            if input_list[mid_idx] < number <= input_list[end_index]:
                start_index = mid_idx + 1
            else:
                end_index = mid_idx - 1
    return -1
def linear_search(input_list, number):
for index, element in enumerate(input_list):
if element == number:
return index
return -1
def test_function(test_case):
input_list = test_case[0]
number = test_case[1]
if linear_search(input_list, number) == rotated_array_search(input_list, number):
print("Pass")
else:
print("Fail")
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 8])
test_function([[6, 7, 8, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 10])
#test empty cases
test_function([[], 10])
|
[
"negacy.hailu@gmail.com"
] |
negacy.hailu@gmail.com
|
c4881b4e1b29b1b0197288c5308680c3ff54e274
|
83170ff441a6722db40a6556587593f64c62dd48
|
/src/main/python/json2pandasjs.py
|
c7cc28585ff0f3ec6ba05e0f0d8b6c11ffddc391
|
[] |
no_license
|
hcy7910/json2pandas
|
6bc7b1a8fdb6de006a88c38aa186f63f748947b8
|
53c586c4dfd1f0393cb06fd31a3b686613829d9b
|
refs/heads/master
| 2021-09-04T08:13:26.606016
| 2018-01-17T07:45:10
| 2018-01-17T07:45:10
| 114,956,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# -*- coding:utf-8 -*-
import pandas as pd
import json

def json2pandasjs(file):
    # json.loads handles JSON null natively, so there is no need to
    # regex-rewrite "null" to "None" and eval() each line; building the
    # frame once also avoids the deprecated row-by-row DataFrame.append.
    records = []
    with open(file) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            records.append(json.loads(line))
    df = pd.DataFrame(records)
    df = df.set_index(df['first_visit_time'])
    df = df.sort_index(ascending=False)
    return df
|
[
"1289630384@qq.com"
] |
1289630384@qq.com
|
29fd856261e0af5f9622f6056ef4e5c961b85144
|
22bba1a2e94794c8d84f6291daa5f70b1bde400a
|
/record_wav.py
|
301668b1a9208cea4dcf664189717f0b34400468
|
[] |
no_license
|
CHANShu0508/Record-wav-and-FFT
|
75bfbc798684963c1fce1ee633223f62d77d9fef
|
c5201390d58cbe98e69faf6dd8b043b6bbf2219b
|
refs/heads/master
| 2023-08-13T23:51:20.939373
| 2021-10-12T10:21:12
| 2021-10-12T10:21:12
| 416,157,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
# -*- coding: utf-8 -*-
'''
Record audio and save it in .wav format
'''
import pyaudio
import wave
class Record:
chunk = 1024 # Record in chunks of 1024 samples
    sample_format = pyaudio.paInt16  # 16 bits per sample
channel = 2
sample_rate = 44100 # Record 44100 samples per second
duration = 3
filename = "output.wav"
def __init__(self, time, file_name):
self.duration = time
self.filename = file_name
def start_record(self):
p = pyaudio.PyAudio() #Create an interface to PortAudio
print('* Recording')
stream = p.open(format=self.sample_format,
channels=self.channel,
rate=self.sample_rate,
frames_per_buffer=self.chunk,
input=True
)
frames = [] # Init an array to store frames
for index in range(0, int(self.sample_rate / self.chunk * self.duration)):
data = stream.read(self.chunk)
frames.append(data)
# Stop and close the stream
stream.stop_stream()
stream.close()
# Stop the PortAudio interface
p.terminate()
print('* Stop recording')
wf = wave.open(self.filename, 'wb')
wf.setnchannels(self.channel)
wf.setsampwidth(p.get_sample_size(self.sample_format))
wf.setframerate(self.sample_rate)
wf.writeframes(b''.join(frames))
wf.close()
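
# Minimal driver sketch (duration and file name are illustrative choices, not
# part of the original module; requires a working PyAudio input device):
if __name__ == '__main__':
    recorder = Record(3, 'output.wav')
    recorder.start_record()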
|
[
"chanshuwork@gmail.com"
] |
chanshuwork@gmail.com
|
b176b9733e8a18e756e59c6e0349ac7bb3faf6f8
|
d39bbe4982c51a82d9f50650aba14f86042d3d30
|
/src/alg/qca.py
|
48b5d96a42d395bad7d3b73aa806f845722fa9e7
|
[] |
no_license
|
zyymax/online_community_detection
|
55dd75266bd53a62aaa6a744c456a1e215980a7a
|
77ea79255c217c1df99e690660969a83b9fbb346
|
refs/heads/master
| 2021-07-14T05:12:27.542175
| 2017-10-19T09:25:47
| 2017-10-19T09:25:47
| 107,429,321
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,998
|
py
|
#!/usr/bin/env python
# -*-coding:utf8-*-
"""
Coder: max.zhang
Date: 2015-01-23
Desc: implementation of QCA in INFOCOM'11
Adaptive Algorithms for Detecting Community Structure in Dynamic Social Networks
by Nam P. Nguyen et.al.
"""
from pprint import pprint
from collections import defaultdict
import random
import itertools
from copy import deepcopy as dcp
import sys
from time import clock
from network import Network, timing
from community import WeightCommunity as Community
from node import DisjointNode as Node
import cProfile
gene_pair = lambda l: list(
itertools.chain.from_iterable(
[zip(l[:step+1], l[-step-1:])
for step in xrange(len(l)-1)]))
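# gene_pair enumerates every unordered pair of a list, e.g.
# gene_pair([1, 2, 3]) -> [(1, 3), (1, 2), (2, 3)]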
def randList(ori_data):
random.seed(None)
ran_data = dcp(ori_data)
size = len(ran_data)
for idx1 in xrange(size):
idx2 = random.randint(idx1, size-1)
ran_data[idx1], ran_data[idx2] = ran_data[idx2], ran_data[idx1]
return ran_data
class QCAImp(Network):
def __init__(self):
Network.__init__(self)
self._m = 0.
def _overlap_modul(self):
m = 0.
for node1 in self._node_list:
for node2_idx, weight in node1._nb.items():
m += weight
m /= 2.
modul = 0.
for com_idx, com in enumerate(self._com_list):
if com is None:
continue
tmp_modul = 0
for node1_idx in com._node.keys():
node1 = self._node_list[node1_idx]
ov = 1.
for node2_idx in com._node.keys():
node2 = self._node_list[node2_idx]
ow = 1.
weight = node1._nb.get(node2_idx, 0)
tmp_modul += (weight-node1._du*node2._du/(2*m*ov*ow))
modul += tmp_modul
return modul/(2*m)
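    # _modularity below computes the usual weighted Newman modularity,
    #     Q = sum_c [ w_in(c)/m - (w_tot(c)/(2m))^2 ]
    # where m is the total edge weight of the network (self._m).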
def _modularity(self, comno_set=None):
if self._m < 1e-6:
return 0.
if comno_set is None:
comno_set = filter(
lambda idx: self._com_list[idx] is not None,
range(self._com_num))
else:
comno_set = set(comno_set)
modul = 0.
for com_no in comno_set:
com = self._com_list[com_no]
modul += com._win/self._m - pow(com._wtot/(2*self._m), 2)
return modul
def _getNodeIdx(self, node_str):
if node_str in self._nodestr_dict:
node_idx = self._nodestr_dict[node_str]
if self._node_list[node_idx] is None:
self._node_list[node_idx] = Node(node_idx, node_str)
else:
node_idx = self._node_num
self._nodestr_dict[node_str] = node_idx
node = Node(node_idx, node_str)
self._node_list.append(node)
return node_idx, self._node_list[node_idx]
def _nr_comno_set(self, node_idx):
com_set = set()
node = self._node_list[node_idx]
for nb_idx in node._nb.keys():
com_idx = self._node_list[nb_idx]._com
if com_idx != node._com:
com_set.add(com_idx)
return com_set
def _buildN2C(self, node_idx):
tot_weight = 0.
node = self._node_list[node_idx]
n2c_dict = defaultdict(float)
for nb_idx, weight in self._node_list[node_idx]._nb.items():
com_idx = self._node_list[nb_idx]._com
n2c_dict[com_idx] += weight
return n2c_dict
def _in_force(self, node_du, com_wtot, n2c_weight):
return n2c_weight - node_du*(com_wtot-node_du)/(2*self._m)
def _out_force(self, node_du, com_wtot, n2c_weight):
return n2c_weight - node_du*com_wtot/(2*self._m)
def _nrtCom(self, node_idx, comno_set=None):
if comno_set is None:
            comno_set = set(filter(lambda idx: self._com_list[idx] is not None,
                                   range(self._com_num)))
else:
comno_set = set(comno_set)
node = self._node_list[node_idx]
n2c_dict = self._buildN2C(node_idx)
max_force = self._in_force(node._du, self._com_list[node._com]._wtot, n2c_dict[node._com])
nrtcom_idx = node._com
for com_idx in comno_set:
tmp_force = self._out_force(node._du, self._com_list[com_idx]._wtot, n2c_dict[com_idx])
if tmp_force > max_force:
max_force = tmp_force
nrtcom_idx = com_idx
return max_force, nrtcom_idx
"""
Set of nodes in one community
"""
def _nrtComofNodeSet(self, nodeidx_set, ori_comidx, comno_set=None):
if comno_set is None:
            comno_set = set(filter(lambda idx: self._com_list[idx] is not None,
                                   range(self._com_num)))
else:
comno_set = set(comno_set)
n2c_dict = defaultdict(float)
node_du = 0.
for node_idx in nodeidx_set:
node = self._node_list[node_idx]
node_du += node._du
for nnode_idx, weight in node._nb.items():
com_idx = self._node_list[nnode_idx]._com
n2c_dict[com_idx] += weight
max_force = self._in_force(node_du, self._com_list[ori_comidx]._wtot, n2c_dict[ori_comidx])
nrtcom_idx = ori_comidx
for com_idx in comno_set:
tmp_force = self._out_force(node_du, self._com_list[com_idx]._wtot, n2c_dict[com_idx])
if tmp_force > max_force:
max_force = tmp_force
nrtcom_idx = com_idx
# print '_nrtComofNodeSet', nodeidx_set, ori_comidx, comno_set, max_force, nrtcom_idx
return max_force, nrtcom_idx
def _detBestCom(self, node):
node_idx = node._id
comno_set = self._nr_comno_set(node_idx)
if len(comno_set) == 0:
return None, None
max_force, nrt_comidx = self._nrtCom(node_idx, comno_set)
ori_comidx = node._com
if nrt_comidx != ori_comidx:
ori_com = self._com_list[node._com]
best_com = self._com_list[nrt_comidx]
best_com.addNode(node)
node.setCom(nrt_comidx)
ori_com.delNode(node_idx)
if ori_com._empty:
self._com_list[ori_com._id] = None
return ori_comidx, nrt_comidx
"""
Set of nodes in one community
"""
def _detBestComofNodeSet(self, nodeidx_set, ori_comidx):
comno_set = set()
for node_idx in nodeidx_set:
comno_set.update(self._nr_comno_set(node_idx))
if len(comno_set) == 0:
return
max_force, nrt_comidx = self._nrtComofNodeSet(nodeidx_set, ori_comidx, comno_set)
if nrt_comidx != ori_comidx:
ori_com = self._com_list[ori_comidx]
for node_idx in nodeidx_set:
node = self._node_list[node_idx]
best_com = self._com_list[nrt_comidx]
best_com.addNode(node)
node.setCom(nrt_comidx)
ori_com.delNode(node_idx)
if ori_com._empty:
self._com_list[ori_com._id] = None
return nrt_comidx
# print '_detBestComofNodeSet', nodeidx_set, max_force, nrt_comidx
"""
Delta modularity of putting src_node into dst_com
self._m, src_node._du, dst_com._tot not updated
param node_idx: node
param dst_comidx: dest community
param weight: delta weight
"""
def _delta_q(self, node_idx, dst_comidx, weight):
node = self._node_list[node_idx]
src_com = self._com_list[node._com]
dst_com = self._com_list[dst_comidx]
n2c_dict = self._buildN2C(node_idx)
src_n2c = n2c_dict[src_com._id]
dst_n2c = n2c_dict[dst_comidx]
src_tot = src_com._wtot
dst_tot = dst_com._wtot
return 4*(self._m+weight)*(dst_n2c+weight-src_n2c)+src_n2c*(
2*dst_tot-2*node._du-src_n2c)-2*(node._du+weight)*(
node._du+weight+dst_tot-src_tot)
"""
Delta modularity of putting src_node into dst_com
self._m, src_node._du already updated
param node_idx: node
param dst_comidx: dest community
"""
def _extract_delta_q(self, srcnode_idx, dst_comidx):
return self._delta_q(srcnode_idx, dst_comidx, 0.)
def _findCliq(self, node, node_set):
for nnode1_idx in node._nb.keys():
if nnode1_idx not in node_set:
continue
nnode1 = self._node_list[nnode1_idx]
for nnode2_idx in nnode1._nb.keys():
if nnode2_idx not in node_set:
continue
return nnode1_idx, nnode2_idx
return None
def _cliqPerco(self, cliq, node_set):
tmp_set = dcp(node_set)
for node3_idx in tmp_set:
if node3_idx in cliq:
continue
node3 = self._node_list[node3_idx]
for node1_idx, node2_idx in gene_pair(list(cliq)):
node2 = self._node_list[node2_idx]
if node1_idx in node2._nb and node1_idx in node3._nb and node2_idx in node3._nb:
cliq.add(node3_idx)
node_set.remove(node3_idx)
self._cliqPerco(cliq, node_set)
break
@timing
def addNode(self, new_node_str, edge_dict):
new_nodeidx, new_node = self._getNodeIdx(new_node_str)
com = Community(self._com_num)
self._com_list.append(com)
com.addNode(new_node)
new_node.setCom(com._id)
nnodeidx_list = []
for nnode_str, weight in edge_dict.items():
nnode_idx, nnode = self._getNodeIdx(nnode_str)
new_node.addNB(nnode_idx, weight)
nnode.addNB(new_node._id, weight)
nnodeidx_list.append(nnode_idx)
self._m += weight
if abs(self._m) < 1e-6:
return
for nnode_idx in nnodeidx_list:
self._detBestCom(self._node_list[nnode_idx])
self._detBestCom(self._node_list[new_nodeidx])
@timing
def addEdge(self, node1_str, node2_str, weight):
node1_idx, node1 = self._getNodeIdx(node1_str)
node2_idx, node2 = self._getNodeIdx(node2_str)
ori_weight = node1._nb.get(node2_idx, 0.)
node1.addNB(node2_idx, weight)
node2.addNB(node1_idx, weight)
weight -= ori_weight
self._m += weight
if weight < 0:
return
if node1._com != node2._com:
# dq_1to2 = self._delta_q(node1_idx, node2._com, weight)
# dq_2to1 = self._delta_q(node2_idx, node1._com, weight)
dq_1to2 = self._extract_delta_q(node1_idx, node2._com)
dq_2to1 = self._extract_delta_q(node2_idx, node1._com)
if dq_1to2 < 0 and dq_2to1 < 0:
return
if dq_1to2 > dq_2to1:
# move node1 to node2._com
dst_com = self._com_list[node2._com]
src_node = node1
else:
# move node2 to node1._com
dst_com = self._com_list[node1._com]
src_node = node2
ori_com = self._com_list[src_node._com]
ori_com.delNode(src_node._id)
if ori_com._empty:
self._com_list[ori_com._id] = None
dst_com.addNode(src_node)
src_node.setCom(dst_com._id)
for nnode_idx in src_node._nb.keys():
self._detBestCom(self._node_list[nnode_idx])
@timing
def rmNode(self, node_str):
        assert self._hasNodeName(node_str), 'node %s does not exist' % node_str
node_idx, node = self._getNodeIdx(node_str)
nnode_set = set()
for nnode_idx, weight in node._nb.items():
self._m -= weight
nnode = self._node_list[nnode_idx]
nnode.delNB(node_idx)
if nnode._com == node._com:
nnode_set.add(nnode_idx)
ori_com = self._com_list[node._com]
ori_com.delNode(node_idx)
if ori_com._empty:
self._com_list[ori_com._id] = None
self._node_list[node_idx] = None
del self._nodestr_dict[node_str]
subcom_list = []
while len(nnode_set) > 0:
cur_nodeidx = nnode_set.pop()
subcom = self._findCliq(self._node_list[cur_nodeidx], nnode_set)
if subcom is None:
subcom = {cur_nodeidx}
else:
nnode_set.remove(subcom[0])
nnode_set.remove(subcom[1])
subcom = {subcom[0], subcom[1], cur_nodeidx}
self._cliqPerco(subcom, nnode_set)
subcom_list.append(subcom)
# print cur_nodeidx, subcom, self._node_list[cur_nodeidx]
for subcom in subcom_list:
if len(subcom) == 1:
self._detBestCom(self._node_list[subcom.pop()])
else:
self._detBestComofNodeSet(subcom, node._com)
"""
    Separate a single node from its original community
"""
def _sepSingNode(self, node):
ori_com = self._com_list[node._com]
if len(ori_com._node) == 1:
return
new_com = Community(self._com_num)
self._com_list.append(new_com)
new_com.addNode(node)
node.setCom(new_com._id)
ori_com.delNode(node._id)
if ori_com._empty:
self._com_list[ori_com._id] = None
"""
    Separate a single community after an edge removal.
    Split into 3 parts: run detBestCom on the two node sets and on the other singleton nodes.
"""
def _sepSingComByEdge(self, node1_str, node2_str):
node1_idx, node1 = self._getNodeIdx(node1_str)
node2_idx, node2 = self._getNodeIdx(node2_str)
        assert node1._com == node2._com, 'Separate community by inter-edge'
com = self._com_list[node1._com]
# find 'quasi-clique's and singleton nodes in original community
node1_set = set()
node2_set = set()
onode_set = set()
for node_idx, node in com._node.items():
if node_idx == node1_idx or node_idx in node1._nb:
if node_idx in node2._nb:
onode_set.add(node_idx)
else:
node1_set.add(node_idx)
elif node_idx == node2_idx or node_idx in node2._nb:
node2_set.add(node_idx)
else:
onode_set.add(node_idx)
# detect best community of 'quasi-clique' in node1_set
self._detBestComofNodeSet(node1_set, com._id)
# detect best community of 'quasi-clique' in node2_set
self._detBestComofNodeSet(node2_set, com._id)
# detect best community of singleton nodes in onode_set
for node_idx in onode_set:
ocd, nrd = self._detBestCom(self._node_list[node_idx])
"""
    Separate a single community after an edge removal.
    Split into 3 communities: run detBestCom on the three node sets.
"""
def _sepSingComByEdgev2(self, node1_str, node2_str):
node1_idx, node1 = self._getNodeIdx(node1_str)
node2_idx, node2 = self._getNodeIdx(node2_str)
        assert node1._com == node2._com, 'Separate community by inter-edge'
com = self._com_list[node1._com]
# find 'quasi-clique's and singleton nodes in original community
node1_set = set()
node2_set = set()
onode_set = set()
for node_idx, node in com._node.items():
if node_idx == node1_idx or node_idx in node1._nb:
if node_idx in node2._nb:
onode_set.add(node_idx)
else:
node1_set.add(node_idx)
elif node_idx == node2_idx or node_idx in node2._nb:
node2_set.add(node_idx)
else:
onode_set.add(node_idx)
new_com1 = Community(self._com_num)
self._com_list.append(new_com1)
for node_idx in node2_set:
node = self._node_list[node_idx]
node.setCom(new_com1._id)
new_com1.addNode(node)
com.delNode(node._id)
for node_idx in onode_set:
new_com = Community(self._com_num)
self._com_list.append(new_com)
node = self._node_list[node_idx]
node.setCom(new_com._id)
new_com.addNode(node)
com.delNode(node._id)
# detect best community of singleton nodes in onode_set
for node_idx in onode_set:
self._detBestCom(self._node_list[node_idx])
# detect best community of 'quasi-clique' in node1_set
self._detBestComofNodeSet(node1_set, com._id)
# detect best community of 'quasi-clique' in node2_set
self._detBestComofNodeSet(node2_set, new_com1._id)
"""
    Separate a single community after an edge removal.
    Split into singleton nodes: run detBestCom on all nodes.
"""
def _sepSingComByEdgev3(self, node1_str, node2_str):
node1_idx, node1 = self._getNodeIdx(node1_str)
node2_idx, node2 = self._getNodeIdx(node2_str)
        assert node1._com == node2._com, 'Separate community by inter-edge'
com = self._com_list[node1._com]
node_list = com._node.keys()
for node_idx in node_list:
new_com = Community(self._com_num)
self._com_list.append(new_com)
node = self._node_list[node_idx]
node.setCom(new_com._id)
new_com.addNode(node)
com.delNode(node._id)
if com._empty:
self._com_list[com._id] = None
for node_idx in node_list:
self._detBestCom(self._node_list[node_idx])
@timing
def rmEdge(self, node1_str, node2_str):
node1_idx, node1 = self._getNodeIdx(node1_str)
node2_idx, node2 = self._getNodeIdx(node2_str)
        assert node2_idx in node1._nb, 'edge %s-%s does not exist' % (node1_str, node2_str)
weight = node1._nb[node2_idx]
self._m -= weight
node1.delNB(node2_idx)
node2.delNB(node1_idx)
if node1._com != node2._com:
return
ori_com = self._com_list[node1._com]
if len(node1._nb) == 0 and len(node2._nb) == 0:
self._sepSingNode(node1)
elif len(node1._nb) == 0:
self._sepSingNode(node1)
# self._detBestComofNodeSet(set(ori_com._node.keys()), ori_com._id)
elif len(node2._nb) == 0:
self._sepSingNode(node2)
# self._detBestComofNodeSet(set(ori_com._node.keys()), ori_com._id)
else:
# find maximal quasi-cliques
# detBestCom of singletons and quasi-cliques
self._sepSingComByEdgev3(node1_str, node2_str)
def clear(self):
Network.clear(self)
self._m = 0.
def checkCom(self):
for com in self._com_list:
if com is None:
continue
s = 0.
for node in com._node.values():
s += node._du
assert abs(s-com._wtot) < 1e-6, 'com:%s, s:%.3f' % (com, s)
for node in self._node_list:
if node is None:
continue
du = 0.
for nb_idx, weight in node._nb.items():
du += weight
assert abs(du-node._du) < 1e-6, 'node:%s, du:%.3f' % (node, du)
def show(self):
Network.show(self)
print('Totally: %d nodes, %d communities, %d outlier nodes' % (
len(filter(lambda idx: self._node_list[idx] is not None,
range(self._node_num))),
len(filter(lambda idx: self._com_list[idx] is not None,
range(self._com_num))),
len(filter(lambda idx: self._node_list[idx] is not None and len(self._com_list[self._node_list[idx]._com]._node) == 1,
range(self._node_num)))))
print('M:%.3f' % self._m)
print('Modularity:%.6f' % self._modularity())
def toComStruc(self):
s_list = []
for node in self._node_list:
if node is None:
continue
s_list.append('%s\t%d' % (node._name, node._com))
return '\n'.join(s_list)
"""
Load community structure from output of toComStruc
"""
def fromComStruc(self, fname):
com_node_dict = {}
nodeidx_set = set()
with open(fname) as ins:
for line in ins:
node_str, com_idx = line.strip().split()
com_idx = int(com_idx)
if com_idx not in com_node_dict:
com_node_dict[com_idx] = [node_str]
else:
com_node_dict[com_idx].append(node_str)
if len(com_node_dict) == 0:
return
self._com_list = [None] * (max(com_node_dict.keys())+1)
for com_idx, nodestr_list in com_node_dict.items():
com = Community(com_idx)
self._com_list[com_idx] = com
for node_str in nodestr_list:
assert self._hasNodeName(node_str)
node_idx, node = self._getNodeIdx(node_str)
nodeidx_set.add(node_idx)
node.setCom(com_idx)
com.addNode(node)
for node in self._node_list:
if node._id not in nodeidx_set:
com = Community(self._com_num)
self._com_list.append(com)
node.setCom(com._id)
com.addNode(node)
def main():
dy_fname = sys.argv[1]
com = QCAImp()
with open(dy_fname) as ins:
for line_no, line in enumerate(ins):
if line.startswith('addEdge'):
nodes_pair, weight = line.strip().split('\t')[1:]
weight = float(weight)
weight = 1.
node1_str, node2_str = nodes_pair.split('-')
com.addEdge(node1_str, node2_str, weight)
elif line.startswith('addNode'):
node_str, node_w, edges_str = line.strip('\n').split('\t')[1:]
edge_dict = {}
if len(edges_str.strip()) != 0:
for edge_str in edges_str.split(','):
nnode_str, weight = edge_str.split('-')
weight = 1.
edge_dict[nnode_str] = float(weight)
com.addNode(node_str, edge_dict)
elif line.startswith('rmEdge'):
nodes_pair = line.strip().split('\t', 1)[-1]
node1_str, node2_str = nodes_pair.split('-')
com.rmEdge(node1_str, node2_str)
elif line.startswith('rmNode'):
com.rmNode(line.strip().split('\t')[-1])
elif line.startswith('TIME:'):
time = int(line.strip().split()[-1])
print(time)
com.checkCom()
com.show()
com.show()
if __name__ == "__main__":
assert len(sys.argv) == 2, 'Usage:\t%s <dy_fname>' % sys.argv[0]
main()
# cProfile.run('main()', '%s.stat' % sys.argv[0])
|
[
"zhangly@dtdream.com"
] |
zhangly@dtdream.com
|
2270363e1629e3404120fa029bd191181db861e2
|
c934598867a2c91676c1f0876ff38c3231e176e4
|
/HomeWork_1.py
|
4aba2ea08322d436053b4d0d89390ddd701b8515
|
[] |
no_license
|
MustafaSarimesinli/GlobalAIHubPythonHomework
|
54cdcb434927ddfef37be74a362650eb82fd53b2
|
23cea4d510fd6b08c5cbf62cf7418c97c883783d
|
refs/heads/master
| 2023-02-08T18:37:00.591984
| 2020-12-27T13:17:51
| 2020-12-27T13:17:51
| 323,607,272
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
import time
print("""
------------------------------------------- SCHOOL DATABASE SYSTEM ---------------------------------------------
""")
values_1 = input("Please enter your name: ")
values_2 = input("Please enter your surname: ")
values_3 = int(input("Please enter your age: "))
values_4 = input("Please enter your school name: ")
values_5 = input("Please enter school number: ")
print("Your information is saved in the database, please wait...")
time.sleep(1)
print("Save is completed...\n")
print("------------------------------Your Information & Type of Values---------------------------------")
print(f"""Your Name: {values_1.title()}
Type: {type(values_1)}
Your Surname: {values_2.title()}
Type: {type(values_2)}
Your Age: {values_3}
Type: {type(values_3)}
Your School Name: {values_4.title()}
Type: {type(values_4)}
Your School Number: {values_5.title()}
Type: {type(values_5)}""")
|
[
"sarimesinlimustafa@gmail.com"
] |
sarimesinlimustafa@gmail.com
|
6cc64bd28248b5600fa843a0d944681d6b73bcfa
|
f105e42548d67229d24b67cc25c3caeb2db66183
|
/tclone/accounts/views.py
|
de38491851f130848c6af1ef9de5bd057f55d35c
|
[] |
no_license
|
taiki-dev/tclone
|
f2aa5c2dc867b5570f853f558ab3db8cfff7f1a7
|
a192e1bf68cd2209169d45e66590c70cff601355
|
refs/heads/master
| 2022-12-17T06:33:50.123445
| 2020-09-17T12:54:11
| 2020-09-17T12:54:11
| 296,268,157
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
        if form.is_valid():
            user = form.save()
            login(request, user)  # presumably the intent: `login` was imported but never used
            return redirect('login/')
else:
form = UserCreationForm()
return render(request, 'signup.html', {'form': form})
def thanks(request):
return render(request, 'thanks.html')
|
[
"taiki.murakoshi.0908@gmail.com"
] |
taiki.murakoshi.0908@gmail.com
|
5347fcfd7d3f56e0891c24f6b8d5dc8b119894fd
|
a04bd4dc9057b6e4527c30a506e7c0c453fcf5db
|
/hl/pas/samlplugin/saml2/mongo_store.py
|
6fd38943a4772e380ddc41d1ee81c425256cc889
|
[
"BSD-2-Clause"
] |
permissive
|
saromba/hl.pas.samlplugin
|
3d67bab8c5cc3d8c8d0ba8960150ebc51dc44b3d
|
0a120cc590e208ac19616f35ab567e94294ca0ce
|
refs/heads/master
| 2020-12-28T23:16:53.540274
| 2015-04-14T08:20:56
| 2015-04-14T08:20:56
| 33,917,939
| 0
| 0
| null | 2015-04-14T07:42:47
| 2015-04-14T07:42:47
| null |
UTF-8
|
Python
| false
| false
| 11,641
|
py
|
from hashlib import sha1
import logging
from pymongo import MongoClient
from saml2.eptid import Eptid
from saml2.mdstore import MetaData
from saml2.s_utils import PolicyError
from saml2.ident import code, IdentDB, Unknown
from saml2.mdie import to_dict, from_dict
from saml2 import md
from saml2 import saml
from saml2.extension import mdui
from saml2.extension import idpdisc
from saml2.extension import dri
from saml2.extension import mdattr
from saml2.extension import ui
import xmldsig
import xmlenc
ONTS = {
saml.NAMESPACE: saml,
mdui.NAMESPACE: mdui,
mdattr.NAMESPACE: mdattr,
dri.NAMESPACE: dri,
ui.NAMESPACE: ui,
idpdisc.NAMESPACE: idpdisc,
md.NAMESPACE: md,
xmldsig.NAMESPACE: xmldsig,
xmlenc.NAMESPACE: xmlenc
}
__author__ = 'rolandh'
logger = logging.getLogger(__name__)
class CorruptDatabase(Exception):
pass
def context_match(cfilter, cntx):
# TODO
return True
class SessionStorageMDB(object):
""" Session information is stored in a MongoDB database"""
def __init__(self, collection=""):
connection = MongoClient()
db = connection[collection]
self.assertion = db.assertion
def store_assertion(self, assertion, to_sign):
name_id = assertion.subject.name_id
nkey = sha1(code(name_id)).hexdigest()
doc = {
"name_id_key": nkey,
"assertion_id": assertion.id,
"assertion": to_dict(assertion, ONTS.values(), True),
"to_sign": to_sign
}
_ = self.assertion.insert(doc)
def get_assertion(self, cid):
res = []
for item in self.assertion.find({"assertion_id": cid}):
res.append({"assertion": from_dict(item["assertion"], ONTS, True),
"to_sign": item["to_sign"]})
        if len(res) == 1:
            return res[0]
        elif not res:
            # `res is []` is always False (identity check); test emptiness instead
            return None
        else:
            raise SystemError("More than one assertion with the same ID")
def get_assertions_by_subject(self, name_id=None, session_index=None,
requested_context=None):
"""
:param name_id: One of name_id or key can be used to get the authn
statement
:param session_index: If match against a session index should be done
:param requested_context: Authn statements should match a specific
authn context
:return:
"""
result = []
key = sha1(code(name_id)).hexdigest()
for item in self.assertion.find({"name_id_key": key}):
assertion = from_dict(item["assertion"], ONTS, True)
if session_index or requested_context:
for statement in assertion.authn_statement:
if session_index:
if statement.session_index == session_index:
result.append(assertion)
break
if requested_context:
if context_match(requested_context,
statement.authn_context):
result.append(assertion)
break
else:
result.append(assertion)
return result
def remove_authn_statements(self, name_id):
logger.debug("remove authn about: %s" % name_id)
key = sha1(code(name_id)).hexdigest()
for item in self.assertion.find({"name_id_key": key}):
self.assertion.remove(item["_id"])
def get_authn_statements(self, name_id, session_index=None,
requested_context=None):
"""
:param name_id:
:param session_index:
:param requested_context:
:return:
"""
return [k.authn_statement for k in self.get_assertions_by_subject(
name_id, session_index, requested_context)]
class IdentMDB(IdentDB):
def __init__(self, collection="", domain="", name_qualifier=""):
IdentDB.__init__(self, None, domain, name_qualifier)
self.mdb = MDB(collection, "ident")
self.mdb.primary_key = "user_id"
def in_store(self, _id):
if [x for x in self.mdb.get({"ident_id": _id})]:
return True
else:
return False
def create_id(self, nformat, name_qualifier="", sp_name_qualifier=""):
_id = self._create_id(nformat, name_qualifier, sp_name_qualifier)
while self.in_store(_id):
_id = self._create_id(nformat, name_qualifier, sp_name_qualifier)
return _id
def store(self, ident, name_id):
self.mdb.store(ident, name_id=to_dict(name_id, ONTS.values(), True))
def find_nameid(self, userid, nformat=None, sp_name_qualifier=None,
name_qualifier=None, sp_provided_id=None):
kwargs = {}
if nformat:
kwargs["name_format"] = nformat
if sp_name_qualifier:
kwargs["sp_name_qualifier"] = sp_name_qualifier
if name_qualifier:
kwargs["name_qualifier"] = name_qualifier
if sp_provided_id:
kwargs["sp_provided_id"] = sp_provided_id
res = []
for item in self.mdb.get(userid, **kwargs):
res.append(from_dict(item["name_id"], ONTS, True))
return res
def find_local_id(self, name_id):
cnid = to_dict(name_id, ONTS.values(), True)
for item in self.mdb.get({"name_id": cnid}):
return item[self.mdb.primary_key]
return None
def remove_remote(self, name_id):
cnid = to_dict(name_id, ONTS.values(), True)
self.mdb.remove({"name_id": cnid})
def handle_name_id_mapping_request(self, name_id, name_id_policy):
_id = self.find_local_id(name_id)
if not _id:
raise Unknown("Unknown entity")
if name_id_policy.allow_create == "false":
raise PolicyError("Not allowed to create new identifier")
# else create and return a new one
return self.construct_nameid(_id, name_id_policy=name_id_policy)
def close(self):
pass
#------------------------------------------------------------------------------
class MDB(object):
primary_key = "mdb"
def __init__(self, collection="", sub_collection=""):
connection = MongoClient()
_db = connection[collection]
self.db = _db[sub_collection]
def store(self, value, **kwargs):
if value:
doc = {self.primary_key: value}
else:
doc = {}
doc.update(kwargs)
_ = self.db.insert(doc)
def get(self, value=None, **kwargs):
if value:
doc = {self.primary_key: value}
doc.update(kwargs)
return [item for item in self.db.find(doc)]
elif kwargs:
return [item for item in self.db.find(kwargs)]
def remove(self, key=None, **kwargs):
if key is None:
if kwargs:
for item in self.db.find(kwargs):
self.db.remove(item["_id"])
else:
doc = {self.primary_key: key}
doc.update(kwargs)
for item in self.db.find(doc):
self.db.remove(item["_id"])
def keys(self):
for item in self.db.find():
yield item[self.primary_key]
def items(self):
for item in self.db.find():
_key = item[self.primary_key]
del item[self.primary_key]
del item["_id"]
yield _key, item
def __contains__(self, key):
doc = {self.primary_key: key}
res = [item for item in self.db.find(doc)]
if not res:
return False
else:
return True
def reset(self):
self.db.drop()
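# Usage sketch of the MDB wrapper above (names are illustrative):
#   mdb = MDB("saml", "sessions")
#   mdb.primary_key = "session_id"
#   mdb.store("abc123", user="alice")
#   mdb.get("abc123")  # -> list of matching documents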
#------------------------------------------------------------------------------
class EptidMDB(Eptid):
def __init__(self, secret, collection="", sub_collection="eptid"):
Eptid.__init__(self, secret)
self.mdb = MDB(collection, sub_collection)
self.mdb.primary_key = "eptid_key"
def __getitem__(self, key):
res = self.mdb.get(key)
if not res:
raise KeyError(key)
elif len(res) == 1:
return res[0]["eptid"]
else:
raise CorruptDatabase("Found more than one EPTID document")
def __setitem__(self, key, value):
_ = self.mdb.store(key, **{"eptid": value})
#------------------------------------------------------------------------------
def protect(dic):
res = {}
for key, val in dic.items():
key = key.replace(".", "__")
if isinstance(val, basestring):
pass
elif isinstance(val, dict):
val = protect(val)
elif isinstance(val, list):
li = []
for va in val:
if isinstance(va, basestring):
pass
elif isinstance(va, dict):
va = protect(va)
# I don't think lists of lists will appear am I wrong ?
li.append(va)
val = li
res[key] = val
return res
def unprotect(dic):
res = {}
for key, val in dic.items():
if key == "__class__":
pass
else:
key = key.replace("__", ".")
if isinstance(val, basestring):
pass
elif isinstance(val, dict):
val = unprotect(val)
elif isinstance(val, list):
li = []
for va in val:
if isinstance(va, basestring):
pass
                elif isinstance(va, dict):  # check the element `va`, not the enclosing list
va = unprotect(va)
li.append(va)
val = li
res[key] = val
return res
def export_mdstore_to_mongo_db(mds, collection, sub_collection=""):
mdb = MDB(collection, sub_collection)
mdb.reset()
mdb.primary_key = "entity_id"
for key, desc in mds.items():
kwargs = {
"entity_description": protect(desc),
}
mdb.store(key, **kwargs)
class MetadataMDB(MetaData):
def __init__(self, onts, attrc, collection="", sub_collection=""):
MetaData.__init__(self, onts, attrc)
self.mdb = MDB(collection, sub_collection)
self.mdb.primary_key = "entity_id"
def _ext_service(self, entity_id, typ, service, binding):
try:
srvs = self[entity_id][typ]
except KeyError:
return None
if not srvs:
return srvs
res = []
for srv in srvs:
if "extensions" in srv:
for elem in srv["extensions"]["extension_elements"]:
if elem["__class__"] == service:
if elem["binding"] == binding:
res.append(elem)
return res
def load(self):
pass
def items(self):
for key, item in self.mdb.items():
yield key, unprotect(item["entity_description"])
def keys(self):
return self.mdb.keys()
def values(self):
for key, item in self.mdb.items():
yield unprotect(item["entity_description"])
def __contains__(self, item):
return item in self.mdb
def __getitem__(self, item):
res = self.mdb.get(item)
if not res:
raise KeyError(item)
elif len(res) == 1:
return unprotect(res[0]["entity_description"])
else:
raise CorruptDatabase("More then one document with key %s" % item)
def bindings(self, entity_id, typ, service):
pass
|
[
"t_schorr@gmx.de"
] |
t_schorr@gmx.de
|
2fbfe1db3f59027dc789b1096273144c9025a4c9
|
cd78d84441e69c1fc40b6a6e9e235e7cf6882454
|
/python/114.flatten_binary_tree_to_linked_list.py
|
6f58b41e670f5b1e9ffe984459a6c9d571d73195
|
[] |
no_license
|
buy/leetcode
|
53a12d4e0298284a5a2034c88353d0dc195aa66c
|
da0e834e3f2e3016396fffc96ef943ab9ec58ea4
|
refs/heads/master
| 2021-01-13T01:48:01.176632
| 2015-06-14T06:17:17
| 2015-06-14T06:17:17
| 31,863,627
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
# Given a binary tree, flatten it to a linked list in-place.
# For example,
# Given
# 1
# / \
# 2 5
# / \ \
# 3 4 6
# The flattened tree should look like:
# 1
# \
# 2
# \
# 3
# \
# 4
# \
# 5
# \
# 6
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param {TreeNode} root
# @return {void} Do not return anything, modify root in-place instead.
def flatten(self, root):
self.flat(root)
def flat(self, root):
if not root:
return None
left = self.flat(root.left)
right = self.flat(root.right)
root.left = None
root.right = None
if left:
root.right = left
if right:
self.getTail(root).right = right
return root
def getTail(self, root):
while root.right:
root = root.right
return root
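# Minimal driving sketch (TreeNode as defined in the commented stub above):
# root = TreeNode(1); root.left = TreeNode(2); root.right = TreeNode(5)
# Solution().flatten(root)  # root now chains 1 -> 2 -> 5 via right pointers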
|
[
"cliu@groupon.com"
] |
cliu@groupon.com
|
b681003f50349bf1c861817a9f45d4dbaf85ebb0
|
48395cb86d4a618ac300fbcfa22ced193552cb3e
|
/core/shell.py
|
0b081f33fd45c75a57b21a1f3893e845875a19dc
|
[
"MIT"
] |
permissive
|
Lola224/howboutdat
|
7358f815f388559e8a0a8ca9b441a91b6af72a5e
|
b746f4d218f9e8f10c09cc3f91b876dc186358ea
|
refs/heads/master
| 2023-07-29T05:16:09.840232
| 2021-09-09T00:45:36
| 2021-09-09T00:45:36
| 404,538,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,114
|
py
|
# Copyright (C) 2015 Noa-Emil Nissinen (4shadoww)
# Import python modules
import sys
# Import core modules
from core.module_manager import ModuleManager
from core import colors
from core import command_handler
shellface = "["+colors.bold+"hakku"+colors.end+"]:"
mm = ModuleManager()
def run():
global shellface
global mm
ch = command_handler.Commandhandler(mm, False)
while True:
try:
set_face()
command = input(shellface+" ")
ch.handle(command)
except KeyboardInterrupt:
if mm.module_loaded == 0:
print()
sys.exit(0)
else:
print()
mm.module_loaded = 0
mm.module_name = ""
print(colors.bold + colors.red + "ctrl + c detected going back..." + colors.end)
def set_face():
global shellface
global mm
if mm.module_loaded == 0:
shellface = "["+colors.bold+"hakku"+colors.end+"]:"
else:
shellface = "["+colors.bold+"hakku"+colors.end+"]"+"("+colors.red+mm.module_name+colors.end+"):"
|
[
"89975275+Lola224@users.noreply.github.com"
] |
89975275+Lola224@users.noreply.github.com
|
83b2792ba8d5d121b4fc7c7eaca1bfaf512c6aac
|
677a5c8834a3a3933f81eba075d4c9774e5f4682
|
/apple-card.py
|
82f25ca8fd9ece0ccc93ec67de437c5169ca7be6
|
[] |
no_license
|
mspych/tiller-scripts
|
b3fb78b295e38ef1a79cddd752a78d1659890e07
|
b078f7fede5c80491491cf2dd6a513741f075f94
|
refs/heads/master
| 2022-04-05T10:38:43.932102
| 2020-02-02T20:01:56
| 2020-02-02T20:01:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,999
|
py
|
import fire
import pandas as pd
import datetime as dt
from functools import partial
class AppleStatement:
def __init__(self, *args):
parse_apple_statement = partial(pd.read_csv,
usecols=["Transaction Date", "Merchant", "Category", "Description",
"Amount (USD)"],
dtype={"Transaction Date": "str", "Merchant": "str", "Category": "str",
"Description": "str", "Amount (USD)": "float32"},
parse_dates=["Transaction Date"])
dfs = (parse_apple_statement(f) for f in args)
self.input = pd.concat(dfs)
def head(self):
print(self.input.head())
print(self.input.dtypes)
def tillerize(self, output, last4="1234"):
df = pd.DataFrame(
columns="""Tiller Date Description Category Amount Account Account_Number Institution
Month Week Transaction_ID Check_Number Full_Description Date_Added""".split()
)
df["Tiller"] = ""
df["Date"] = self.input["Transaction Date"]
df["Description"] = self.input["Merchant"]
df["Category"] = self.input["Category"]
df["Amount"] = self.input["Amount (USD)"].apply(func=lambda x: round(x, 2) * -1)
df["Account"] = "CREDIT CARD"
df["Account_Number"] = "xxxx" + str(last4)
df["Institution"] = "Apple"
df["Month"] = df["Date"].apply(func=lambda d: d.replace(day=1))
df["Week"] = df['Date'] - pd.to_timedelta(arg=df['Date'].dt.weekday, unit='D')
df["Transaction_ID"] = ""
df["Full_Description"] = self.input["Description"]
df["Date_Added"] = pd.to_datetime(dt.date.today())
print(df.head())
df.to_csv(path_or_buf=output,
index=False,
date_format='%m/%d/%Y')
if __name__ == '__main__':
fire.Fire(AppleStatement)
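    # Example invocation via python-fire (file names are illustrative; exact
    # Fire syntax for vararg constructors can vary between versions):
    #   python apple-card.py jan.csv feb.csv tillerize --output=tiller.csv --last4=5678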
|
[
"bobetto@Robertos-MacBook-Pro-2.local"
] |
bobetto@Robertos-MacBook-Pro-2.local
|
8da5b0f5002683c5b861c6f1187916a45af04490
|
c50a9defdf08338a76c916e40acf5e6e3d6b3040
|
/features/steps/cart.py
|
c2dbf2f417c5abf15cd0ecc1ee4f776d584ebcb0
|
[] |
no_license
|
gusakovm/python-selenium-automation
|
03e985047b782df929aacd85d5bf6885735310d1
|
07d64cc403caef35432cbc6f62e17e68cc96e290
|
refs/heads/master
| 2020-07-29T14:44:02.242961
| 2019-10-16T12:50:48
| 2019-10-16T12:50:48
| 209,847,858
| 0
| 0
| null | 2019-09-20T17:40:33
| 2019-09-20T17:40:33
| null |
UTF-8
|
Python
| false
| false
| 1,077
|
py
|
from selenium.webdriver.common.by import By
from behave import when, then
from time import sleep
CART_CONTENT_H1 = (By.CSS_SELECTOR, '#sc-active-cart .sc-empty-cart-header')
NAV_CART = (By.CSS_SELECTOR, '#nav-cart')
NAV_CART_COUNTER = (By.CSS_SELECTOR, '#nav-cart #nav-cart-count')
CART_ITEMS = (By.CSS_SELECTOR, '.a-row .sc-list-item')
@then('Make sure the cart isn\'t empty (by counter)')
def check_cart_isnt_empty_in_nav(context):
cart_counter = context.driver.find_element(*NAV_CART_COUNTER)
cart_counter_num = cart_counter.text
assert int(cart_counter_num) > 0, f"Actual items in cart is {cart_counter_num}"
@then('Make sure the item shows at the Cart page')
def check_cart_isnt_empty_on_cart_page(context):
nav_cart = context.driver.find_element(*NAV_CART)
nav_cart.click()
sleep(3)
cart_items = context.driver.find_elements(*CART_ITEMS)
assert len(cart_items) > 0
@then('Amazon Cart page title is {text}')
def cart_is_empty(context, text):
cart_h1 = context.driver.find_element(*CART_CONTENT_H1)
assert text in cart_h1.text
|
[
"max@gusakov.com"
] |
max@gusakov.com
|
99d2a42513b1e14b5d28a051deaf75c9300be265
|
003af3e825dacc2b5d05475ecf7ae4ac061550e1
|
/blinkt/illumination.py
|
33524b18ca6f3dd53c3a3c0fd91d672447e85a9e
|
[
"Apache-2.0"
] |
permissive
|
JKarthaus/RadioRaspi
|
70922029f1925e914f2c9811a181ad347bbef455
|
e095caa8aa0c7ae76d1697c181cbcc2f58940228
|
refs/heads/master
| 2020-03-29T17:18:36.893996
| 2018-12-17T20:13:22
| 2018-12-17T20:13:22
| 150,155,059
| 0
| 0
|
Apache-2.0
| 2018-12-17T20:13:24
| 2018-09-24T19:10:52
|
Python
|
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
#!/usr/bin/env python
import colorsys
import math
import time
from sys import exit
import signal
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
import blinkt
blinkt.set_clear_on_exit()
hue_range = 120
hue_start = 0
max_brightness = 0.2
killer = GracefulKiller()
def show_graph(v, r, g, b):
v *= blinkt.NUM_PIXELS
for x in range(blinkt.NUM_PIXELS):
hue = ((hue_start + ((x / float(blinkt.NUM_PIXELS)) * hue_range)) % 360) / 360.0
r, g, b = [int(c * 255) for c in colorsys.hsv_to_rgb(hue, 1.0, 1.0)]
if v < 0:
brightness = 0
else:
brightness = min(v, 1.0) * max_brightness
blinkt.set_pixel(x, r, g, b, brightness)
v -= 1
blinkt.show()
blinkt.set_brightness(0.1)
while True:
if killer.kill_now:
        blinkt.clear()  # clear is a function; without the parentheses it was never called
break
t = time.time() * 2
v = (math.sin(t) + 1) / 2 # Get a value between 0 and 1
show_graph(v, 255, 0, 255)
time.sleep(0.01)
|
[
"joern.karthaus@gmail.com"
] |
joern.karthaus@gmail.com
|
bcc6a25837963c10c9f36f407f827516687c57ed
|
df449a2eca3caa28164d0a38ffb4fcafcbaba187
|
/web_scraper_2015.py
|
1028253b278e21df2b13b306e17dceb73e60da5b
|
[] |
no_license
|
cklcit03/ml-march-madness
|
c866ae13915e66016adb0722413516fce9f8df28
|
8749b00fb05daea7c9a7b3c9e775274df49a519a
|
refs/heads/master
| 2020-03-10T20:04:56.862198
| 2019-03-02T05:21:11
| 2019-03-02T05:21:11
| 129,562,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,200
|
py
|
# Copyright (C) 2019 Caleb Lo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Machine Learning March Madness
# Web scraper that obtains additional features
from bs4 import BeautifulSoup
import difflib
import numpy
import urllib2
class Error(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def main():
""" Main function
"""
print("Loading list of teams.")
teams = numpy.genfromtxt("teams_2015.csv", dtype=str, delimiter=",")
team_ids = teams[1:, 0]
num_teams = team_ids.shape[0]
team_names = teams[1:, 1]
team_names_list = team_names.tolist()
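    # Strategy note: kenpom.com school names rarely match the CSV names exactly,
    # so difflib.get_close_matches does fuzzy matching below, with a hand-built
    # override table for the known mismatches.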
# Iterate over seasons (starting in 2001-02)
num_seasons = 14
base_year = 2002
for season_idx in range(0, num_seasons):
file_mat = 10000*numpy.ones((num_teams, 3), dtype=object)
file_mat[:, 1] = team_ids
curr_year = base_year+season_idx
for team_idx in range(0, num_teams):
file_mat[team_idx, 0] = curr_year
print("season = %d" % curr_year)
url_string = 'https://kenpom.com/index.php?y=%d' % curr_year
page = urllib2.urlopen(url_string)
soup = BeautifulSoup(page, 'html.parser')
trs = soup.find_all('tr')
tr_idx = 0
for tr in trs:
tds = tr.find_all('td')
if tds:
school = tds[1].a.string
result = difflib.get_close_matches(school, team_names_list)
found_flag = 0
if (school == 'Coastal Carolina'):
school_id = 1157
elif (school == 'East Tennessee St.'):
school_id = 1190
elif (school == 'FIU'):
school_id = 1198
elif (school == 'Kent St.'):
school_id = 1245
elif (school == 'LIU Brooklyn'):
school_id = 1254
elif (school == 'Louisiana Lafayette'):
school_id = 1418
elif (school == 'Louisiana Monroe'):
school_id = 1419
elif (school == 'Maryland Eastern Shore'):
school_id = 1271
elif (school == 'Middle Tennessee'):
school_id = 1292
elif (school == 'Mississippi Valley St.'):
school_id = 1290
elif (school == 'Nebraska Omaha'):
school_id = 1303
elif (school == 'North Carolina A&T'):
school_id = 1299
elif (school == 'North Carolina Central'):
school_id = 1300
elif (school == 'North Carolina St.'):
school_id = 1301
elif (school == 'North Dakota St.'):
school_id = 1295
elif (school == 'Northwestern St.'):
school_id = 1322
elif (school == 'South Carolina St.'):
school_id = 1354
elif (school == 'South Dakota St.'):
school_id = 1355
elif (school == 'Southern'):
school_id = 1380
elif (school == 'Southwest Missouri St.'):
school_id = 1283
elif (school == 'Southwest Texas St.'):
school_id = 1402
elif (school == 'Tennessee Martin'):
school_id = 1404
elif (school == 'UMKC'):
school_id = 1282
elif (school == 'UTSA'):
school_id = 1427
elif (school == 'VCU'):
school_id = 1433
else:
found_flag = 1
print("school = %s, result = %s" % (school, result))
best_match = result[0]
match_idx = numpy.where(team_names == best_match)
school_id = team_ids[match_idx[0]].astype(int)
if (found_flag == 0):
match_idx = numpy.where(team_ids.astype(int) == school_id)
file_mat[match_idx[0], 2] = float(tds[4].string)
tr_idx = tr_idx+1
print("number of teams = %d" % tr_idx)
if (season_idx == 0):
total_file_mat = file_mat
else:
total_file_mat = numpy.r_[total_file_mat, file_mat]
numpy.savetxt('kenpom_2015.csv', total_file_mat, fmt='%s', delimiter=',')
# Call main function
if __name__ == "__main__":
main()
|
[
"flashbuzzer@gmail.com"
] |
flashbuzzer@gmail.com
|
fdfc57cd6512126f1c8c13068d33cb184ab73f82
|
ed789322d285f5d0a5c9b0ba97cf5dafdf3e671c
|
/apps/users/migrations/0001_initial.py
|
fc5334d9f22ab7bc3678281762347cfd3bdcd6bb
|
[] |
no_license
|
mkrj/OnlineShop
|
7be98c96debf62199fd31b46016841697af4ad6c
|
fb55c04f455302c89769ba7cac93c89294f501f1
|
refs/heads/master
| 2022-12-22T15:33:15.197694
| 2020-07-15T08:09:28
| 2020-07-15T08:09:28
| 268,022,909
| 2
| 1
| null | 2022-12-08T11:01:48
| 2020-05-30T06:30:08
|
Python
|
UTF-8
|
Python
| false
| false
| 3,935
|
py
|
# Generated by Django 2.0 on 2020-06-04 18:20
import datetime
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=30, null=True, verbose_name='姓名')),
('birthday', models.DateField(blank=True, null=True, verbose_name='出生年月')),
                ('gender', models.CharField(choices=[('male', '男'), ('female', '女')], default='female', max_length=6)),
('mobile', models.CharField(max_length=11, verbose_name='电话')),
('email', models.EmailField(blank=True, max_length=100, null=True, verbose_name='邮箱')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='VerifyCode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=10, verbose_name='验证码')),
('mobile', models.CharField(max_length=11, verbose_name='电话')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='添加时间')),
],
options={
'verbose_name': '短信验证码',
'verbose_name_plural': '短信验证码',
},
),
]
|
[
"2294240112@qq.com"
] |
2294240112@qq.com
|
e129c89a972b58a0d412098db56fd6e35cfb543b
|
92d39af86fc0de20ca85326275a7eafb2e3117c8
|
/app.py
|
d2f164bf078e0375779137495ba6145e5817be5c
|
[] |
no_license
|
victoray/BookManager
|
bc2daca5b7e8c9ebabf0cb8ef63ea8b8a2b82753
|
b1c8c03b54d763381a6f0a4a84b6cd708a300978
|
refs/heads/master
| 2020-07-22T18:09:11.996483
| 2019-09-11T10:43:56
| 2019-09-11T10:43:56
| 207,285,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,045
|
py
|
from flask import Flask, render_template, request, redirect, url_for
from sqlalchemy.orm import sessionmaker
from waitress import serve
from db_setup import Base, Books, Comments
from sqlalchemy import create_engine
engine = create_engine('postgresql://postgres:postgres@localhost:5432/bookmanager')
# engine = create_engine('sqlite:///bookmanager.db')
Base.metadata.bind = engine
db_session = sessionmaker(bind=engine)()
app = Flask(__name__)
@app.route('/')
def home():
books = db_session.query(Books).limit(1000)
for book in books:
try:
book.comments = db_session.query(Comments).filter(Comments.book_id == book.id).count()
db_session.commit()
except Exception as e:
print(e)
db_session.rollback()
db_session.close()
return render_template('index.html', books=books)
@app.route('/add-book', methods=['GET', 'POST'])
def add_book():
if request.method == 'POST':
for i in range(100):
book = Books(title=f"{request.form['title']} {i}",
author=request.form['author'],
isbn=request.form['isbn'],
summary=request.form['summary'])
db_session.add(book)
db_session.commit()
db_session.close()
return redirect(url_for('home'))
return render_template('add-book.html')
@app.route('/<int:book_id>/delete-book', methods=['POST'])
def delete_book(book_id):
book = db_session.query(Books).filter(Books.id == book_id).one()
comments = db_session.query(Comments).filter(Comments.book_id == book_id).all()
for comment in comments:
db_session.delete(comment)
db_session.delete(book)
db_session.commit()
db_session.close()
return redirect(url_for('home'))
@app.route('/<int:book_id>/edit-book', methods=['GET', 'POST'])
def edit_book(book_id):
book = db_session.query(Books).filter(Books.id == book_id).one()
if request.method == 'POST':
book.id = book_id
book.title = request.form['title']
book.author = request.form['author']
book.isbn = request.form['isbn']
book.summary = request.form['summary']
db_session.commit()
db_session.close()
return redirect(url_for('home'))
db_session.close()
return render_template('edit-book.html', book=book)
@app.route('/<int:book_id>', methods=['POST', 'GET'])
def view_book(book_id):
book = db_session.query(Books).filter(Books.id == book_id).one()
comments = db_session.query(Comments).filter(Comments.book_id == book_id).all()
if request.method == 'POST':
comment = Comments(comment=request.form['comment'], book_id=book_id)
db_session.add(comment)
db_session.commit()
db_session.close()
return redirect(url_for('view_book', book_id=book_id))
db_session.close()
return render_template('book.html', book=book, comments=comments)
if __name__ == '__main__':
app.debug = True
app.run()
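    # For a production-style server, the imported waitress entry point could be
    # used instead of the debug server (port choice is illustrative):
    #   serve(app, host='0.0.0.0', port=8080)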
|
[
"viktoray007@gmail.com"
] |
viktoray007@gmail.com
|
535f3f1814383b30666c5b311255169b1dbcb59f
|
520f9caeb5c380c42efb26f6778340e8fa7deba8
|
/Novice/04-02/sparepart/part/urls.py
|
2d2ad953d7085b3f452574fdd85c6f4e920d31cd
|
[] |
no_license
|
Tama96/praxis-academy
|
1fda57d4ddae1d87deb5bede2e9475e01ab75214
|
b4de60ea0246dac6ee5f898454e5942e0ca91ac8
|
refs/heads/master
| 2022-12-26T13:49:09.925185
| 2020-10-12T01:39:07
| 2020-10-12T01:39:07
| 287,658,864
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from django.contrib import admin
from django.urls import path
from part import views
urlpatterns = [
# from Sparepart
path('admin/', admin.site.urls),
path('emp', views.emp),
path('show', views.show),
path('edit/<int:id>', views.edit),
path('update/<int:id>', views.update),
path('delete/<int:id>', views.destroy),
# from Pekerja
path('home', views.home),
path('show_pegawai', views.show_pegawai),
path('edit_pegawai/<int:id>', views.edit_pegawai),
path('update_pegawai/<int:id>', views.update_pegawai),
path('delete_pegawai/<int:id>', views.destroy_pegawai),
# front page
path('', views.menu),
# from Harga
path('home_harga', views.home_harga),
path('show_harga', views.show_harga),
path('edit_harga/<int:id>', views.edit_harga),
path('update_harga/<int:id>', views.update_harga),
path('delete_harga/<int:id>', views.destroy_harga),
]
|
[
"aldypratama96@gmail.com"
] |
aldypratama96@gmail.com
|
298303abeac1510e8db937cfdf75a92dfcec4501
|
68a432b460fba87b38ef117364c5c6e18b589090
|
/custom_components/ble_monitor/test/test_brifit_parser.py
|
ec5f1cf810d7d7b72449c31d9f598e456929fa46
|
[] |
no_license
|
radio-m/homeassistant
|
551bfafb3f5677b526ec41ffeb663c48a191a20b
|
0dda84ecc182bfdd2e7f634dc693eebb9a0ff006
|
refs/heads/master
| 2022-08-17T02:28:01.227886
| 2022-07-31T06:56:55
| 2022-07-31T06:56:55
| 315,934,165
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
"""The tests for the Brifit ble_parser."""
from ble_monitor.ble_parser import BleParser
class TestBrifit:
"""Tests for the Brifit parser"""
def test_brifit(self):
"""Test brifit parser."""
data_string = "043E2B0201000085B07438C1A41F05095432303102010614FF55AA0101A4C13874B08501070A1D10F064000100D6"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"], "Brifit"
assert sensor_msg["type"], "T201"
assert sensor_msg["mac"], "A4C13874B085"
assert sensor_msg["packet"], "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"], 25.89
assert sensor_msg["humidity"], 43.36
assert sensor_msg["voltage"], 2.63
assert sensor_msg["battery"], 100
assert sensor_msg["rssi"], -42
|
[
"doc.zerg@gmail.com"
] |
doc.zerg@gmail.com
|
d5b536815238e7e5882d6315ad7044d8325dd4ad
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2406/60870/287457.py
|
a7d7347477f9630efe3683f26279554b8f27c16b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
nums = int(input())
array = []
for i in range(nums):
array.append(int(input()))
res = []
for i in range(nums):
res.append(array[nums - i - 1])
res.sort()
count = -1
for j in range(len(array)):
for i in range(len(array)):
goal = res[i]
if array.index(goal) != i:
index = array.index(goal)
temp = array[index]
array[index] = array[i]
array[i] = temp
count = count + 1
print(count)
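# The passes above repeatedly swap each value toward its sorted position and
# count the swaps; note count starts at -1, so the printed value is (swaps - 1),
# presumably the convention this judge expects.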
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
6a9e0a76ce17886a399c10e1d8cd795c9fcab029
|
c974264a6f6971cb5c800ceac93bd10e67f0686a
|
/hello.py
|
750db0a6e81a4bbed67ff1e709a9fc1acae34f21
|
[] |
no_license
|
mukeshdevmurari/pythonrepo
|
9fd6ee29e895e7cc14bcb3a70ae633596890c0c1
|
11547d178f615f5809c11bb2e3e62f31e0f976ef
|
refs/heads/master
| 2020-04-02T04:24:22.695291
| 2020-03-02T03:46:49
| 2020-03-02T03:46:49
| 154,015,033
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35
|
py
|
print("hello")
print("Second Line")
|
[
"mukesh@jogiinfotech.com"
] |
mukesh@jogiinfotech.com
|
27e0c8b6ce319284acc5d72b442e1acbbf9c5e55
|
fd2a23b8fd924ddc385ad480b5e7dd1174961f25
|
/pyltp_eg1.py
|
abd9442196131ec57601565ad5ea7f51ea2683b4
|
[] |
no_license
|
shamrock222/nlp-learning
|
596862ea1c1208d0bff7e8ffae11f9cec42ddf43
|
93c752811a35b505471c8bea8325db14389444eb
|
refs/heads/master
| 2020-04-14T04:51:11.886331
| 2019-01-30T15:31:10
| 2019-01-30T15:31:10
| 163,647,180
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,055
|
py
|
# -*- coding: utf-8 -*-
"""
@refer: https://pyltp.readthedocs.io/zh_CN/latest/api.html#id21
@usage: pyltp is the Python wrapper for LTP, providing word segmentation, POS tagging, named entity recognition, dependency parsing, and semantic role labeling
"""
import os
from pyltp import SentenceSplitter, Segmentor, CustomizedSegmentor, Postagger, NamedEntityRecognizer,Parser, SementicRoleLabeller
class ltp_analyzer:
def __init__(self, model_path):
        self.cws_model_path = os.path.join(model_path, 'cws.model')  # word segmentation model, named `cws.model`
        self.pos_model_path = os.path.join(model_path, 'pos.model')  # POS tagging model, named `pos.model`
        self.ner_model_path = os.path.join(model_path, 'ner.model')  # named entity recognition model, named `ner.model`
        self.par_model_path = os.path.join(model_path, 'parser.model')  # dependency parsing model, named `parser.model`
        self.srl_model_path = os.path.join(model_path, 'pisrl.model')  # semantic role labeling model; needed by get_srl below, `pisrl.model` is the assumed name in the 3.4.0 release
def get_split_sentences(self, text):
        # sentence splitting
        sents = SentenceSplitter.split(text)  # split the text into sentences
print('\n'.join(sents))
return list(sents)
def get_cut_words(self, text, dict_path=None):
        # word segmentation
        segmentor = Segmentor()  # initialize the instance
        if dict_path is None:
            segmentor.load(self.cws_model_path)  # load the model
        else:
            segmentor.load_with_lexicon(self.cws_model_path, dict_path)  # load the model plus an external user dictionary
words = segmentor.segment(text)
print('\t'.join(words))
segmentor.release()
return list(words)
def get_customized_cut_words(self, text, model_file):
        # customized (incrementally trained) segmentation model
        customized_segmentor = CustomizedSegmentor()  # initialize the instance
        customized_segmentor.load(self.cws_model_path, model_file)  # load the base model plus your incremental model
words = customized_segmentor.segment(text)
print('\t'.join(words))
customized_segmentor.release()
return list(words)
def get_postags(self, words):
        postagger = Postagger()  # initialize the instance
        postagger.load(self.pos_model_path)  # load the model
        postags = postagger.postag(words)  # POS tagging
        print('\t'.join(postags))
        postagger.release()  # release the model
return list(postags)
def get_netags(self, words):
        # named entity recognition
        postags = self.get_postags(words)
        recognizer = NamedEntityRecognizer()  # initialize the instance
        recognizer.load(self.ner_model_path)  # load the model
        netags = recognizer.recognize(list(words), list(postags))  # named entity recognition
        print('\t'.join(netags))
        recognizer.release()  # release the model
return list(netags)
def get_dependency(self, words):
        # dependency parsing
        postags = self.get_postags(words)
        parser = Parser()  # initialize the instance
        parser.load(self.par_model_path)  # load the model
        arcs = parser.parse(words, postags)  # dependency parsing
        print("\t".join("%d:%s" % (arc.head, arc.relation) for arc in arcs))
        parser.release()  # release the model
return arcs
def get_srl(self, words):
        # semantic role labeling
        labeller = SementicRoleLabeller()  # initialize the instance
        labeller.load(self.srl_model_path)  # load the model
        # `arcs` comes from the dependency parsing result
        postags = self.get_postags(words)
        arcs = self.get_dependency(words)
        roles = labeller.label(words, postags, arcs)  # semantic role labeling
        # print the results
        for role in roles:
            print(role.index, "".join(["%s:(%d,%d)" % (arg.name, arg.range.start, arg.range.end) for arg in role.arguments]))
        labeller.release()  # release the model
return roles
def main():
LTP_DATA_DIR = 'D:/Model/ltp_data_v3.4.0' # ltp模型目录的路径
text = '元芳你怎么看?我就趴窗口上看呗!'
ltp0 = ltp_analyzer(model_path=LTP_DATA_DIR)
ltp0.get_cut_words(text)
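# main() above is defined but never invoked; a minimal entry point (sketch) would be:
if __name__ == '__main__':
    main()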
|
[
"32649526@qq.com"
] |
32649526@qq.com
|
0dfc8ddda7bfa6bebf915b9611cdf33ac8ec311a
|
7315525e0c1eda981db0b57d5e756282982e69a7
|
/demo_1A6N.py
|
f50f5d1845ff14d0ad5c58c39bd4547c1f0e20ca
|
[] |
no_license
|
hkthirano/Read_Amin_XYZ
|
ca1c38d87a7155f4cb5d74ed8be7e6c212f6b4b8
|
bd12842d4530df22d08428770876ebd5f83f5bae
|
refs/heads/master
| 2021-07-03T11:02:58.503756
| 2020-08-19T07:35:39
| 2020-08-19T07:35:39
| 145,704,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
from Read_XYZ.main import Read_xyz as r_xyz
config = ['A', 'NA', 'NA']
# config = ['A', '10', '100']
demo = r_xyz('./Data/1A6N.pdb', config)
# demo = r_xyz('./Data/1APS.pdb', config)
demo.matching_lines()
demo.save_matching_lines('./1A6N_line.txt')
demo.extract_xyz()
demo.save_extract_xyz('./1A6N_xyz.txt')
# VDWr:Van der Waals radius or IEP:Isoelectric point
demo.extract_xyz_with_weight('VDWr')
demo.save_extract_xyz('./1A6N_xyz_VDWr.txt')
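# Sketch of the other weighting named in the comment above (output file name is illustrative):
demo.extract_xyz_with_weight('IEP')
demo.save_extract_xyz('./1A6N_xyz_IEP.txt')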
|
[
"p238058h@gmail.com"
] |
p238058h@gmail.com
|
5829478376a030f8c92bf294427d412fcbee059d
|
b2fc7db783569a3d695f6017889dd6a09fe56b1d
|
/Database_creator.py
|
0025de95ecb9d8500ef3296c6576449bcee85b5a
|
[] |
no_license
|
Abhishek19895/Search_Indexing
|
7ec4ef89671bbc8dd700410aad91e2f3399c3e51
|
d4ec21d7e4c33c4399099a4343ddca87f048b5e1
|
refs/heads/master
| 2021-01-01T04:56:18.305788
| 2016-05-22T23:58:15
| 2016-05-22T23:58:15
| 58,711,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,739
|
py
|
"""
Author : Abhishek Singh
To create our search database from APIs, Amazon Public data, Stanford Database & Kaggle datasets
"""
##loading all the concerned libraries for this database packing
import os, sys, pandas as pd
import numpy as np
import json
from collections import defaultdict
import glob
#function for loading data from the 'Reviews' csv file
def csv_loader():
"""
input: The csv file whose text is extracted
:return: Shall return a list with all the text loaded
"""
data = pd.read_csv('data/Reviews.csv')
data1 = data['Text'] #Extracting only the textual information
return list(data1)
#function for loading data from the Multiple csv files of NYTimes
def multiple_csv_loader():
"""
input: The list of csv files whose text is extracted
:return: Shall return a list with all the text loaded
"""
list1 = glob.glob('data/nyt_data/*.csv')
dfs = [] #To store all the files
for filename in list1:
data = pd.read_csv(filename)
#reading each row of the csv file
for i in range(data.shape[0]):
l = str(data.iloc[i]['headline'])
if l:
dfs.append(l)
#End of Inner loop for a single document
#End of Outer loop for all the documents
return dfs
#Function for loading the 'Movie Review' txt files
def txt_loader():
"""
input: The list of txt files whose text is extracted
:return: Shall return a list with all the text loaded
"""
list1 = glob.glob('data/test/pos/*.txt') ; list2 = glob.glob('data/test/neg/*.txt')
list3 = glob.glob('data/train/pos/*.txt') ; list4 = glob.glob('data/train/neg/*.txt')
file_list = list1 + list2 + list3 + list4 #Concatenating all the file lists (renamed to avoid shadowing the builtin `list`)
all_files = [] #list to store all names
for fileName in file_list:
fin = open( fileName, "r" )
data_list = fin.readlines()
all_files.append(data_list)
return all_files
#Function to load data from Amazon Beauty JSON file
def json_loader1():
"""
Reading the reviews JSON file and storing it as a list
Output: Shall return a list with all the text loaded
"""
data = []
with open('data/reviews_Beauty.json') as f:
for line in f:
data.append(json.loads(line))
data_list = []
#transforming the json files into a list of tuples
for i in data:
text = i.get('reviewText',None)
#removing unicodes from the various elements
if text:
text = text.encode('utf-8')
data_list.append(text)
return data_list
#Function to load data from Amazon Pets Review JSON file
def json_loader2():
"""
Reading the reviews JSON file and storing it as a list
Output: Shall return a list with all the text loaded
"""
data = []
with open('data/reviews_Pet_Supplies.json') as f:
for line in f:
data.append(json.loads(line))
data_list = []
#transforming the json files into a list of tuples
for i in data:
text = i.get('reviewText',None)
#removing unicodes from the various elements
if text:
text = text.encode('utf-8')
data_list.append(text)
return data_list
#Function to download data from twitter
def twitter_data():
"""
Reading the tweets JSON file and storing it as a list
Output: Shall return a list with all the text loaded
"""
data = []
with open('data/twitter.json') as f:
for line in f:
data.append(json.loads(line))
data_list = []
#transforming the json files into a list of tuples
for i in data:
text = i.get('text',None)
#removing unicodes from the various elements
if text:
text = text.encode('utf-8')
data_list.append(text)
return data_list
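#The three JSON loaders above differ only in the file path and the key they
#read; a possible consolidation (sketch, with an illustrative name) is:
def json_text_loader(path, key):
    """Read one JSON object per line; return the utf-8 text stored under `key`."""
    data_list = []
    with open(path) as f:
        for line in f:
            text = json.loads(line).get(key, None)
            if text:
                data_list.append(text.encode('utf-8'))
    return data_list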
#Running the main function
if __name__ == '__main__':
"""
Running the main function and Creating our master database
"""
all_data = [] #Empty list
a = csv_loader() #Adding the data from the Reviews.csv file (Kaggle) (568454 rows)
a = a + multiple_csv_loader() #Adding the NYtimes data (27794 rows)
a = a + txt_loader() #Adding the data from Stanford Database (50000 rows)
a = a + json_loader1() #Adding the Amazon beauty product reviews (2023082 rows)
a = a + json_loader2() #Adding the Amazon Pets reviews (1235329 rows)
a = a + twitter_data() #Adding the tweets (50000 rows)
all_data = pd.DataFrame(a) #Making a Dataframe of the giant list (4100 rows)
print " Exporting the dataframe to a txt file"
all_data.to_csv('full_data.txt', header = None, index=False)
|
[
"aasingh4@dons.usfca.edu"
] |
aasingh4@dons.usfca.edu
|
8ace0d505c55ded5686556aed557fdd36e7f2f76
|
7887a9337cef4b44bf1ccb80657558aa5e104d35
|
/src/canopen_301_402_old/canopen_301/obj_dict.py
|
b4e6641a782dcc3f276292e638c684d50a469e97
|
[
"MIT"
] |
permissive
|
wuhuaroubj/canopen_301_402
|
5c7ec60a1e8ac1257cd5c5879fde3a8b92adba84
|
16b189ffe631bb5812311131002d7f722de5fc66
|
refs/heads/master
| 2021-04-28T18:04:36.819618
| 2016-09-19T10:48:43
| 2016-09-19T10:48:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 471
|
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from collections import defaultdict
from canopen_301_402.constants import *
from canopen_301_402.canopen_301.obj import CanOpenObject
class CanOpenObjectDictionary(object):
"""docstring for CanOpenObjectDictionary"""
def __init__(self, node):
super(CanOpenObjectDictionary, self).__init__()
self.node = node
self.canopen = self.node.canopen
self.objects = defaultdict(lambda:None)
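# Minimal usage sketch (assumes a `node` object as required by __init__;
# the 0x1000 index is illustrative):
# od = CanOpenObjectDictionary(node)
# od.objects[0x1000] = some_canopen_object
# missing = od.objects[0x2000]  # -> None, courtesy of the defaultdict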
|
[
"xaedes@gmail.com"
] |
xaedes@gmail.com
|
0e96beb15ac5db5c0269be7d3634b98f33bb64d6
|
a3662e2b0aa375614c858c70a02690d21fcb090f
|
/config/settings.py
|
3f429044f9f960550dc1f4e5877684d7a6f27245
|
[] |
no_license
|
chiwon3/AIMProject
|
80bd8eba0f12bd10d322a084b9468d7f4beeafe7
|
ab0fd8174b7ef6588903806b442efbf2b885b174
|
refs/heads/master
| 2023-01-10T05:20:39.203654
| 2020-11-08T08:11:42
| 2020-11-08T08:11:42
| 289,417,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,129
|
py
|
"""
Django settings for AIM project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's92*)m__*r185h$6r$x9ovm23&9+*&awlno5fmdp^yk9pkjlu*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'mypage.apps.MypageConfig',
'accounts.apps.AccountsConfig',
'board.apps.BoardConfig',
'webcrawl.apps.WebcrawlConfig',
'django.contrib.sites',
'django_summernote',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.kakao',
'allauth.socialaccount.providers.instagram',
'allauth.socialaccount.providers.naver',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.twitter',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
WSGI_APPLICATION = 'config.wsgi.application'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ko'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"chiwon3@gmail.com"
] |
chiwon3@gmail.com
|
6157b6267a9a2e0daf831ccea8a45e8b4d8efa43
|
27cdfc9d3e17a87668a436a490e1648344ceb2bf
|
/user_manager/migrations/0003_auto_20191101_2341.py
|
a3fc24bcade0cb4389ee492532bd780aede6cdd0
|
[] |
no_license
|
RadikSeyfullin/onw
|
b2c0a6d85ea1ca9ee0c70b2b1384fcc66ea78892
|
348734cccb82c6b3a72e7f9d61ea040f23123ed4
|
refs/heads/master
| 2020-09-01T03:56:32.743052
| 2020-04-10T06:38:07
| 2020-04-10T06:38:07
| 218,873,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
# Generated by Django 2.2.6 on 2019-11-01 20:41
from django.db import migrations, models
import user_manager.models
class Migration(migrations.Migration):
dependencies = [
('user_manager', '0002_auto_20191031_1756'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='avatar',
field=models.ImageField(default='static/images/users/default_logo.png', upload_to=user_manager.models.get_image_path, verbose_name='Аватар'),
),
]
|
[
"radikseyfullin@gmail.com"
] |
radikseyfullin@gmail.com
|
cfc185495dbf3f6d2211262d61fe63991cdf196b
|
3aaef97b44c2ec1a76125d49d9e91d1fa839da9a
|
/Algorithms/grading_students.py
|
fed3b7cf8c397dc8ab774b9e03502db22778ed97
|
[] |
no_license
|
basakrajarshi/HackerRankChallenges-Python
|
23018bbc8ba6d3470fb432b57bb608fee5479858
|
8757b4def7f32065c8158af636f4277149f19bf3
|
refs/heads/master
| 2020-04-15T01:39:08.266769
| 2019-01-23T00:41:52
| 2019-01-23T00:41:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
import os
import sys
#
# Complete the gradingStudents function below.
#
def gradingStudents(grades):
finalgrades = []
for grade in grades:
if (grade < 38):
#print("First if ",grade)
finalgrades.append(grade)
else:
#print("First else ", grade)
if (grade % 5 <= 2):
finalgrades.append(grade)
else:
finalgrades.append(grade + (5 - (grade % 5)))
return (finalgrades)
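# Worked example of the rule above (HackerRank sample): grades below 38 are
# left alone; otherwise round up to the next multiple of 5 only when the gap
# is under 3, so gradingStudents([73, 67, 38, 33]) -> [75, 67, 40, 33].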
if __name__ == '__main__':
f = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
grades = []
for _ in range(n):
grades_item = int(input())
grades.append(grades_item)
result = gradingStudents(grades)
f.write('\n'.join(map(str, result)))
f.write('\n')
f.close()
|
[
"noreply@github.com"
] |
basakrajarshi.noreply@github.com
|
4065cbf5c44ed65e39aa9d9a05e918d0ccc17505
|
aa35a2c82d356c064a0115b486b1266d0fe75185
|
/problem8.py
|
f8ebcb3b376faad67fef059e5b79370d955ea17d
|
[] |
no_license
|
JellyWX/project-euler
|
f39da9c75cf4ccabecce8fa4e6f90f85bc68fe6c
|
cfa577a8cd87aef5a52a207fe58c55ab111cfef2
|
refs/heads/master
| 2021-01-13T08:00:12.557792
| 2020-10-02T17:55:45
| 2020-10-02T17:55:45
| 95,019,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,239
|
py
|
number = list('7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450')
highest_value = 0
for i in range(len(number) - 12):  # last 13-digit window starts at index len-13
product = 1
for j in range(13):
product *= int(number[i+j])
print(product)
if product > highest_value:
highest_value = product
print(highest_value)
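# Sanity check (sketch): Project Euler 8 states the best product of four
# adjacent digits in this number is 9*9*8*9 = 5832.
best4 = max(int(number[i]) * int(number[i+1]) * int(number[i+2]) * int(number[i+3]) for i in range(len(number) - 3))
assert best4 == 5832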
|
[
"judewrs@gmail.com"
] |
judewrs@gmail.com
|
d8848ccdf65a02f944b270afbc81cd3c00ca92c5
|
a17c820cbbcb82a93b1675a3d1cf37d03440f7f3
|
/StyleTransfer.py
|
2510d272202c9daf8760833e59fabd32fa7731db
|
[] |
no_license
|
Ahaeflig/semantic_map
|
20a8250cde3fc045a36ad9f9d12e3259a0e7f97f
|
140d61a3ae677308955454c7be0492a1392b141a
|
refs/heads/master
| 2021-09-14T17:15:59.509477
| 2018-05-16T13:26:06
| 2018-05-16T13:26:06
| 104,345,328
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,849
|
py
|
import tensorflow as tf
import numpy as np
import skimage
import skimage.transform
from helpers import *
from helpers import getLaplacian
from VOChelpers import *
# IO
import matplotlib.pyplot as plt
from smooth_local_affine import smooth_local_affine
# Datastructure
from collections import OrderedDict
# Custom
import vgg19
import vgg19_atrous
VGG_PATH = 'data/vgg/'
DATA_PATH = 'data/'
class StyleTransfer:
#TODO could create a mask object instead of all this stuff in params
def __init__(self, content_layer_name, style_layers_name, mask_layer_name, init_image, content_image, style_image, session, num_iter,
content_loss_weight, style_loss_weight, K=15, normalize=False, debug=False, orphan=True, use_dcrf=False, matting_loss=1000, tv_loss=1000, soft_temp=0.1, voc_names=[], k_style=[]):
# Check same image size (can extend to multiple image sizes)
assert (content_image.shape == style_image.shape)
'''
=======================================
Store Params
=======================================
'''
self.K = K
self.content_layer_name = content_layer_name
self.style_layers_name = style_layers_name
self.mask_layer_name = mask_layer_name
self.init_image = init_image
self.content_image = content_image
self.style_image = style_image
self.num_iter = num_iter
self.sess = session
self.content_loss_weight = content_loss_weight
self.style_loss_weight = style_loss_weight
self.matting_loss = matting_loss
self.tv_loss = tv_loss
self.debug = debug
self.image_final = 0
self.voc_names = voc_names
# choose K used for style transfer
if not k_style:
self.k_style = np.array(range(K))
else:
self.k_style = k_style
'''
==============================
Pre-compute VGG stuff
==============================
'''
# Vgg
# Only one graph is needed cause we assume the shape of all the images are the same
self.vgg = vgg19.Vgg19(VGG_PATH + 'vgg19.npy')
self.vgg_shape = [1, content_image.shape[0], content_image.shape[1], content_image.shape[2]]
images = tf.placeholder("float", self.vgg_shape, name="images")
self.vgg.build(images)
# Content from content image and style image (remember eval_layers() return dicts)
self.content_layer = eval_layers(self.sess, self.content_image, self.vgg, self.content_layer_name)
self.style_layer = eval_layers(self.sess, self.style_image, self.vgg, self.content_layer_name)
# Mask layers, note that eval_layers expects array of names
self.mask_content_layer = eval_layers(self.sess, content_image, self.vgg, [self.mask_layer_name])
self.mask_style_layer = eval_layers(self.sess, style_image, self.vgg, [self.mask_layer_name])
''' VGG atrous stuff
self.vgg_atrous = vgg19_atrous.Vgg19_Atrous(VGG_PATH + 'vgg19.npy')
self.vgg_atrous.build(images)
self.mask_content_layer = eval_layers(self.sess, content_image, self.vgg_atrous, [self.mask_layer_name])
self.mask_style_layer = eval_layers(self.sess, style_image, self.vgg_atrous, [self.mask_layer_name])
'''
# Style from style image, precomputed since the gram doesn't change
self.style_layers = eval_layers(self.sess, style_image, self.vgg, self.style_layers_name)
self.grams_style = {layer_name: self.compute_gram(layer) for layer_name, layer in self.style_layers.items()}
'''
==================
Mask processing
==================
'''
# Masks between the content layer of content image and style image
self.A = compute_affinity_matrix(self.mask_content_layer.get(self.mask_layer_name),
self.mask_style_layer.get(self.mask_layer_name))
self.orphan = 0
# If you want to use VOC segmentation it's possible using the voc_names with voc imagenames
if not voc_names:
if self.K > 0:
L, R = get_masks(K, self.A, self.mask_content_layer.get(self.mask_layer_name).shape[1:3],
self.mask_style_layer.get(self.mask_layer_name).shape[1:3], orphan=orphan, normalize=normalize, soft_temp=soft_temp)
#Add orphan masks
if orphan:
self.orphan = 1
if debug:
show_masks(self.content_image, self.style_image, L, R, self.K + self.orphan, show_axis=True)
if use_dcrf:
L = [crf(content_image * 255, l)[0] for l in L]
R = [crf(style_image * 255, r)[0] for r in R]
if debug:
show_masks(self.content_image, self.style_image, L, R, self.K + self.orphan)
self.L = L
self.R = R
else:
#Get voc masks
L_voc = load_voc_mask(voc_names[0], shape=self.content_image.shape)
R_voc = load_voc_mask(voc_names[1], shape=self.content_image.shape)
# Drop border and turn to binary
self.L = toMultipleArray(pngToMaskFormat(L_voc))[0:20].astype("float32")
self.R = toMultipleArray(pngToMaskFormat(R_voc))[0:20].astype("float32")
self.K = self.L.shape[0]
if debug:
show_masks(self.content_image, self.style_image, self.L, self.R, self.K, show_axis=True, normalized=True, vmax=1)
self.build_and_optimize()
def build_and_optimize(self):
# We optimize on G
self.G = tf.Variable(self.init_image[None, ...], name="G", dtype=tf.float32)
self.vgg_g = vgg19.Vgg19(VGG_PATH + 'vgg19.npy')
self.vgg_g.build(self.G)
# Get G content
self.G_content_layer = get_layers(self.vgg_g, self.content_layer_name)
# Get G styles
self.G_style_layers = get_layers(self.vgg_g, self.style_layers_name)
with tf.name_scope("train"):
cost_content = self.content_loss()
self.final_content_loss = self.content_loss_weight * cost_content
if self.K > 0 or self.voc_names:
cost_style = self.style_loss_masks()
else:
cost_style = self.style_loss_normal()
#cost_style = self.style_loss_masks_dummy()
#cost_style += self.affinity_loss()
self.final_style_loss = self.style_loss_weight * (1 / len(self.style_layers_name)) * cost_style
#Matting
M = tf.to_float(getLaplacian(self.content_image))
self.loss_affine = self.affine_loss(self.G, M, self.matting_loss)[0][0]
self.loss_tv = self.total_variation_loss(self.G, self.tv_loss)
#loss_tv = 0.0001
loss = self.final_content_loss + self.final_style_loss + self.loss_affine + self.loss_tv
global iter_count
iter_count = 0
train_step = tf.contrib.opt.ScipyOptimizerInterface(
loss,
method='L-BFGS-B',
options={'maxiter': self.num_iter,
})
self.sess.run(tf.global_variables_initializer())
print("Start Training")
training = train_step.minimize(self.sess, fetches=[self.final_content_loss, self.final_style_loss, self.loss_affine, self.loss_tv], loss_callback=self.callback)
with tf.name_scope("image_out"):
image_out = tf.clip_by_value(tf.squeeze(self.G, [0]), 0, 1)
'''
for i in range(0, self.num_iter):
cost_p, _, cost_style_p, cost_content_p = self.sess.run([loss, training, cost_style, cost_content])
if i % 300 == 0:
print("iter = {}, total_cost = {}, cost_content = {}, cost style = {}".format(str(i),
cost_p,
cost_content_p,
cost_style_p))
'''
# Output
image_final = self.sess.run(image_out)
'''SMOOTH LOCAL AFFINE => CUDA ERROR'''
'''
content_input = np.array(self.content_image, dtype=np.float32)
# RGB to BGR
#content_input = content_input[:, :, ::-1]
# H * W * C to C * H * W
content_input = content_input.transpose((2, 0, 1))
input_ = np.ascontiguousarray(content_input, dtype=np.float32)
_, H, W = np.shape(input_)
output_ = np.ascontiguousarray(image_final.transpose((2, 0, 1)), dtype=np.float32)
best_ = smooth_local_affine(output_, input_, 1e-7, 3, H, W, 15, 1e-1).transpose(1, 2, 0)
result = Image.fromarray(np.uint8(np.clip(best_ * 255., 0, 255.)))
'''
plt.figure(figsize=(14, 12))
plt.subplot(4, 4, 1)
plt.imshow(self.init_image)
plt.subplot(4, 4, 2)
plt.imshow(self.content_image)
plt.subplot(4, 4, 3)
plt.imshow(self.style_image)
plt.subplot(4, 4, 4)
plt.imshow(image_final)
#writer = tf.summary.FileWriter("output", self.sess.graph)
self.image_final = image_final
# Return the final image, useful for two-step optimization
def get_final(self):
return self.image_final
# Function at each step
def callback(self, content_loss, style_loss, affine_loss, tv_loss):
global iter_count
if iter_count % 100 == 0:
if self.debug:
print('Iteration {} / {} '.format(iter_count, self.num_iter))
print('Content loss: {} / No weight: {}'.format(content_loss, content_loss / self.content_loss_weight))
print('Style loss: {} / No weight: {}'.format(style_loss, style_loss / self.style_loss_weight))
print('Affine loss: {} / No weight: {}'.format(affine_loss, affine_loss / self.matting_loss))
print('Tv loss: {} / No weight: {}'.format(tv_loss, tv_loss / self.tv_loss))
iter_count += 1
# Input is of shape [B, H, W, C]
def compute_gram(self, array):
shape = array.shape
matrix = np.reshape(array, [shape[1] * shape[2], shape[3]])
return np.matmul(matrix.transpose(), matrix)
# Input is of shape [B, H, W, C]
def compute_gram_tensor(self, tensor):
shape = tensor.get_shape()
matrix = tf.reshape(tensor, shape=[-1, int(shape[3])])
return tf.matmul(matrix, matrix, transpose_a=True)
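# Shape note: both gram helpers take a [1, H, W, C] feature map and return a
# [C, C] Gram matrix, i.e. channel-by-channel inner products over the H*W positions.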
# Computes the style loss over all K for each layer
def style_loss_masks(self):
loss = 0
for key in self.style_layers:
for k in range(0, self.K + self.orphan):
if k in self.k_style:
tensor = self.G_style_layers.get(key)
shape_g = tensor.get_shape().as_list()
g_mask = skimage.transform.resize(self.L[k][..., None], (shape_g[1], shape_g[2]),
mode='constant', order=0)
weighted_g_layer = tf.multiply(tensor, g_mask)
gram_g = self.compute_gram_tensor(weighted_g_layer)
g_mask_mean = tf.to_float(tf.reduce_mean(g_mask))
# Deep photo style transfer way, I prefer my way
'''
gram_g = tf.cond(tf.greater(g_mask_mean, 0.),
lambda: gram_g / (tf.to_float(tf.size(tensor)) * g_mask_mean),
lambda: gram_g
)
'''
layer_s = self.style_layers.get(key)
shape_s = layer_s.shape
s_mask = skimage.transform.resize(self.R[k][..., None], (shape_s[1], shape_s[2]),
mode='constant', order=0)
weighted_s_layer = tf.multiply(layer_s, s_mask)
gram_s = self.compute_gram_tensor(weighted_s_layer)
s_mask_mean = tf.to_float(tf.reduce_mean(s_mask))
'''
gram_s = tf.cond(tf.greater(s_mask_mean, 0.),
lambda: gram_s / (tf.to_float(layer_s.size) * s_mask_mean),
lambda: gram_s
)
'''
M = shape_g[3]
N = shape_g[1] * shape_g[2]
loss += tf.reduce_mean(tf.squared_difference(gram_g, gram_s)) * (1. / (4 * (N ** 2) * (M ** 2)) * ((g_mask_mean+s_mask_mean) / 2))
return loss
def style_loss_masks_old(self):
loss = 0
for key in self.style_layers:
for k in range(0, self.K + self.orphan):
tensor = self.G_style_layers.get(key)
shape_g = tensor.get_shape().as_list()
weighted_g_layer = tensor * skimage.transform.resize(self.L[k][..., None], (shape_g[1], shape_g[2]),
mode='constant', order=0)
gram_g = self.compute_gram_tensor(weighted_g_layer)
layer_s = self.style_layers.get(key)
shape_s = layer_s.shape
weighted_s_layer = layer_s * skimage.transform.resize(self.R[k][..., None], (shape_s[1], shape_s[2]),
mode='constant', order=0)
gram_s = self.compute_gram(weighted_s_layer)
M = shape_g[3]
N = shape_g[1] * shape_g[2]
loss += tf.reduce_mean(tf.pow((gram_g - gram_s), 2) * (1. / (4 * N ** 2 * M ** 2)))
return loss
# loss used with test square mask
def style_loss_masks_dummy(self):
loss = 0
L = self.L[0]
R = self.R[0]
L = np.reshape(L, [L.shape[0], L.shape[1], 1])
R = np.reshape(R, [R.shape[0], R.shape[1], 1])
L = np.zeros(L.shape).astype("float32")
L[L.shape[0] // 4:-L.shape[0] // 4, L.shape[1] // 4:-L.shape[1] // 4] = 1
R = np.zeros(R.shape).astype("float32")
R[R.shape[0] // 4:-R.shape[0] // 4, R.shape[1] // 4:-R.shape[1] // 4] = 1
# L = L[:, :, 0]
# R = R[:, :, 0]
if (self.debug):
plt.figure()
plt.imshow(L[:, :, 0])
plt.figure()
plt.imshow(R[:, :, 0])
for i in range(0, self.K):
for key in self.style_layers:
tensor = self.G_style_layers.get(key)
shape_g = tensor.get_shape().as_list()
# upscale masks to be same size as tensor
currL = skimage.transform.resize(L, (shape_g[1], shape_g[2]), mode='constant', order=0)
weighted_g_layer = tensor * currL
gram_g = self.compute_gram_tensor(weighted_g_layer)
layer_s = self.style_layers.get(key)
shape_s = layer_s.shape
currR = skimage.transform.resize(R, (shape_s[1], shape_s[2]), mode='constant', order=0)
weighted_s_layer = layer_s * currR
gram_s = self.compute_gram(weighted_s_layer)
M = shape_g[3]
N = shape_g[1] * shape_g[2]
loss += (1/self.K) * tf.reduce_mean(tf.pow((gram_g - gram_s), 2)) * (1. / (4 * N ** 2 * M ** 2))
return loss
# Basic style loss, used if K = 0
def style_loss_normal(self):
loss = 0
for key in self.style_layers:
tensor = self.G_style_layers.get(key)
shape = tensor.get_shape().as_list()
gram_g = self.compute_gram_tensor(tensor)
gram_s = self.grams_style.get(key)
M = shape[3]
N = shape[1] * shape[2]
loss += tf.reduce_mean(tf.pow((gram_g - gram_s), 2)) * (1. / (4 * (N ** 2) * (M ** 2)))
return loss
# Content loss term over a layer
def content_loss(self):
shape = self.content_layer.get(self.content_layer_name[0]).shape
return tf.reduce_mean(tf.pow(
self.content_layer.get(self.content_layer_name[0]) - self.G_content_layer.get(self.content_layer_name[0]),
2)) * 1. / (2 * shape[3] * shape[1] * shape[2])
# When tested affinity loss MSE
def affinity_loss(self):
return tf.reduce_mean(tf.pow(
self.A - self.compute_affinity_matrix_tf(self.G_content_layer.get(self.content_layer_name[0]),
tf.convert_to_tensor(
self.style_layer.get(self.content_layer_name[0]))),
2)) / 2
# Maps shape should be [1, H, W, C]
def compute_affinity_matrix_tf(self, content_maps, reference_maps):
print(content_maps)
print(reference_maps)
content_maps = tf.squeeze(content_maps, axis=0)
reference_maps = tf.squeeze(reference_maps, axis=0)
content_maps = tf.reshape(content_maps, [int(content_maps.get_shape()[0]) * int(content_maps.get_shape()[1]),
int(content_maps.get_shape()[2])])
reference_maps = tf.reshape(reference_maps,
[int(reference_maps.get_shape()[0]) * int(reference_maps.get_shape()[1]),
int(reference_maps.get_shape()[2])])
return tf.matmul(content_maps, reference_maps, transpose_b=True)
# Photorealistic loss from deep_photo style transfer
def affine_loss(self, output, M, weight):
loss_affine = 0.0
output_t = tf.to_float(output / 255.)
for Vc in tf.unstack(output_t, axis=-1):
Vc_ravel = tf.reshape(tf.transpose(Vc), [-1])
loss_affine += tf.matmul(tf.expand_dims(Vc_ravel, 0),
tf.sparse_tensor_dense_matmul(M, tf.expand_dims(Vc_ravel, -1)))
return loss_affine * weight
# tv loss also from deep photo style transfer
def total_variation_loss(self, output, weight):
shape = output.get_shape()
tv_loss = tf.reduce_sum((output[:, :-1, :-1, :] - output[:, :-1, 1:, :]) * (output[:, :-1, :-1, :] - output[:, :-1, 1:, :]) + \
(output[:, :-1, :-1, :] - output[:, 1:, :-1, :]) * (output[:, :-1, :-1, :] - output[:, 1:, :-1, :])) / 2.0
return tv_loss * weight
|
[
"adan.haefliger@gmail.com"
] |
adan.haefliger@gmail.com
|
a23dc84e74dd291b4b5d666d5f5ad24c9cf3b9dd
|
00d98ce8989df24850e22d4a8c0ec5ce8caab5cc
|
/hyperv/tests/unit/neutron/test_mech_hyperv.py
|
e0996c1aa8250ce42b1f18121b267fc1b8a199f1
|
[
"Apache-2.0"
] |
permissive
|
costingalan/networking-hyperv-debian
|
18a9d616550baf8f9b39134198505e480d7fa06c
|
b393d61059c8d6101610e855b90694a4b2c15fa0
|
refs/heads/master
| 2021-01-15T09:27:45.332769
| 2016-04-07T14:32:30
| 2016-04-08T11:54:16
| 55,423,283
| 0
| 1
| null | 2016-04-04T15:37:13
| 2016-04-04T15:37:13
| null |
UTF-8
|
Python
| false
| false
| 1,753
|
py
|
# Copyright 2015 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the Hyper-V Mechanism Driver.
"""
from hyperv.neutron import constants
from hyperv.neutron.ml2 import mech_hyperv
from hyperv.tests import base
class TestHypervMechanismDriver(base.BaseTestCase):
def setUp(self):
super(TestHypervMechanismDriver, self).setUp()
self.mech_hyperv = mech_hyperv.HypervMechanismDriver()
def test_get_allowed_network_types(self):
agent = {'configurations': {'tunnel_types': []}}
actual_net_types = self.mech_hyperv.get_allowed_network_types(agent)
network_types = [constants.TYPE_LOCAL, constants.TYPE_FLAT,
constants.TYPE_VLAN]
self.assertEqual(network_types, actual_net_types)
def test_get_allowed_network_types_nvgre(self):
agent = {'configurations': {'tunnel_types': [constants.TYPE_NVGRE]}}
actual_net_types = self.mech_hyperv.get_allowed_network_types(agent)
network_types = [constants.TYPE_LOCAL, constants.TYPE_FLAT,
constants.TYPE_VLAN, constants.TYPE_NVGRE]
self.assertEqual(network_types, actual_net_types)
|
[
"cgalan@cloudbasesolutions.com"
] |
cgalan@cloudbasesolutions.com
|
4738b14e85db765748a944082898280fecba8da3
|
a551f93d7362695584c7ff71b069839bd6185383
|
/hist_data_daily.py
|
e7ae48c343c8e77bf715254b790c7cf444dfd78e
|
[
"Apache-2.0"
] |
permissive
|
fcimclub/zipline_localization
|
243cbf34b0950b9417b602781ad73ff94f1c09e9
|
a1912fba3fa63a30686707561429b2c90bba2f0c
|
refs/heads/master
| 2020-03-26T21:35:58.877986
| 2018-08-20T09:36:57
| 2018-08-20T09:36:57
| 145,397,266
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
"""
Created on Thu May 17 14:00:02 2018
@author: l_cry
"""
# import modules
import tushare as ts
from datetime import datetime, timedelta
import time
import numpy as np
from sqlalchemy import create_engine
import pymysql
pymysql.install_as_MySQLdb()
engine = create_engine('mysql://root:120402@127.0.0.1:3306/data_stock?charset=utf8')
stock_basics = ts.get_stock_basics()
stock_basics.reset_index(inplace=True)
stock_basics.rename(columns={'index': 'code'}, inplace=True)
# stock_basics.to_sql('stock_market_info',con=engine,if_exists='append',index=False)
now = datetime.now()
end = now.strftime("%Y-%m-%d")
start = (now - timedelta(days=1)).strftime("%Y-%m-%d")
def insert_to_db(code_list, success_list=[], none_list=[], fail_list=[]):
for code in set(code_list) - set(success_list) - set(none_list):
try:
df = ts.get_h_data(code, autype=None, drop_factor=False, pause=5, retry_count=5)
# df = ts.get_h_data(code, autype=None, drop_factor=False, pause=5, retry_count=5, start=start, end=end)
if df.shape[0] != 0:
df.reset_index(inplace=True)
df['code'] = code
df['update_time'] = datetime(2018, 8, 4, 0, 0)
df.to_sql('hist_data_daily', con=engine, if_exists='append', index=False)
success_list.append(code)
else:
none_list.append(code)
except:
fail_list.append(code)
time.sleep(10)
return success_list, none_list, fail_list
code_list = stock_basics.code
fail_list = []
success_list = []
none_list = []
i = 0
while len(set(code_list) - set(success_list) - set(none_list)) > 0 and i < 10000:
success_list, none_list, fail_list = insert_to_db(code_list, success_list=success_list, none_list=none_list,
fail_list=fail_list)
i += 1
re = engine.execute('SELECT DISTINCT(code) from hist_data_daily')
res = re.fetchall()
success_list = list(map(lambda x:x[0],res))
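# (resume sketch) the three lines above rebuild success_list from the codes
# already stored in hist_data_daily; re-running insert_to_db(code_list,
# success_list=success_list, ...) then skips the codes already fetched.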
|
[
"l_crystalmei@163.com"
] |
l_crystalmei@163.com
|
f976ad877067cfc9f59726ccf2c189335c1fca12
|
9d8d72ed8966fe9b1d170d08eba2f513eaff5e41
|
/mysite/settings.py
|
4a1acc10ceff59ebd853595154a07b1d44a45b05
|
[] |
no_license
|
StruggleWugc/mysite
|
a99c8ec2be2e3bea23b34069078a19fb69053839
|
28d00a5bc5388654e455dd1977294fce3ed2d520
|
refs/heads/master
| 2020-08-02T05:30:25.672050
| 2019-09-27T06:18:48
| 2019-09-27T06:18:48
| 211,249,284
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,454
|
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_#)-p0h^84q#t3sd2f5@evd)+peg99auo@d$17)8n1h_-(x8s^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login',
'captcha',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587  # Gmail's STARTTLS submission port; required with EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'v9081759725@gmail.com'
EMAIL_HOST_PASSWORD = 'vigo.12345678'
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = EMAIL_HOST_USER
CONFIRM_DAYS = 1
LOGIN_URL = '/login/'
|
[
"260043657@qq.com"
] |
260043657@qq.com
|
3e0624c33598f87f639b58f024008cb9d83b3943
|
212b50b723606a04f813abf7b74678ef0c32b6cc
|
/namesandtitlesfromurl.py
|
fafb61ec8a5f10affbe90e2f42c59ae4c06f9bad
|
[] |
no_license
|
Metallicode/namesandtitlesfromurl
|
27f0d74c7001754bd19588ffa83793817a5c240b
|
7aef7d4496cc59c6048f4feeb739a4d9745c5904
|
refs/heads/main
| 2023-08-30T21:44:54.396318
| 2021-10-22T11:44:06
| 2021-10-22T11:44:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
#get live data
def _GetDataFromWeb(url, liveData = True):
if liveData:
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"}
response = requests.get(url, headers=headers)
data = response.text
else:
with open("file.txt", 'r') as f:
data = f.read()
return data
def _ParseData(data):
parsedData = BeautifulSoup(data, 'html.parser')
tables = parsedData.findAll("table")
theTable = None
for table in tables:
if '/s/' in table.text and 'Director' in table.text:
theTable = table
break
if theTable is None:
theTable = tables[-1]
rows = theTable.findAll('tr')
validstrings = []
for row in rows:
text = row.text
#print(text)
if text and text.strip():
t = text.replace(u'\u200b\u200b\u200b\u200b', u'').replace(u'\u200b', u'%').replace(u'/s/', u'')
validstrings.append([x.split('%') for x in t.strip().split("\xa0\xa0\xa0\xa0")])
#validstrings.append(t)
validstrings = [x[:2] for x in validstrings if x[0][0]!='']
finedata = []
for i in validstrings:
if len(i)>1:
finedata.append([i[0][0],i[1][0]])
else:
finedata.append(i[0][:2])
names = [x[0] for x in finedata[1:]]
titles = [x[1] for x in finedata[1:]]
df = pd.DataFrame(names, columns=['Name'])
df['Title'] = titles
return df
def _geturlandsavetofile(url):
data = _GetDataFromWeb(url)
with open("file.txt", 'w') as f:
f.write(data)
def Run(url, liveData=True):
return _ParseData(_GetDataFromWeb(url, liveData))
if __name__ == '__main__':
#_geturlandsavetofile("https://www.sec.gov/Archives/edgar/data/2186/0001654954-20-002248.txt")
print(Run("", False))
|
[
"gigahertzvectorizer@gmail.com"
] |
gigahertzvectorizer@gmail.com
|
f052b98290cc3160b827a9ea75f74adb6f505f38
|
17aabb4c2b24831fd7b1f42d9466692ab0fee6dd
|
/http
|
40079d109c8903aecfda53e9f5087bf698322bcd
|
[] |
no_license
|
kybr/AudioWorklet
|
e5927d303f7555f89850184d6a2facb42e036e01
|
a9497a737fc383bf925c5565e0e532bf00d69b25
|
refs/heads/master
| 2020-05-19T09:42:35.906735
| 2019-05-06T14:29:47
| 2019-05-06T14:29:47
| 184,955,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
#!/usr/bin/env python3
from http.server import HTTPServer,SimpleHTTPRequestHandler
#from socketserver import BaseServer
#import ssl
host = ('0.0.0.0', 8000)
httpd = HTTPServer(host, SimpleHTTPRequestHandler)
#httpd.socket = ssl.wrap_socket (httpd.socket, certfile='server.pem', server_side=True)
print(f"http://{host[0]}:{host[1]}/")
httpd.serve_forever()
# generate server.pem with the following command:
# openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes
# run as follows:
# python simple-https-server.py
# then, with the ssl lines above uncommented, visit in your browser:
# https://localhost:8000/
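# ssl.wrap_socket is deprecated in recent Python versions; an equivalent
# sketch with the SSLContext API (same self-signed server.pem) would be:
# ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# ctx.load_cert_chain('server.pem')
# httpd.socket = ctx.wrap_socket(httpd.socket, server_side=True)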
|
[
"karl.yerkes@gmail.com"
] |
karl.yerkes@gmail.com
|
|
ce6b37ae1c97a5184cf411a67ad0265dbefb8ffc
|
891970bb71242ce2d09f125437d9e7d3f0a883df
|
/testUnit/dataSetTest.py
|
8eb721bef3574d5dbbf5ea4c95a1c0c0d40ab540
|
[] |
no_license
|
donesky/ExpressionRecognition
|
fa71fab5b2ae7446a8402f2f26293de3c2eeb91f
|
a58792af8b8ad2a8e823a617502677d5ce5f7f37
|
refs/heads/master
| 2021-01-22T10:55:38.714711
| 2017-04-05T11:27:46
| 2017-04-05T11:27:46
| 82,048,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
import pytest
import testUnit
import preProcess.adjust as ad
import math
from numpy import *
# content of test_assert1.py
def test_function():
assert ad.Distance((1,1), (1,1)) == 0
assert ad.Distance((-1, 1), (0, 0)) == math.sqrt(2)
group = array([[1.0, 0.9], [1.0, 1.0], [0.1, 0.2], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B'] # four samples and two classes
testX = array([1.2, 1.0])
|
[
"dongqiuyealex@gmail.com"
] |
dongqiuyealex@gmail.com
|
d3482d3e49faeac27443f951c71c93ee3571612c
|
9a15e52c77d6c57eb5325ef78febf9c8590b410b
|
/social/serializers.py
|
f19f968dc7e6cba19433cd9284032e997852ef08
|
[] |
no_license
|
echopulse/DjangoSocialNetwork
|
6ae33657124470d18a27d4ad9517d6b23dc49e69
|
99f890e4e931275bc2a269cfc358cd28ef51afe2
|
refs/heads/master
| 2021-01-16T02:28:49.377144
| 2015-07-08T20:23:01
| 2015-07-08T20:23:01
| 32,706,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
from rest_framework import serializers
from social.models import Message, Member
class MemberSerializer(serializers.HyperlinkedModelSerializer):
receiver_member = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='message-detail'
)
sender_member = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='message-detail'
)
class Meta:
model = Member
fields = ('url', 'username', 'password', 'receiver_member', 'sender_member')
extra_kwargs = {'password': {'write_only': True}}
class MessageSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Message
fields = ('url', 'id', 'time', 'message', 'receiver', 'sender', 'is_private')
'''
class MemberSerializer(serializers.Serializer):
username = serializers.CharField(max_length=16)
password = serializers.CharField(max_length=16, write_only=True)
receiver_member = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='message-detail'
)
sender_member = serializers.HyperlinkedRelatedField(
many=True,
read_only=True,
view_name='message-detail'
)
class MessageSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
time = serializers.DateTimeField(read_only=True)
message = serializers.CharField(max_length=4096)
receiver = serializers.CharField(max_length=16)
sender = serializers.CharField(max_length=16)
is_private = serializers.BooleanField(required=False)
'''
|
[
"fil12@outlook.com"
] |
fil12@outlook.com
|
d9d8996dbd9f69be2cfc540fe18287acc5a429fb
|
94c8e3ec06a6294fc43f35bc7aa69e4ea5744d24
|
/blog/migrations/0014_auto_20210204_0250.py
|
c9030c079d97ca1269b91337f30c76a4692bcaaf
|
[] |
no_license
|
jonoise/portfolio_app
|
40383a813544ebd69d5376bcb7b0ab9df92e53ed
|
8aabc9c339f96124d5e2e2b1de00ccb6fc05cd4c
|
refs/heads/main
| 2023-08-20T05:26:47.773045
| 2021-09-28T21:35:40
| 2021-09-28T21:35:40
| 333,677,444
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
# Generated by Django 3.1.5 on 2021-02-04 02:50
import ckeditor.fields
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0013_auto_20210202_0236'),
]
operations = [
migrations.AlterField(
model_name='post',
name='description',
field=ckeditor.fields.RichTextField(blank=True, null=True),
),
migrations.AlterField(
model_name='post',
name='publish',
field=models.DateTimeField(default=datetime.datetime.now),
),
]
|
[
"yellowballpython@gmail.com"
] |
yellowballpython@gmail.com
|
0bba6a5e8196d1bdbe08b4775e5de437a9fd2292
|
f8278cd8ce31833dccb178fd1ecc1c2511fd5f06
|
/venv/Scripts/pip3.7-script.py
|
1428fb49f8bf18ebcfb3cd63119d5c6244c85c7c
|
[] |
no_license
|
meshall2019/Python
|
c5cf97ff16be674f73f8bee10cdf82765c6f924f
|
793dea9023b5c9eba88a27db1e527314c666350f
|
refs/heads/master
| 2022-04-05T04:49:12.483371
| 2020-02-26T11:08:32
| 2020-02-26T11:08:32
| null | 0
| 0
| null | null | null | null |
ISO-8859-5
|
Python
| false
| false
| 420
|
py
|
#!C:\Users\удкс\PycharmProjects\Test\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"mmas3l222@gamil.com"
] |
mmas3l222@gamil.com
|
23cb16f776bea63efaabfe15df06e08e59b44c8d
|
bddad6fbb4d991eceecff417f2de4f763413001d
|
/sage/query_engine/update/delete.py
|
2b7668c20cf80df087c6efc5b2f21e98e87afe61
|
[
"MIT"
] |
permissive
|
GDD-Nantes/sage-engine
|
39ff837bde5eb1a7eadd61548881a0889bed63fe
|
ea06b77b15b13f5297e937718c1c742e8afcc017
|
refs/heads/master
| 2021-01-02T02:41:56.890666
| 2020-02-04T10:17:01
| 2020-02-04T10:17:01
| 239,457,429
| 1
| 0
|
MIT
| 2020-02-10T08:00:30
| 2020-02-10T08:00:29
| null |
UTF-8
|
Python
| false
| false
| 2,446
|
py
|
# delete.py
# Author: Thomas MINIER - MIT License 2017-2020
from typing import Dict, List, Optional, Tuple
from sage.database.core.dataset import Dataset
from sage.query_engine.iterators.preemptable_iterator import PreemptableIterator
from sage.query_engine.protobuf.iterators_pb2 import SavedDeleteData
from sage.query_engine.protobuf.utils import pyDict_to_protoDict
class DeleteOperator(PreemptableIterator):
"""A DeleteOperator deletes RDF triples from a RDF dataset.
Args:
* quads: List of RDF quads to delete from the RDF dataset.
* dataset: RDF dataset.
"""
def __init__(self, quads: List[Tuple[str, str, str, str]], dataset: Dataset):
super(DeleteOperator, self).__init__()
self._quads = quads
self._dataset = dataset
# we store how many quads were deleted in each RDF graph
self._inserted = dict()
def __repr__(self) -> str:
return f"<DeleteOperator quads={self._quads}>"
def serialized_name(self) -> str:
"""Get the name of the iterator, as used in the plan serialization protocol"""
return "delete"
def has_next(self) -> bool:
"""Return True if the iterator has more quads to delete"""
return len(self._quads) > 0
async def next(self) -> Optional[Dict[str, str]]:
"""Delete the next quad from the RDF dataset.
This function works in an iterator fashion, so it can be used in a pipeline of iterators.
It may also contain `non-interruptible` clauses which must
be atomically evaluated before preemption occurs.
Returns: The quad if it was successfully deleted, otherwise it returns `None`.
Throws: `StopAsyncIteration` if the iterator has no more quads to delete.
"""
if not self.has_next():
raise StopAsyncIteration()
s, p, o, g = self._quads.pop()
if self._dataset.has_graph(g):
self._dataset.get_graph(g).delete(s, p, o)
# update counters
if g in self._inserted:
self._inserted[g] += 1
else:
self._inserted[g] = 1  # the first deletion in this graph counts as one
return {"?s": s, "?p": p, "?o": o, "?graph": g}
return None
def save(self) -> SavedDeleteData:
"""Save and serialize the iterator as a Protobuf message"""
saved = SavedDeleteData()
pyDict_to_protoDict(self._inserted, saved.nb_inserted)
return saved
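# Minimal driving loop (sketch; assumes an asyncio event loop, a populated
# `dataset`, and `quads` as a list of (subject, predicate, object, graph) tuples):
# import asyncio
# async def drain(op):
#     while op.has_next():
#         await op.next()
# asyncio.run(drain(DeleteOperator(quads, dataset)))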
|
[
"tminier01@gmail.com"
] |
tminier01@gmail.com
|
520eacd3cd22300038f00e68a68c9d2e1efc4382
|
47760e6a1f05d3a0a75fc9da035abc12011da4f4
|
/deep_sort/tracker.py
|
ad22af44e226231a4b6e0bf478409d6f046f165d
|
[
"MIT"
] |
permissive
|
pythonlessons/TensorFlow-2.x-YOLOv3
|
bad66c771295b7053c6df53f2caaba209c9a0fbc
|
9f29d73ee24cd5db4ead280f95ff06f66d538fc2
|
refs/heads/master
| 2023-08-07T07:46:43.414113
| 2022-10-04T06:56:21
| 2022-10-04T06:56:21
| 257,681,773
| 632
| 385
|
MIT
| 2022-10-04T06:55:59
| 2020-04-21T18:29:55
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,485
|
py
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
Maximum number of consecutive misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, metric, max_iou_distance=0.7, max_age=30, n_init=3):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
for track in self.tracks:
track.predict(self.kf)
def update(self, detections):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections = \
self._match(detections)
# Update track set.
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
self.kf, detections[detection_idx])
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx])
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed():
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(
np.asarray(features), np.asarray(targets), active_targets)
def _match(self, detections):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(
self.kf, cost_matrix, tracks, dets, track_indices,
detection_indices)
return cost_matrix
# Split track set into confirmed and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed()]
# Associate confirmed tracks using appearance features.
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, self.max_age,
self.tracks, detections, confirmed_tracks)
# Associate remaining tracks together with unconfirmed tracks using IOU.
iou_track_candidates = unconfirmed_tracks + [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update == 1]
unmatched_tracks_a = [
k for k in unmatched_tracks_a if
self.tracks[k].time_since_update != 1]
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, iou_track_candidates, unmatched_detections)
matches = matches_a + matches_b
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
return matches, unmatched_tracks, unmatched_detections
def _initiate_track(self, detection):
mean, covariance = self.kf.initiate(detection.to_xyah())
class_name = detection.get_class()
self.tracks.append(Track(
mean, covariance, self._next_id, self.n_init, self.max_age,
detection.feature, class_name))
self._next_id += 1
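# Typical per-frame loop (sketch following the docstring contract above):
# tracker = Tracker(metric)
# for detections in frames:        # List[deep_sort.detection.Detection]
#     tracker.predict()            # once per time step, before update()
#     tracker.update(detections)
#     confirmed = [t for t in tracker.tracks if t.is_confirmed()]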
|
[
"noreply@github.com"
] |
pythonlessons.noreply@github.com
|
0be81d9de12861781b620abdc89bd97d6089d604
|
fb9a703a58b87079dddfc6f7661a11811ebc863f
|
/docker4circ/src/circhunter/circ_classifier.py
|
c0b1dffd4c4f52ecddf3ef90075273fefc4b6c2b
|
[] |
no_license
|
cursecatcher/biodocker
|
9059d2d70ca5b085e414a9b0b2e5696108e8e850
|
0521856d915c7e25f00dd387727f47f1f23b805b
|
refs/heads/master
| 2021-07-20T06:39:09.350603
| 2020-06-23T09:08:10
| 2020-06-23T09:08:10
| 185,957,438
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,672
|
py
|
#!/usr/bin/env python
import os,sys
"""
LAST UPDATED VERSION
-> WRITING UNIVOCAL CLASSIFICATION
This script calculates the presumed linear sequences that form between
the exons neighbouring a circRNA and the circRNA itself.
It takes as input:
- export of all the exons
- intersect of circRNAs with bedtools with those exons
Note: the script will consider only exons at the 5' and 3' of circRNAs
"""
# Defining dictionaries
exon_dic = {}
circ_dic = {}
sorted_circ_dic = {}
rank_dic = {}
sorted_rank_dic = {}
unaligned_exon_dic = {}
intertranscript_dic = {}
# Opening input files
exon_data = open(sys.argv[1]).readlines()
overlap_data = open(sys.argv[2]).readlines()
"""
FUNCTION: save_exons
OBJECTIVE: saves all exons from a biomart export into a dictionary
x: assign to data
d: assign to exon_dic
"""
def save_exons(x, d):
# Defining key
exon_name = str("%s_%s" % (x[1], x[2]))
if exon_name not in d:
d[exon_name] = x
"""
FUNCTION: circ_transcripts
OBJECTIVE: to create a dictionary containing all the exons involved in
a circRNA
x: assign to data
d: assign to circ_dic
un: assign to unaligned_exon_dic
exons: assign to exon_dic
"""
def circ_transcripts (x, d, un, it, exons):
# Defining key: composed by circ name and transcript (chr_start_end_ENST)
n = x[3].strip().split("_")
circ_name = str("%s_%s" % (x[8], n[0]))
strand = x[4]
if circ_name not in d:
d[circ_name] = []
# Defining ENSE
e = x[3].strip().split("_")
ENSE = e[1]
# Defining the variables needed for exon checks
ex_start = int(x[1])
ex_end = int(x[2])
circ_start = int(x[6])
circ_end = int(x[7])
# Searching for exon rank in exon_dic
exon = x[3]
rank = str(exons[exon][7])
ENSG = exons[exon][0]
ENST = exons[exon][1]
# Creating tuple to append to dictionary
new_entry = (rank, ENSE, strand, ENSG, ENST)
# Checking exons and introns and appending ENSEs in the dictionary
if ( (ex_start == circ_start and ex_end <= circ_end)
or (ex_end == circ_end and ex_start >= circ_start) ):
d[circ_name].append(new_entry)
# extended check for introns overlapping start/end of circRNAs
elif ((len(rank) == 4 and circ_start > ex_start and circ_start < ex_end)
or (len(rank) == 4 and circ_end > ex_start and circ_end < ex_end)):
d[circ_name].append(new_entry)
# extended check for exons overlapping start/end of circRNAs to save
# in the unaligned_exon_dic, for later putative_exon classification
elif ((len(rank) == 3 and circ_start > ex_start and circ_start < ex_end)
or (len(rank) == 3 and circ_end > ex_start and circ_end < ex_end)):
if circ_name not in un:
un[circ_name] = []
un[circ_name].append(new_entry)
# SECTION FOR INTERTRANSCRIPT circRNAs
circ = x[8]
if circ not in it:
it[circ] = []
# Appending first element in indiscriminate manner
if len(it[circ]) == 0:
if (ex_start == circ_start and ex_end <= circ_end):
new = ("start", ENST)
it[circ].append(new)
elif (ex_end == circ_end and ex_start >= circ_start):
new = ("end", ENST)
it[circ].append(new)
# Appending the second element only if it maps to the opposite
# side of the circRNA
elif len(it[circ]) == 1:
if (it[circ][0][0] != "start" and it[circ][0][1] != ENST
and ex_start == circ_start and ex_end <= circ_end):
new = ("start", ENST)
it[circ].append(new)
elif (it[circ][0][0] != "end" and it[circ][0][1] != ENST
and ex_end == circ_end and ex_start >= circ_start):
new = ("end", ENST)
it[circ].append(new)
"""
FUNCTION: save_ranks
OBJECTIVE: create a dictionary filled with exon ranks, in order to later
calculate max and min rank of a transcript
x: refers to data
d: refers to rank_dic
"""
def save_ranks(x, d):
ENST = x[1]
rank = str(x[7])
if ENST not in d:
d[ENST] = []
# Adding rank value to transcript in dictionary
d[ENST].append((rank, x[2]))
"""
FUNCTION: seq5
OBJECTIVE: obtain the coordinates of the first exon of the circRNA and
the coordinates of the exon before it.
c: refers to circ
ex: refers to exon (obtained from sorted_circ_dic[circ][0])
rd: refers to sorted_rank_dic
ed: refers to exon_dic
l: refers to label
"""
"""
def seq5(c, ex, rd, ed, l):
# Defining variables
c_info = c.strip().split("_")
circ_name = ("%s_5linseq" % c)
ENST = c_info[3]
exon2_rank = ex[0] # Rank of circRNA exon
exon1_rank = ex[0] - 1 # Rank of exon before circRNA
exon2_pos = exon2_rank -1 # Position in sorted_transcript_dic of circRNA exon
exon1_pos = exon1_rank -1 # Position in sorted_transcript_dic of exon before circRNA
exon2_ENSE = rd[ENST][exon2_pos][1]
exon1_ENSE = rd[ENST][exon1_pos][1]
exon2_searchterm = str("%s_%s" % (ENST, exon2_ENSE)) # Search term for circRNA exon in exon_dic
exon1_searchterm = str("%s_%s" % (ENST, exon1_ENSE)) # Search term for exon before the circRNA in exon_dic
# Searching exon information
exon2_info = ed[exon2_searchterm]
exon1_info = ed[exon1_searchterm]
# Defining coordinates of exons
exon2_start = exon2_info[4]
exon2_end = exon2_info[5]
exon1_start = exon1_info[4]
exon1_end = exon1_info[5]
strand = ex[2]
final_output = [circ_name, l, exon1_ENSE, exon1_start, exon1_end, exon2_ENSE, exon2_start, exon2_end, strand]
print("%s" % ('\t'.join(final_output)))
"""
"""
FUNCTION: seq3
OBJECTIVE: obtain the coordinates of the last exon of the circRNA and
the coordinates of the exon after it.
c: refers to circ
ex: refers to exon (obtained from sorted_circ_dic[circ][0])
rd: refers to sorted_rank_dic
ed: refers to exon_dic
l: refers to label
"""
"""
def seq3(c, ex, rd, ed, l):
# Defining variables
c_info = c.strip().split("_")
circ_name = ("%s_3linseq" % c)
ENST = c_info[3]
exon1_rank = ex[0] # Rank of circRNA exon
exon2_rank = ex[0] + 1 # Rank of exon after the circRNA
exon1_pos = exon1_rank -1 # Position in sorted_transcript_dic of circRNA exon
exon2_pos = exon2_rank -1 # Position in sorted_transcript_dic of exon after circRNA
exon1_ENSE = rd[ENST][exon1_pos][1]
exon2_ENSE = rd[ENST][exon2_pos][1]
exon1_searchterm = str("%s_%s" % (ENST, exon1_ENSE)) # Search term for circRNA exon in exon_dic
exon2_searchterm = str("%s_%s" % (ENST, exon2_ENSE)) # Search term for exon after the circRNA in exon_dic
# Searching exon information
exon1_info = ed[exon1_searchterm]
exon2_info = ed[exon2_searchterm]
# Defining coordinates of exons
exon1_start = exon1_info[4]
exon1_end = exon1_info[5]
exon2_start = exon2_info[4]
exon2_end = exon2_info[5]
strand = ex[2]
final_output = [circ_name, l, exon1_ENSE, exon1_start, exon1_end, exon2_ENSE, exon2_start, exon2_end, strand]
print("%s" % ('\t'.join(final_output)))
"""
########################################################################
########################################################################
########################################################################
########################################################################
# Main cycle for exons
for exon in exon_data:
data = exon.strip().split("\t")
save_exons(data, exon_dic) # Fills exon_dic
save_ranks(data, rank_dic) # Fills rank_dic
# Main cycle for circRNA overlaps
for circ in overlap_data:
data = circ.strip().split("\t")
circ_transcripts(data, circ_dic, unaligned_exon_dic, intertranscript_dic, exon_dic)
# Sorting circ_dic
for circ in circ_dic:
ex_list = circ_dic[circ]
def getKey(item):
return item[0]
# Filling sorted dictionary
sorted_circ_dic[circ] = sorted(ex_list, key=getKey)
# Purging unsorted circRNA dictionary
circ_dic = {}
# Sorting rank_dic
for transcript in rank_dic:
rank_list = rank_dic[transcript]
def getKey(item):
return item[0]
# Filling sorted dictionary
sorted_rank_dic[transcript] = sorted(rank_list, key=getKey)
# Purging unsorted rank dictionary
rank_dic = {}
"""
# Checks for linear sequences to report
for circ in sorted_circ_dic:
# Defining transcript
transcript = circ.strip().split("_")[3]
# Defining min and max rank of transcript
minimum = min(sorted_rank_dic[transcript])
maximum = max(sorted_rank_dic[transcript])
rank_min = minimum[0]
rank_max = maximum[0]
####################################################################
# Check for multiexonic circRNAs
if len(sorted_circ_dic[circ]) == 2:
# Calculating ranks of circ exons
rank_ex5 = sorted_circ_dic[circ][0][0]
rank_ex3 = sorted_circ_dic[circ][1][0]
# Checking position of circRNA in the transcript
if rank_ex5 > rank_min and rank_ex3 < rank_max:
label = "multiexon"
seq5(circ, sorted_circ_dic[circ][0], sorted_rank_dic, exon_dic, label)
seq3(circ, sorted_circ_dic[circ][1], sorted_rank_dic, exon_dic, label)
elif rank_ex5 == rank_min and rank_ex3 < rank_max:
label = "multiexon_5boundary" # Multiexon at 5' of transcript
seq3(circ, sorted_circ_dic[circ][1], sorted_rank_dic, exon_dic, label)
elif rank_ex5 > rank_min and rank_ex3 == rank_max:
label = "multiexon_3boundary" # Multiexon at 3' of transcript
seq5(circ, sorted_circ_dic[circ][0], sorted_rank_dic, exon_dic, label)
elif rank_ex5 == rank_min and rank_ex3 == rank_max:
label = "missing_exon"
print("%s\t%s" % (circ, label))
####################################################################
# Check for monoexonic circRNAs
elif len(sorted_circ_dic[circ]) == 1:
# Calculating rank of circ exon
rank = sorted_circ_dic[circ][0][0]
# Checks for true monoexonic circRNA
circ_start = int(circ.strip().split("_")[1])
circ_end = int(circ.strip().split("_")[2])
ENSE = sorted_circ_dic[circ][0][1]
ENST = circ.strip().split("_")[3]
exon_search = ("%s_%s" % (ENST, ENSE))
exon_start = int(exon_dic[exon_search][4])
exon_end = int(exon_dic[exon_search][5])
if circ_start != exon_start or circ_end != exon_end:
label = "missing_exon"
print("%s\t%s" % (circ, label))
else:
if rank > rank_min and rank < rank_max:
# Monoexonic circ with both sequences calculated
label = "monoexon"
seq5(circ, sorted_circ_dic[circ][0], sorted_rank_dic, exon_dic, label)
seq3(circ, sorted_circ_dic[circ][0], sorted_rank_dic, exon_dic, label)
elif rank == rank_min and rank < rank_max:
# Monoexon at 5' of transcript
label = "monoexon_5boundary"
seq3(circ, sorted_circ_dic[circ][0], sorted_rank_dic, exon_dic, label)
elif rank > rank_min and rank == rank_max:
# Monoexon at 3' of transcript
label = "monoexon_3boundary"
seq5(circ, sorted_circ_dic[circ][0], sorted_rank_dic, exon_dic, label)
####################################################################
# Check for missing exons
else:
label = "missing_exon"
print("%s\t%s" % (circ, label))
"""
####################### CIRCULARIZING EXONS SECTION ####################
for circ in sorted_circ_dic:
if len(sorted_circ_dic[circ]) == 2:
rank1 = sorted_circ_dic[circ][0][0]
rank2 = sorted_circ_dic[circ][1][0]
if (len(rank1) == 3 and len(rank2) == 3):
label = "multiexon"
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(circ, label,
sorted_circ_dic[circ][0][3], # ENSG of first exon
sorted_circ_dic[circ][0][4], # ENST of first exon
sorted_circ_dic[circ][0][1], # ENSE of first exon
sorted_circ_dic[circ][0][0], # Rank of first exon
sorted_circ_dic[circ][0][2], # Strand of first exon
sorted_circ_dic[circ][1][3], # ENSG of second exon
sorted_circ_dic[circ][1][4], # ENST of second exon
sorted_circ_dic[circ][1][1], # ENSE of second exon
sorted_circ_dic[circ][1][0], # Rank of second exon
sorted_circ_dic[circ][1][2])) # Strand of second exon
elif (len(rank1) == 4 or len(rank2) == 4):
label = "intronic"
print("%s\t%s" % (circ, label))
elif len(sorted_circ_dic[circ]) == 1:
# Calculating rank of circ exon
rank = sorted_circ_dic[circ][0][0]
# Checks for true monoexonic circRNA
circ_start = int(circ.strip().split("_")[1])
circ_end = int(circ.strip().split("_")[2])
ENSE = sorted_circ_dic[circ][0][1]
ENST = circ.strip().split("_")[3]
exon_search = ("%s_%s" % (ENST, ENSE))
exon_start = int(exon_dic[exon_search][4])
exon_end = int(exon_dic[exon_search][5])
if circ_start != exon_start or circ_end != exon_end:
# Fetching gene start and gene end
gene_start = int(exon_dic[exon_search][8])
gene_end = int(exon_dic[exon_search][9])
# Checking for intronic or intergenic circRNAs
if (circ_start >= gene_start and circ_end <= gene_end):
if (circ_start < exon_start and len(rank) == 3):
if circ in unaligned_exon_dic:
label=("putativeexon")
# Check needed to write the output in the correct order
if (unaligned_exon_dic[circ][0][2] == '+'):
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(circ, label,
unaligned_exon_dic[circ][0][3], # ENSG of first exon
unaligned_exon_dic[circ][0][4], # ENST of first exon
unaligned_exon_dic[circ][0][1], # ENSE of first exon
unaligned_exon_dic[circ][0][0], # Rank of first exon
unaligned_exon_dic[circ][0][2], # Strand of first exon
sorted_circ_dic[circ][0][3], # ENSG of second exon
sorted_circ_dic[circ][0][4], # ENST of second exon
sorted_circ_dic[circ][0][1], # ENSE of second exon
sorted_circ_dic[circ][0][0], # Rank of second exon
sorted_circ_dic[circ][0][2])) # Strand of second exon
else:
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(circ, label,
sorted_circ_dic[circ][0][3], # ENSG of second exon
sorted_circ_dic[circ][0][4], # ENST of second exon
sorted_circ_dic[circ][0][1], # ENSE of second exon
sorted_circ_dic[circ][0][0], # Rank of second exon
sorted_circ_dic[circ][0][2], # Strand of second exon
unaligned_exon_dic[circ][0][3], # ENSG of first exon
unaligned_exon_dic[circ][0][4], # ENST of first exon
unaligned_exon_dic[circ][0][1], # ENSE of first exon
unaligned_exon_dic[circ][0][0], # Rank of first exon
unaligned_exon_dic[circ][0][2])) # Strand of first exon
else:
label = "intronic"
print("%s\t%s" % (circ, label))
elif (circ_end > exon_end and len(rank) == 3):
if circ in unaligned_exon_dic:
label=("putativeexon")
# Check needed to write the output in the correct order
if (unaligned_exon_dic[circ][0][2] == '+'):
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(circ, label,
sorted_circ_dic[circ][0][3], # ENSG of second exon
sorted_circ_dic[circ][0][4], # ENST of second exon
sorted_circ_dic[circ][0][1], # ENSE of second exon
sorted_circ_dic[circ][0][0], # Rank of second exon
sorted_circ_dic[circ][0][2], # Strand of second exon
unaligned_exon_dic[circ][0][3], # ENSG of first exon
unaligned_exon_dic[circ][0][4], # ENST of first exon
unaligned_exon_dic[circ][0][1], # ENSE of first exon
unaligned_exon_dic[circ][0][0], # Rank of first exon
unaligned_exon_dic[circ][0][2])) # Strand of first exon
else:
print("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(circ, label,
unaligned_exon_dic[circ][0][3], # ENSG of first exon
unaligned_exon_dic[circ][0][4], # ENST of first exon
unaligned_exon_dic[circ][0][1], # ENSE of first exon
unaligned_exon_dic[circ][0][0], # Rank of first exon
unaligned_exon_dic[circ][0][2], # Strand of first exon
sorted_circ_dic[circ][0][3], # ENSG of second exon
sorted_circ_dic[circ][0][4], # ENST of second exon
sorted_circ_dic[circ][0][1], # ENSE of second exon
sorted_circ_dic[circ][0][0], # Rank of second exon
sorted_circ_dic[circ][0][2])) # Strand of second exon
else:
label = "intronic"
print("%s\t%s" % (circ, label))
else:
label = "intronic"
print("%s\t%s" % (circ, label))
elif (circ_start < gene_start or circ_end > gene_end):
label = "intergenic"
print("%s\t%s" % (circ, label))
elif len(rank) == 3:
label = "monoexon"
print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (circ,
label,
sorted_circ_dic[circ][0][3], # ENSG of first exon
sorted_circ_dic[circ][0][4], # ENST of first exon
sorted_circ_dic[circ][0][1], # ENSE of exon
sorted_circ_dic[circ][0][0], # Rank of exon
sorted_circ_dic[circ][0][2])) # Strand of exon
elif len(rank) == 4:
label = "intronic"
print("%s\t%s" % (circ, label))
elif len(sorted_circ_dic[circ]) == 0:
# Defining variables needed to check for intronic or intergenic condition
circ_start = int(circ.strip().split("_")[1])
circ_end = int(circ.strip().split("_")[2])
ENST = circ.strip().split("_")[3]
# A complete key needs to be reconstructed in order to search
# the exon dictionary correctly. Any exon of the ENST considered
# will be ok, since the searched gene values are all the same
# for every exon in the same transcript
ENSE = sorted_rank_dic[ENST][0][1]
exon_search = "%s_%s" % (ENST, ENSE)
"""
# Old method, kept for reference: linearly scan all dictionary keys
# when the key structure is not known in advance (inefficient)
for key in exon_dic.keys():
if key.startswith(ENST):
exon_search = key
break
"""
gene_start = int(exon_dic[exon_search][8])
gene_end = int(exon_dic[exon_search][9])
transcript_start = int(exon_dic[exon_search][10])
transcript_end = int(exon_dic[exon_search][11])
# Checking for intronic or intergenic circRNA
if (circ_start >= gene_start and circ_end <= gene_end):
# NEWCODE
if (circ in unaligned_exon_dic and circ_start >= transcript_start and circ_end <= transcript_end):
label = "putativeexon"
print("%s\t%s\t%s\t%s\t%s\t%s\t%s" %
(circ, label,
unaligned_exon_dic[circ][0][3], # ENSG of first exon
unaligned_exon_dic[circ][0][4], # ENST of first exon
unaligned_exon_dic[circ][0][1], # ENSE of first exon
unaligned_exon_dic[circ][0][0], # Rank of first exon
unaligned_exon_dic[circ][0][2])) # Strand of first exon
else:
label = "intronic"
print("%s\t%s" % (circ, label))
elif (circ_start < gene_start or circ_end > gene_end):
label = "intergenic"
print("%s\t%s" % (circ, label))
else:
print("ERROR - len of sorted_circ_dic is %s" % (len(sorted_circ_dic[circ])))
# Printing intertranscript circRNAs
for circ in intertranscript_dic:
if len(intertranscript_dic[circ]) == 2:
print("%s_%s\tintergenic" % (circ, intertranscript_dic[circ][0][1]))
print("%s_%s\tintergenic" % (circ, intertranscript_dic[circ][1][1]))
# It was decided to refer to intertranscript circs as intergenic
# The old printing statements are below
# print("%s_%s\tintertranscript" % (circ, intertranscript_dic[circ][0][1]))
# print("%s_%s\tintertranscript" % (circ, intertranscript_dic[circ][1][1]))
################ DEBUG SECTION ########################################
"""
print("\nexon_dic")
for exon in exon_dic:
print("%s\t%s" % (exon, exon_dic[exon]))
print("\nsorted_circ_dic")
for circ in sorted_circ_dic:
print("%s\t%s" % (circ, sorted_circ_dic[circ]))
print("\nsorted_rank_dic")
for rank in sorted_rank_dic:
print("%s\t%s" % (rank, sorted_rank_dic[rank]))
print("\nunaligned_exon_dic")
for exon in unaligned_exon_dic:
print("%s\t%s" % (exon, unaligned_exon_dic[exon]))
print("\nintertranscript_dic")
for circ in intertranscript_dic:
print("%s\t%s" % (circ, intertranscript_dic[circ]))
"""
|
[
"nicola.licheri@gmail.com"
] |
nicola.licheri@gmail.com
|
41d4fa69d69c9f2e3d7c678ea517d29cb765ffd2
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq2308.py
|
70c40cd0d6328c50f7c76f34f4d3e2738f0eb7e9
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,077
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=39
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=27
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=36
c.append(cirq.X.on(input_qubit[3])) # number=37
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=38
c.append(cirq.H.on(input_qubit[3])) # number=30
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=31
c.append(cirq.H.on(input_qubit[3])) # number=32
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=34
c.append(cirq.H.on(input_qubit[0])) # number=35
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=23
c.append(cirq.Z.on(input_qubit[3])) # number=24
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=25
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=22
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=13
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=17
c.append(cirq.X.on(input_qubit[3])) # number=18
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=19
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=15
c.append(cirq.Y.on(input_qubit[2])) # number=10
c.append(cirq.Y.on(input_qubit[2])) # number=11
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2308.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
e5934fb0f7a75782df30b02e4014b814ba7da9b5
|
a64f122dd4df3e20bc3e25aca31bb11ec9d55977
|
/Assignment 3/madelon tricks.py
|
4ec6ab3ce62d92cde3f7c56c28d599185143b045
|
[] |
no_license
|
mbrine555/gatech_ML
|
f9de5e1e1c29e40693030fcf3dce4797339f3ada
|
2a3dea874ac7710104fb891a5199afa9f3c046af
|
refs/heads/master
| 2020-04-16T10:39:44.328425
| 2019-04-10T11:54:37
| 2019-04-10T11:54:37
| 165,512,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,999
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 18 13:08:50 2017
@author: JTay
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from helpers import nn_arch,nn_reg,ImportanceSelect
from matplotlib import cm
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import pairwise_distances
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
out = './PCA/'
cmap = cm.get_cmap('Spectral')
np.random.seed(0)
madelon = pd.read_hdf('./BASE/datasets.hdf','madelon')
madelonX = madelon.drop('Class',1).copy().values
madelonY = madelon['Class'].copy().values
scaler = StandardScaler()
madelon_test = pd.read_hdf('./BASE/datasets.hdf','madelon')
madelon_tstX = madelon_test.drop('Class',1).copy().values
madelon_tstY = madelon_test['Class'].copy().values
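# note: the test set above is read from the same 'madelon' key as the
# training set, so train and test data are identical here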
from sklearn.ensemble import RandomForestClassifier
madelonX = scaler.fit_transform(madelonX)
madelon_tstX = scaler.transform(madelon_tstX)
#Reproduce best estimator so far
#if __name__=='__main__':
# rfc = RandomForestClassifier(n_estimators=100,class_weight='balanced',random_state=5,n_jobs=7)
# filtr = ImportanceSelect(rfc)
# grid ={'filter__n':[20],'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
# mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
# pipe = Pipeline([('filter',filtr),('NN',mlp)])
# gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
# gs.fit(madelonX,madelonY)
# print('Best CV Score {}'.format(gs.best_score_))
# print('Test Score {}'.format(gs.score(madelon_tstX,madelon_tstY)))
# rf_features = gs.best_estimator_.steps[0][1].model.feature_importances_.argsort()[::-1][:20]
# Use PCA to find the truly informative features
pca = PCA(random_state=5,n_components=500)
pca.fit(madelonX)
ve = pd.Series(pca.explained_variance_)
ve.plot()
plt.xlabel('Component')
plt.ylabel('Variance Explained')
tmp = pd.DataFrame(pca.components_)
tmp=tmp.iloc[-15:,:]
pca_features=tmp.columns[tmp.abs().max()>0.1]
xx= madelonX[:,pca_features]
xx_tst = madelon_tstX[:,pca_features]
## NN testing - standard param set
#grid ={'alpha':nn_reg,'hidden_layer_sizes':nn_arch}
#mlp = MLPClassifier(activation='relu',max_iter=3000,early_stopping=False,random_state=5)
#gs = GridSearchCV(mlp,param_grid=grid,verbose=10,cv=5)
#gs.fit(madelonX[:,pca_features],madelonY)
#print('NN - Standard params - Best CV Score {}'.format(gs.best_score_))
#print('NN - Standard params - Test Score {}'.format(gs.score(xx_tst,madelon_tstY)))
#
#
#
## NN testing - standard param set
#grid ={'alpha':[1e-4,1e-5,1e-6],'hidden_layer_sizes':[(200,100,100,64,100,100,200)]}
#mlp = MLPClassifier(activation='relu',max_iter=3000,early_stopping=False,random_state=5)
#gs = GridSearchCV(mlp,param_grid=grid,verbose=10,cv=5)
#gs.fit(madelonX[:,pca_features],madelonY)
#print('NN - Big network- Best CV Score {}'.format(gs.best_score_))
#print('NN - Big network - Test Score {}'.format(gs.score(xx_tst,madelon_tstY)))
#KNN
knn = KNeighborsClassifier()
grid={'n_neighbors':range(1,25,1),'p':[1,2],'weights':['uniform','distance']}
gs = GridSearchCV(knn,param_grid=grid,cv=5,verbose=10)
gs.fit(xx,madelonY)
print('KNN - Best CV Score {}'.format(gs.best_score_))
print('KNN - Test Score {}'.format(gs.score(xx_tst,madelon_tstY)))
# SVM
dis = pairwise_distances(xx)
m = np.median(dis)
gammas = [(1/m)*x for x in np.arange(0.1,2.1,0.1)]+[0.1,0.2,0.3,0.4,0.5]
gammas = np.arange(0.1,0.9,0.05)
gammas = [(1/m)*x for x in np.arange(0.1,2.1,0.1)]
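# note: the first two `gammas` assignments above are dead code; only the
# assignment immediately above takes effect in the grid search below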
param_grid={'gamma':gammas,'C':[10**x for x in [-1,0,1,2,3]]}
gs = GridSearchCV(SVC(kernel='rbf',C=1),param_grid=param_grid,cv=5,verbose=10,n_jobs=1)
gs.fit(xx,madelonY)
print('SVM - Best CV Score {}'.format(gs.best_score_))
print('SVM - Test Score {}'.format(gs.score(xx_tst,madelon_tstY)))
|
[
"briner.15@osu.edu"
] |
briner.15@osu.edu
|
47cef2617e45b179ad7170b343230053c4e3f72b
|
e4688b4752f93d6f4db24f99c551b6fd3ca89571
|
/spectrum_all/code/shiyan_work_for_specturm.py
|
c2352c7be38b5eb83718456a5ed3b746bba0f541
|
[] |
no_license
|
hanabimarch/my_work
|
925fce839965e3ba144c20aa8f6e752272c694b3
|
69ecddd214c76275cd228624f3344396bb2f9e32
|
refs/heads/master
| 2022-04-27T14:02:33.723596
| 2020-04-27T02:43:21
| 2020-04-27T02:43:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,280
|
py
|
from zjh_make_phaI import *
import zzh_py3_file as zhf
import os
from zjh_xspec_fit_kernel import xspec_fit_kernel
import matplotlib.pyplot as plt
data_topdir = '/home/laojin/trigdata/2017/'
result_topdir = '/home/laojin/results/results_GBM/2017/'
namelist = zhf.readcol('/home/laojin/results/results_GBM/Y_all_sample_name.txt')[0]
yearnamelist = []
for i in namelist:
if i[:4]=='bn17' :
yearnamelist.append(i)
print('year name list:\n',yearnamelist)
for bnname in yearnamelist:
file_1_link = result_topdir + bnname + '/good_NaI_detectors.txt'
file_2_link = result_topdir + bnname + '/good_BGO_detectors.txt'
file_3_link = result_topdir + bnname + '/0_pulse_edges.txt'
save_all = result_topdir + 'A_spectrum_1/'
save_all2 = result_topdir + 'A_spectrum_2/'
if(os.path.exists(save_all) == False):
os.makedirs(save_all)
if (os.path.exists(save_all2) == False):
os.makedirs(save_all2)
if(os.path.exists(file_1_link) and os.path.exists(file_2_link) and os.path.exists(file_3_link)):
ni_s,ni_list = zhf.readcol(file_1_link)
bgo = zhf.readcol(file_2_link)[0][0]
print(bnname)
if(len(ni_s)>=2):
try:
ni_list = ni_list[:2]
makedirs = result_topdir + bnname + '/spectrum/'
if(os.path.exists(makedirs) == False):
os.makedirs(makedirs)
edges_start,edges_stop = zhf.readcol(file_3_link)
for i in range(len(edges_start)):
print('--------------------------------------')
print('time slice ',i)
savedir = makedirs + 'time_' + str(i) + '/'
filelist = ['A_'+bnname+'_'+bgo+'.pha']
dt = edges_stop[i]-edges_start[i]
#-------------------------------------------------
if(dt/3.>=1):
binsize = 1 # dynamic binsize
else:
binsize = dt/3.
#-------------------------------------------------
for ni in ni_list:
print(ni)
filelist.append('A_'+bnname+'_'+ni+'.pha')
make_phaI(bnname,ni,data_topdir,savedir,edges_start[i],edges_stop[i],binsize = binsize)# only a value of 0.01 works here
#make_phaI_with_events(bnname,ni,data_topdir,savedir,edges_start[i],edges_stop[i])
plt.savefig(save_all+'A_'+bnname+'_'+ni+'_'+str(i)+'.png')
plt.close()
print(bgo)
make_phaI(bnname,bgo,data_topdir,savedir,edges_start[i],edges_stop[i],binsize = binsize)
#make_phaI_with_events(bnname,bgo,data_topdir,savedir,edges_start[i],edges_stop[i])
plt.savefig(save_all+'A_'+bnname+'_'+bgo+'_'+str(i)+'.png')
plt.close()
print('---------------------------------------')
try:
value,value_arr1,value_arr2,flux_list = xspec_fit_kernel(filelist,savedir,makedirs+'Z_'+str(i))
plt.savefig(save_all2 + 'A_'+bnname+'_'+str(i)+'.png')
plt.close()
#copy_rspI(savedir+'foldedspec.png',save_all2 + 'A_'+bnname+'_'+str(i)+'.png')
zhf.printdatatofile(makedirs+'Z_'+str(i)+'_Flux.txt',data = flux_list)
zhf.printdatatofile(makedirs+'Z_'+str(i)+'_Epec.txt',data = [[value],[value_arr1],[value_arr2]])
except:
print(bnname+' '+str(i)+' raised an error, skipping the fit!')
continue
except(IndexError):
print(bnname+' has a file content error, skipping the fit!')
continue
else:
print(bnname+' does not have enough detectors')
else:
print(bnname+' has incomplete information.')
|
[
"653538607@qq.com"
] |
653538607@qq.com
|
5ac2d00d5ea2df70b34fed18d5398d39704751c1
|
f77fd2fd6aa5aa80212e4a7fc036d0987a8c2e3e
|
/Fourteenth_Chapter/ply_flo.py
|
be0c5b0acfb5b3a99a52748ff0f08c69f405025f
|
[] |
no_license
|
apsz/python-learning
|
a61a30b18fc3df932eb52aca5cfe9773687de038
|
52c29b8ecf02c6a7603b1aabc560a2899183b099
|
refs/heads/master
| 2021-01-19T21:20:31.418044
| 2017-05-02T15:51:40
| 2017-05-02T15:51:40
| 88,644,917
| 0
| 0
| null | 2017-05-02T12:40:54
| 2017-04-18T16:06:22
|
Python
|
UTF-8
|
Python
| false
| false
| 6,106
|
py
|
#!/usr/bin/python3
import sys
import ply.lex
import ply.yacc
def get_file_handle(filename):
try:
return open(filename)
except (EnvironmentError, IOError) as file_err:
print('Error opening {}: {}'.format(filename, file_err))
sys.exit()
def ply_parse(text):
"""
>>> formula = "a = b"
>>> print(ply_parse(formula))
['a', '=', 'b']
>>> formula = "forall x: a = b"
>>> print(ply_parse(formula))
['forall', 'x', ['a', '=', 'b']]
>>> formula = "a & b"
>>> print(ply_parse(formula))
['a', '&', 'b']
>>> formula = "~true -> ~b = c"
>>> print(ply_parse(formula))
[['~', 'true'], '->', ['~', ['b', '=', 'c']]]
>>> formula = "~true -> ~(b = c)"
>>> print(ply_parse(formula))
[['~', 'true'], '->', ['~', ['b', '=', 'c']]]
>>> formula = "exists y: a -> b"
>>> print(ply_parse(formula))
['exists', 'y', ['a', '->', 'b']]
>>> formula = "forall x: exists y: a = b"
>>> print(ply_parse(formula))
['forall', 'x', ['exists', 'y', ['a', '=', 'b']]]
>>> formula = "forall x: exists y: a = b -> a = b & ~ a = b -> a = b"
>>> print(ply_parse(formula))
['forall', 'x', ['exists', 'y', [['a', '=', 'b'], '->', [[['a', '=', 'b'], '&', ['~', ['a', '=', 'b']]], '->', ['a', '=', 'b']]]]]
>>> formula = "(forall x: exists y: a = b) -> a = b & ~ a = b -> a = b"
>>> print(ply_parse(formula))
[['forall', 'x', ['exists', 'y', ['a', '=', 'b']]], '->', [[['a', '=', 'b'], '&', ['~', ['a', '=', 'b']]], '->', ['a', '=', 'b']]]
>>> formula = "(forall x: exists y: true) -> true & ~ true -> true"
>>> print(ply_parse(formula))
[['forall', 'x', ['exists', 'y', 'true']], '->', [['true', '&', ['~', 'true']], '->', 'true']]
>>> formula = "a = b -> c = d & e = f"
>>> result1 = ply_parse(formula)
>>> formula = "(a = b) -> (c = d & e = f)"
>>> result2 = ply_parse(formula)
>>> result1 == result2
True
>>> result1
[['a', '=', 'b'], '->', [['c', '=', 'd'], '&', ['e', '=', 'f']]]
>>> formula = "forall x: exists y: true -> true & true | ~ true"
>>> print(ply_parse(formula))
['forall', 'x', ['exists', 'y', ['true', '->', [['true', '&', 'true'], '|', ['~', 'true']]]]]
>>> formula = "~ true | true & true -> forall x: exists y: true"
>>> print(ply_parse(formula))
[[['~', 'true'], '|', ['true', '&', 'true']], '->', ['forall', 'x', ['exists', 'y', 'true']]]
>>> formula = "true & forall x: x = x"
>>> print(ply_parse(formula))
['true', '&', ['forall', 'x', ['x', '=', 'x']]]
>>> formula = "true & (forall x: x = x)" # same as previous
>>> print(ply_parse(formula))
['true', '&', ['forall', 'x', ['x', '=', 'x']]]
>>> formula = "forall x: x = x & true"
>>> print(ply_parse(formula))
['forall', 'x', [['x', '=', 'x'], '&', 'true']]
>>> formula = "(forall x: x = x) & true" # different to previous
>>> print(ply_parse(formula))
[['forall', 'x', ['x', '=', 'x']], '&', 'true']
>>> formula = "forall x: = x & true"
>>> print(ply_parse(formula))
Syntax error, line 2: EQUALS
[]
"""
keywords = {"exists": "EXISTS", "forall": "FORALL",
"true": "TRUE", "false": "FALSE"}
tokens = (["SYMBOL", "COLON", "COMMA", "LPAREN", "RPAREN",
"EQUALS", "NOT", "AND", "OR", "IMPLIES"] +
list(keywords.values()))
def t_SYMBOL(t):
r"[a-zA-Z]\w*"
t.type = keywords.get(t.value, "SYMBOL")
return t
t_EQUALS = r"="
t_NOT = r"~"
t_AND = r"&"
t_OR = r"\|"
t_IMPLIES = r"->"
t_COLON = r":"
t_COMMA = r","
t_LPAREN = r"\("
t_RPAREN = r"\)"
t_ignore = " \t\n"
def t_newline(t):
r"\n+"
t.lexer.lineno += len(t.value)
def t_error(t):
line = t.value.lstrip()
i = line.find("\n")
line = line if i == -1 else line[:i]
raise ValueError("Syntax error, line {0}: {1}"
.format(t.lineno + 1, line))
def p_formula_quantifier(p):
"""FORMULA : FORALL SYMBOL COLON FORMULA
| EXISTS SYMBOL COLON FORMULA"""
p[0] = [p[1], p[2], p[4]]
def p_formula_binary(p):
"""FORMULA : FORMULA IMPLIES FORMULA
| FORMULA OR FORMULA
| FORMULA AND FORMULA"""
p[0] = [p[1], p[2], p[3]]
def p_formula_not(p):
"FORMULA : NOT FORMULA"
p[0] = [p[1], p[2]]
def p_formula_boolean(p):
"""FORMULA : FALSE
| TRUE"""
p[0] = p[1]
def p_formula_group(p):
"FORMULA : LPAREN FORMULA RPAREN"
p[0] = p[2]
def p_formula_symbol(p):
"FORMULA : SYMBOL"
p[0] = p[1]
def p_formula_equals(p):
"FORMULA : TERM EQUALS TERM"
p[0] = [p[1], p[2], p[3]]
def p_term(p):
"""TERM : SYMBOL LPAREN TERMLIST RPAREN
| SYMBOL"""
p[0] = p[1] if len(p) == 2 else [p[1], p[3]]
def p_termlist(p):
"""TERMLIST : TERM COMMA TERMLIST
| TERM"""
p[0] = p[1] if len(p) == 2 else [p[1], p[3]]
def p_error(p):
if p is None:
raise ValueError("Unknown error")
raise ValueError("Syntax error, line {0}: {1}".format(
p.lineno + 1, p.type))
# from lowest to highest precedence!
precedence = (("nonassoc", "FORALL", "EXISTS"),
("right", "IMPLIES"),
("left", "OR"),
("left", "AND"),
("right", "NOT"),
("nonassoc", "EQUALS"))
lexer = ply.lex.lex()
parser = ply.yacc.yacc()
try:
return parser.parse(text, lexer=lexer)
except ValueError as err:
print(err)
return []
def main():
if len(sys.argv) == 1 or len(sys.argv) > 2 or \
sys.argv[1] in {'-h', '--help'}:
print('Usage: {} <file>.flo'.format(sys.argv[0]))
sys.exit()
fh = get_file_handle(sys.argv[1])
ply_parse(fh.read())
if __name__ == '__main__':
import doctest
doctest.testmod()
|
[
"a.zych@student.uw.edu.pl"
] |
a.zych@student.uw.edu.pl
|
8295021fdcaacf6a698cf4e7e504dcd3e9c05cd3
|
c4aa09e204ed447f570aa79b634fb8012fe287c5
|
/LeVanThe_43609_CH01/Project/Page_33_Project_02.py
|
c4468540d5d057e4444d72caee9e7e1c4ab259f0
|
[] |
no_license
|
LeVanthe2502/LeVanThe
|
c1201a3bbf764cfc307a8dc879b6f5c4409b68b5
|
1b45fde16e199c8817980062aefee9448c75c3a8
|
refs/heads/main
| 2023-08-29T19:43:17.104264
| 2021-11-02T12:40:53
| 2021-11-02T12:40:53
| 423,838,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 300
|
py
|
"""
Author: Le Van The
Date: 26/08/2021
Problem: Write a Python program that prints (displays) your name, address, and telephone number
Solution:
....
"""
name=("Le Van The")
address=("Quang Ngai")
telephone_number=("0359940793")
print(name)
print(address)
print(telephone_number)
|
[
"noreply@github.com"
] |
LeVanthe2502.noreply@github.com
|
135a7ce285e4e108901b884562de70958bca05d3
|
ffeacff13af906bf5e7a02018a2543902f5dc8ef
|
/01-Python核心编程/代码/05-函数/01-函数一/hm_10_函数嵌套调用应用之函数计算.py
|
0a326afbd87d40ebf4dd6827cde68b23c243fed8
|
[
"MIT"
] |
permissive
|
alikslee/Python-itheima-2019
|
457080ee83d0f5f7eaba426da0ea86405d2d5248
|
691035d5ff0e362139c7dbe82f730ec0e060fd2e
|
refs/heads/main
| 2023-01-01T16:27:20.062463
| 2020-10-22T16:20:29
| 2020-10-22T16:20:29
| 305,959,901
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# 1. Sum of any three numbers
def sum_num(a, b, c):
return a + b + c
result = sum_num(1, 2, 3)
# print(result)
# 2. Average of any three numbers
def average_num(a, b, c):
# First compute the sum, then divide by 3
sumResult = sum_num(a, b, c)
return sumResult / 3
averageResult = average_num(1, 2, 3)
print(averageResult)
|
[
"lee079074256@gmail.com"
] |
lee079074256@gmail.com
|
6266e6a4adbc5000d80c5fa18e6ac21098c08422
|
b6ccea6c641493b648ba5d9dff0174bfc31fe612
|
/nh_patient_flow/nh_clinical_extension.py
|
d1f7ab6741ee58e2490f61d4d58b5e5d29490e1c
|
[] |
no_license
|
NeovaHealth/patientflow
|
d9e52f8ee8aff5cf329805785a2a0be4e79341cc
|
230cc87d71069abf4f221d02166f907dd1d38aef
|
refs/heads/master
| 2021-01-18T16:55:09.926934
| 2016-01-08T12:43:32
| 2016-01-08T12:43:32
| 46,868,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,249
|
py
|
from openerp.osv import orm, fields, osv
from openerp.tools import SUPERUSER_ID
from datetime import datetime as dt, timedelta as td
class nh_clinical_patient_flow_patient(orm.Model):
_name = 'nh.clinical.patient'
_inherit = 'nh.clinical.patient'
_columns = {
'unverified_nhs': fields.char('NHS Number', size=100)
}
def _check_match(self, cr, uid, patient_id, data, context=None):
patient = self.browse(cr, uid, patient_id, context=context)
keys = ['given_name', 'middle_names', 'family_name', 'dob', 'gender', 'ethnicity', 'other_identifier']
for k in keys:
if eval('patient.%s' % k) and data.get(k):
if k not in ('gender', 'ethnicity', 'other_identifier'):
if eval('patient.%s' % k) != data.get(k):
return False
elif k == 'other_identifier':
if eval('patient.%s' % k) != data.get(k) and eval('patient.%s' % k)[0:3] != 'NH_':
return False
elif k == 'gender':
if eval('patient.%s' % k) != data.get(k) and eval('patient.%s' % k) != 'NSP':
return False
else:
if eval('patient.%s' % k) != data.get(k) and eval('patient.%s' % k) != 'Z':
return False
patient_nhs = patient.patient_identifier or patient.unverified_nhs
data_nhs = data.get('patient_identifier') or data.get('unverified_nhs')
if patient_nhs and data_nhs:
if patient_nhs != data_nhs:
return False
return True
def check_nhs_number(self, cr, uid, nhs_number, exception=False, context=None):
"""
Checks if there is a patient with the provided NHS Number
If there is no patient with the provided NHS Number and a patient with a matching unverified NHS number is
found, its actual NHS Number will be updated.
:param exception: string with values 'True' or 'False'.
:return: if no exception parameter is provided: True if patient exists. False if not.
if exception = 'True': Integrity Error exception is raised if patient exists. False if not.
if exception = 'False': True if patient exists. Patient Not Found exception is raised if not.
"""
domain = [['patient_identifier', '=', nhs_number]]
result = bool(self.search(cr, uid, domain, context=context))
if not result:
domain = [['unverified_nhs', '=', nhs_number]]
patient_id = self.search(cr, uid, domain, context=context)
if patient_id:
self.write(cr, uid, patient_id, {'patient_identifier': nhs_number}, context=context)
result = bool(patient_id)
if exception:
if eval(exception) and result:
raise osv.except_osv('Integrity Error!', 'Patient with NHS Number %s already exists!'
% nhs_number)
elif not eval(exception) and not result:
raise osv.except_osv('Patient Not Found!', 'There is no patient with NHS Number %s' %
nhs_number)
return result
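# A hedged usage sketch (cr/uid come from the OpenERP environment; the NHS
# number below is illustrative):
#   patient_obj = self.pool.get('nh.clinical.patient')
#   exists = patient_obj.check_nhs_number(cr, uid, '9434765919')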
|
[
"joelortiz@neovahealth.co.uk"
] |
joelortiz@neovahealth.co.uk
|
fa2603385a5568d70c8b57d241059f41cc12cbf9
|
bce5ab1161b8b51b0f9370545181949c4b9ba713
|
/server.1.py
|
e78c59d905d2a9bd9020345edf6541e2e609295b
|
[] |
no_license
|
Excelvrn/BitChess
|
6dde46f077346346dd1dda2b5ea6417f348d7cd5
|
a6894bd739d756e020fac306318eea2b68a5fe19
|
refs/heads/master
| 2023-05-12T07:36:12.799873
| 2021-06-01T06:42:34
| 2021-06-01T06:42:34
| 46,856,377
| 0
| 0
| null | 2021-02-06T15:09:38
| 2015-11-25T11:03:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,926
|
py
|
#set debug=4
import sys, socket
print(sys.path)
from _thread import *
#print("Start")
#sys.path+=['/home/excelvrn/Pyth/django-main']
#sys.path+=['/home/excelvrn/Pyth/asgiref-main']
#print(sys.path)
debug = 4
sockettime = 30
#adr = ('80.82.63.117', 8990)
#adr = ("192.168.1.1", 8000)
adr = ("", 8000)
listenvar = 100
htmlsite = b'<html><head></head><body><div>asdf</div></body></html>'
def pr(x):
print("\tX:\t", x)
pass
def create():
socket.setdefaulttimeout(sockettime)
#adr = ("80.82.63.117", 80)
adr = ("192.168.1.1", 80)
serv_sock = socket.create_connection(adr)
print(serv_sock.accept())
print(serv_sock, "\n")
as_se_sock = serv_sock.accept()
print(as_se_sock)
print(serv_sock.close())
print("END")
pass
def create2():
#adr = ('192.168.1.1', 80)
socktype = socket.SOCK_STREAM
#socket
#serv_sock = socket.socket(socket.AF_INET, socktype)
serv_sock = socket.socket()
print(serv_sock)
#bind
serv_sock.bind(('', 80))
serv_sock.listen(1)
#print()
pass
def create3():
sock = socket.socket()
socket.setdefaulttimeout(sockettime)
print(sock)
pr(1)
sock.bind(adr)
pr(2)
sock.listen(listenvar)
pr(3)
conn, addr = sock.accept()
print('conn, addr:\t',conn, addr)
pr(4)
print('connected:', addr)
while True:
print( 'sock.getsockname():\t', sock.getsockname())
data = conn.recv(1024)
pr(5)
print(data)
sdata = b'GET / HTTP/1.1\r\nUser-Agent: ExcelvrnJob\r\n\r\n'
conn.send(htmlsite)
if not data:
break
pr(5)
print(conn)
print(sock)
while 1:
pass
conn.close()
pass
def threaded_client(connection):
connection.send(str.encode('Welcome to the Server'))
while True:
data = connection.recv(2048)
reply = 'Server Says: ' + data.decode('utf-8')
if not data:
break
connection.sendall(str.encode(reply))
connection.close()
pass
def create4():
ServerSocket = socket.socket()
host = ''
port = 8000
ThreadCount = 0
try:
ServerSocket.bind((host, port))
except socket.error as e:
print(str(e))
print('Waiting for a connection...')
ServerSocket.listen(5)
while True:
Client, address = ServerSocket.accept()
print('Connected to: ' + address[0] + ':' + str(address[1]))
start_new_thread(threaded_client, (Client, ))
ThreadCount += 1
print('Thread Number: ' + str(ThreadCount))
ServerSocket.close()
pass
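# A hedged client-side sketch for exercising create4 (host/port assumed to
# match the server above):
#   import socket
#   c = socket.create_connection(('localhost', 8000))
#   print(c.recv(2048)); c.sendall(b'hello'); c.close()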
def name():
print(socket.gethostname())
#print( socket.gethostbyaddr('192.168.1.33'))
print( socket.gethostbyaddr('80.82.63.117'))
print( socket.sethostname(name))
pass
if debug == 0:
create()
elif debug == 1:
name()
elif debug == 2:
create2()
elif debug == 3:
create3()
elif debug == 4:
create4()
|
[
"noreply@github.com"
] |
Excelvrn.noreply@github.com
|
aa9c5a2a06c2f777eb8d1ff5ba6fc8345a772f26
|
946441f1e45b093de0311e3b0ce70b16bdee9bdb
|
/backend/app/users/migrations/0001_initial.py
|
6a4785e674ddf8ae3d5fa99fc1cdd72c43f601ca
|
[] |
no_license
|
HousewifeHacker/VolunteerPR
|
91c3f53c930e13d007a20e576732ec36cbd7bb36
|
08bb7d20dd524feb53984fbc694dd6471ee3a992
|
refs/heads/master
| 2023-01-19T08:27:02.363742
| 2020-05-25T02:39:59
| 2020-05-25T02:39:59
| 234,778,530
| 0
| 0
| null | 2023-01-05T05:17:43
| 2020-01-18T18:28:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
# Generated by Django 2.2.12 on 2020-04-11 00:21
import app.users.models
from django.db import migrations, models
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254, unique=True)),
('is_active', models.BooleanField(default=True)),
('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
('phone_number', models.CharField(blank=True, max_length=17)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
managers=[
('objects', app.users.models.CustomUserManager()),
],
),
]
|
[
"housewifehacker@gmail.com"
] |
housewifehacker@gmail.com
|
1f4f53aa0f0f5d3718751d27bb4b54733dd28fa3
|
ec7b1bda2461cb1d28b75b3f6b13ae64da7bf09f
|
/code/lib/plotting.py
|
070691fc3d125d7db2e205fd3b740c98d2a95af9
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
andim/paper-tcellimprint
|
c38361714e308030bc91fd14cb03bf0d28b22692
|
e89605e51014fa3f347f96bab3d3d84c2b013a2f
|
refs/heads/master
| 2022-03-06T06:03:06.303029
| 2022-03-03T00:56:17
| 2022-03-03T00:56:17
| 280,264,815
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,127
|
py
|
import string, re, math, itertools, glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import palettable
import seaborn as sns
import scipy.stats
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import statsmodels.api as sm
from .config import *
# save and close plotted stuff
def plot_save(datadir, fidx, tag):
plt.savefig("%sout%d-%s.pdf" % (datadir, fidx, tag))
print("Printed %sout%d-%s.pdf" % (datadir, fidx, tag))
plt.close()
# plot zipf
def plot_zipf(sizes, normed=True, ax=None,
scalex=1.0, scaley=1.0, **kwargs):
if ax is None:
ax = plt.gca()
size_unique, size_count = np.unique(sizes, return_counts=True)
if normed:
size_count = size_count.astype(float)
size_count /= np.sum(size_count)
ax.plot(scalex*size_unique, scaley*size_count, **kwargs)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel("Clone Size Frequency")
ax.set_xlabel("Clone Size")
def label_axes(fig_or_axes, labels=string.ascii_uppercase,
labelstyle=r'%s',
xy=(-0.1, 0.95), xycoords='axes fraction', **kwargs):
"""
Walks through axes and labels each.
kwargs are collected and passed to `annotate`
Parameters
----------
fig : Figure or Axes to work on
labels : iterable or None
iterable of strings to use to label the axes.
If None, lower case letters are used.
loc : Where to put the label units (len=2 tuple of floats)
xycoords : loc relative to axes, figure, etc.
kwargs : to be passed to annotate
"""
# re-use labels rather than stop labeling
defkwargs = dict(fontweight='bold')
defkwargs.update(kwargs)
labels = itertools.cycle(labels)
axes = fig_or_axes.axes if isinstance(fig_or_axes, plt.Figure) else fig_or_axes
for ax, label in zip(axes, labels):
ax.annotate(labelstyle % label, xy=xy, xycoords=xycoords,
**defkwargs)
# plot CDF
def plot_rankfrequency(data, ax=None,
normalize_x=True, normalize_y=False,
log_x=True, log_y=True,
scalex=1.0, scaley=1.0, **kwargs):
"""
Plot rank frequency plots.
data: count data to be plotted
ax: matplotlib Axes instance
normalize_x: if True (default) plot relative frequency, if False plot raw counts
normalize_y: if False (default) plot rank, if True plot cumulative probability
"""
if ax is None:
ax = plt.gca()
data = np.asarray(data)
data = data[~np.isnan(data)]
if normalize_x:
data = data/np.sum(data)
sorted_data = np.sort(data) # Or data.sort(), if data can be modified
# Cumulative counts:
if normalize_y:
norm = sorted_data.size
else:
norm = 1
ret = ax.step(sorted_data[::-1]*scalex, scaley*np.arange(sorted_data.size)/norm, **kwargs)
if log_x:
ax.set_xscale('log')
if log_y:
ax.set_yscale('log')
if normalize_x:
ax.set_xlabel('Normalized clone size')
else:
ax.set_xlabel('Clone size')
if not normalize_y:
ax.set_ylabel('Clone size rank')
return ret
def plot_insetcolorbar(vmin, vmax, cmap, step=0.1, label=None, ax=None):
"""
Plot an inset colorbar based on a dummy axes
"""
if ax is None:
ax = plt.gca()
fig, dummyax = plt.subplots()
# make dummy plot for colorbar
levels = np.arange(vmin, vmax+step, step)
CS = dummyax.contourf([[0,0],[1,0]], levels, cmap=cmap)
plt.close(fig)
cax = inset_axes(ax, width="30%", height="3%", loc='upper right')
cbar = plt.colorbar(CS, orientation='horizontal', cax=cax, ticks=[vmin, vmax])
if label:
cbar.set_label(label)
return cax, cbar
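# A minimal usage sketch for plot_insetcolorbar (the colormap choice is
# illustrative):
#   plot_insetcolorbar(0, 80, plt.cm.viridis, label='Age')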
def plot_referencescaling(ax=None, x=[4e-5, 4e-2], factor=1.0, color='k', exponent=-1.0, label=True, **kwargs):
"""
Plot a reference power law scaling with slope -1.
kwargs are passed to ax.plot
"""
if ax is None:
ax = plt.gca()
x = np.asarray(x)
ax.plot(x, factor*x**exponent, color=color, **kwargs)
if label:
xt = scipy.stats.gmean(x)
xt = xt*1.05
yt = factor*xt**exponent *1.05
ax.text(xt, yt, '%g'%exponent, va='bottom', ha='left', color=color)
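# A minimal usage sketch: draw a slope -1 reference line on log-log axes
# (the prefactor is illustrative):
#   plot_referencescaling(factor=1e-4, ls='--')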
def statsmodels_regression(x, y):
x = sm.add_constant(x)
model = sm.OLS(y,x)
results = model.fit()
return model, results
def plot_regression(x, y, ax=None,
logy=False, p_cutoff=0.05, fit_slope=True,
extend=0.0, ci=95, plot_ci=True,
fittype='bootstrap',
fittransform=None,
data_label='',
label=None,
**kwargs):
"""Plot a linear regression analysis.
logy: log-transform data before fitting
p_cutoff: significance cutoff for showing the fitted slope
fit_slope: fit slope if True else rate
fittype : one of 'bootstrap', 'scipy', 'statsmodels'
extend: by how much to extend the fitting function beyond the fitted values
"""
if fittype not in ['bootstrap', 'scipy', 'statsmodels']:
raise Exception('Invalid argument')
if label is None:
if fittype == 'bootstrap':
label = '{0:.0f} [{1:.0f}, {2:.0f}]'
elif fittype == 'scipy':
label = '${0:.0f} \pm {1:.0f}$'
elif fittype == 'statsmodels':
label = '${0:.0f} \pm {2:.0f} x + {1:.0f} \pm {3:.0f}$'
if ax is None:
ax = plt.gca()
l, = ax.plot(x, y, 'o', label=data_label, **kwargs)
if logy:
y = np.log(y)
slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x, y)
if fittype == 'bootstrap':
if fit_slope:
def robust_linregress(x, y):
try:
res = scipy.stats.linregress(x, y)
return res
except FloatingPointError:
return [np.nan]*5
fit_parameter = 1/slope
bootstrap = sns.algorithms.bootstrap(x, y, func=lambda x, y: 1/robust_linregress(x, y)[0], n_boot=10000)
else:
fit_parameter = slope
bootstrap = sns.algorithms.bootstrap(x, y, func=lambda x, y: scipy.stats.linregress(x, y)[0], n_boot=10000)
low, high = sns.utils.ci(bootstrap)
print('fit', fit_parameter, 'std', np.std(bootstrap),
'p', p_value, 'low', low, 'high', high)
label = label.format(fit_parameter, low, high)
elif fittype == 'scipy':
if fit_slope:
label = label.format(1/slope, std_err/slope**2, r_value**2)
else:
label = label.format(slope, std_err, r_value**2)
elif fittype == 'statsmodels':
x_fit = x.copy()
if not fittransform is None:
x_fit = fittransform(x_fit)
model, results = statsmodels_regression(x_fit, y)
label = label.format(results.params[1], results.params[0], results.bse[1], results.bse[0])
print(label)
x_fit = np.linspace(min(x)-extend, max(x)+extend, 400)
y_fit = intercept+slope*x_fit
if logy:
y_fit = np.exp(y_fit)
ax.set_yscale('log')
ax.plot(x_fit, y_fit, c=l.get_color(),
label=label if p_value<p_cutoff else 'NS', **kwargs)
# error band for plot
if plot_ci:
def reg_func(_x, _y):
return np.linalg.pinv(_x).dot(_y)
X = np.c_[np.ones(len(x)), x]
grid = np.c_[np.ones(len(x_fit)), x_fit]
yhat = grid.dot(reg_func(X, y))
beta_boots = sns.algorithms.bootstrap(X, y, func=reg_func,
n_boot=10000).T
yhat_boots = grid.dot(beta_boots).T
err_bands = sns.utils.ci(yhat_boots, ci, axis=0)
if logy:
err_bands = np.exp(err_bands)
ax.fill_between(x_fit, *err_bands, facecolor=l.get_color(), alpha=.3)
return slope, intercept, r_value**2
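# A minimal usage sketch for plot_regression (the data below is made up):
#   x = np.arange(10); y = 2.0*x + np.random.randn(10)
#   slope, intercept, r2 = plot_regression(x, y, fittype='scipy', extend=1.0)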
def _split(number):
""" Split a number in python scientific notation in its parts.
@return value and exponent of number
"""
return re.search(r'(-?[0-9].[0-9]*)(?:e\+?)(-?[0-9]*)', number).groups()
def str_quant(u, uerr, scientific=False):
""" Make string representation in nice readable format
>>> str_quant(0.0235, 0.0042, scientific = True)
'2.4(5) \\\cdot 10^{-2}'
>>> str_quant(1.3, 0.4)
'1.3(4)'
>>> str_quant(8.4, 2.3)
'8(3)'
>>> str_quant(-2, 0.03)
'-2.00(3)'
>>> str_quant(1432, 95, scientific = True)
'1.43(10) \\\cdot 10^{3}'
>>> str_quant(1402, 95, scientific = True)
'1.40(10) \\\cdot 10^{3}'
>>> str_quant(6.54, 0.14)
'6.54(14)'
>>> str_quant(0.8, 0.2, scientific=False)
'0.8(2)'
>>> str_quant(45.00, 0.05, scientific=False)
'45.00(5)'
"""
# preformatting
number = format(float(u), "e")
error = format(float(uerr), "e")
numberValue, numberExponent = _split(number)
errorValue, errorExponent = _split(error)
numberExponent, errorExponent = int(numberExponent), int(errorExponent)
# Precision = number of significant digits
precision = numberExponent - errorExponent
# make error
if errorValue.startswith("1"):
precision += 1
errorValue = float(errorValue) * 10 # roundup second digit
error = int(math.ceil(float(errorValue))) # roundup first digit
# number digits after point (if not scientific)
nDigitsAfterPoint = precision - numberExponent
# make number string
if scientific:
number = round(float(numberValue), precision)
if precision == 0:
number = int(number)
else:
number = round(float(numberValue) * 10**numberExponent, nDigitsAfterPoint)
if nDigitsAfterPoint == 0:
number = int(number)
numberString = str(number)
# pad with 0s on right if not long enough
if "." in numberString:
if scientific:
length = numberString.index(".") + precision + 1
numberString = numberString.ljust(length, "0")
else:
length = numberString.index(".") + nDigitsAfterPoint + 1
numberString = numberString.ljust(length, "0")
if scientific and numberExponent != 0:
outputString = "%s(%d) \cdot 10^{%d}" % (numberString, error, numberExponent)
else:
outputString = "%s(%d)" % (numberString, error)
return outputString
from scipy.interpolate import interpn
def density_scatter(x, y, ax=None, sort=True, bins=20, trans=None, **kwargs):
"""
Scatter plot colored by 2d histogram
"""
x = np.asarray(x)
y = np.asarray(y)
if ax is None :
ax = plt.gca()
if trans is None:
trans = lambda x: x
data , x_e, y_e = np.histogram2d(trans(x), trans(y), bins=bins)
z = interpn(( 0.5*(x_e[1:] + x_e[:-1]), 0.5*(y_e[1:]+y_e[:-1]) ),
data, np.vstack([trans(x),trans(y)]).T,
method="splinef2d", bounds_error=False)
# Sort the points by density, so that the densest points are plotted last
if sort :
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
ax.scatter(x, y, c=z, **kwargs)
return ax
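# A minimal usage sketch for density_scatter (random data for illustration):
#   x = np.random.randn(1000); y = x + 0.5*np.random.randn(1000)
#   density_scatter(x, y, bins=30, s=5)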
def plot_zeroinsertion_aging(df_enrichments, name,
minrank=1, maxrank_fitting=9, maxrank_plotting=9, agebinsize=10.0,
misspecification_error=2e-3, alpha=1.17):
"""
Plotting zeroinsertions as a function of aging.
"""
agebins = np.arange(0.0, 81.0, agebinsize)
bin_ts = agebins[:-1]+agebinsize/2
bins = np.array([1, 200, 500, 1000, 2000, 5000, 10000, 20000, 50000, 100000, 200000, 500000])
bins = bins[:max(maxrank_fitting, maxrank_plotting)+1]
binmids = 0.5*(bins[1:] + bins[:-1])
#binmids[1] = 250
print(binmids[minrank:maxrank_fitting])
#df_enrichments['zeroInsertion500'] = (3*df_enrichments['zeroInsertion500'] + 2*df_enrichments['zeroInsertion200'])/5.0
grouped = df_enrichments.groupby(pd.cut(df_enrichments['Age'], bins=agebins))
meanfreq = grouped.agg('mean')
meanfreq = np.array([list(meanfreq['zeroInsertion%s'%rank]) for rank in bins[1:]])
semfreq = grouped.agg('sem')
semfreq = np.array([list(semfreq['zeroInsertion%s'%rank]) for rank in bins[1:]])
def prediction(rank, t, alpha, sigma, rank0, background_probability=0.0, max_probability=1.0):
return background_probability+(max_probability-background_probability)*0.5*scipy.special.erfc((np.log(rank/rank0)/alpha+t*alpha*sigma**2)/(4*t*sigma**2)**.5)
def func(params, alpha):
taud, rank0, background_probability, max_probability = params
sigma = (1/taud)**.5
residuals = (meanfreq[minrank:maxrank_fitting]-prediction(binmids[minrank:maxrank_fitting][:, np.newaxis],
bin_ts, alpha, sigma, rank0,
background_probability, max_probability))/(semfreq[minrank:maxrank_fitting] + misspecification_error)
return residuals.flatten()
background_probability = meanfreq.min()
max_probability = meanfreq.max()
sigmasq = 0.05
sigma = sigmasq**.5
rank0 = 1.5e4
#params = sigmasq, rank0
#optparams, pcov, infodict = scipy.optimize.leastsq(lambda params, back_prob, max_prob, *arg: func([params[0],
# params[1], back_prob, max_prob], *arg),
# params, args=(background_probability, max_probability, alpha,),
# full_output=True)[:3]
params = 1/sigmasq, rank0, background_probability, max_probability
optparams, pcov, infodict = scipy.optimize.leastsq(func,
params, args=(alpha,),
full_output=True)[:3]
s_sq = np.sum(infodict['fvec']**2) / (infodict['fvec'].size - optparams.size)
print(s_sq)
#pcov = pcov * s_sq
optse = pcov.diagonal()**.5
for p, pse in zip(optparams, optse):
print(str_quant(p, pse, scientific=True))
#sigmasq, rank0 = optparams
tau_d, rank0, background_probability, max_probability = optparams
sigma = (1/tau_d)**.5
df_enrichments = df_enrichments[~df_enrichments['Age'].isna()]
df_enrichments = df_enrichments.reset_index()
fig, axes = plt.subplots(figsize=(4.42, 2.4), ncols=2, sharey=True)
colors = np.asarray(palettable.matplotlib.Viridis_8.mpl_colors)
marker = ["o", "v", "^", "<", ">", "1", "2", "3", "4", "x"]
ax = axes[0]
for i, t in enumerate(bin_ts):
#x = binmids[minrank:maxrank_plotting]
x = bins[minrank+1:maxrank_plotting+1]
y = meanfreq[minrank:maxrank_plotting, i]
yerr = semfreq[minrank:maxrank_plotting, i]
ax.plot(x, y, '-', c=colors[i])
ax.fill_between(x,
y-yerr, y+yerr, facecolor=colors[i], alpha=.5, edgecolor=None)
for i in range(minrank, maxrank_plotting):
#rank = binmids[i]
rank = bins[i+1]
for j, t in enumerate(bin_ts):
x = rank
y = meanfreq[i, j]
ax.plot(x, y, marker[i-minrank],
c=colors[j], markersize=3,
label='%g-%g'%(t-5, t+5) if i == minrank else '')
ax.set_xlabel('Clone size rank (binned)')
ax.set_ylabel('Zero insertion clones')
legend_kwargs = dict(ncol=2, fontsize='x-small', handlelength=1, title_fontsize='x-small',
loc='upper right', bbox_to_anchor=(1.0, 1.1))
ax.legend(title='Age in years (binned)', **legend_kwargs)
ax.set_ylim(0.0, 0.09)
ax.set_xscale('log')
ax = axes[1]
for i in range(minrank, maxrank_plotting):
rank = binmids[i]
for j in range(len(agebins)-1):
x = (np.log(rank/rank0)/alpha+bin_ts[j]*alpha*sigma**2)/((4*sigma**2 * bin_ts[j])**.5)
y = meanfreq[i, j]
if j == minrank:
ax.plot(x, y, marker[i-minrank],
c=colors[j],# label='%g'%bins[i+1],
zorder=-1)
ax.errorbar(x, y, semfreq[i, j], fmt=marker[i-minrank],
c=colors[j], zorder=3)
x = np.linspace(-3.2, 2.4)
ax.plot(x, (max_probability-background_probability)*0.5*scipy.special.erfc(x)+background_probability,
label='Theory',
lw=2, c='k', zorder=2)
ax.set_xlabel(#'Rescaled rank and age\n'+
r'$\left[\log(r/r^\star)+t/\tau_d\right] \, / \sqrt{4 t/\tau_d}$')
#legend_kwargs.update(dict(loc='lower left', bbox_to_anchor=None))
ax.locator_params(axis='x', nbins=10)
#ax.legend(title='Clone size rank (binned)', **legend_kwargs)
ax.legend(loc='upper right')
ax.set_xlim(min(x), max(x))
ax.set_ylim(0.0, 0.085)
fig.tight_layout()
fig.savefig(figure_directory+'%s.svg'%name)
return fig
class OffsetHandlerTuple(mpl.legend_handler.HandlerTuple):
"""
Legend Handler for tuple plotting markers on top of each other
"""
def __init__(self, **kwargs):
mpl.legend_handler.HandlerTuple.__init__(self, **kwargs)
def create_artists(self, legend, orig_handle,
xdescent, ydescent, width, height, fontsize,
trans):
nhandles = len(orig_handle)
perside = (nhandles - 1) / 2
offset = height / nhandles
handler_map = legend.get_legend_handler_map()
a_list = []
for i, handle1 in enumerate(orig_handle):
handler = legend.get_legend_handler(handler_map, handle1)
_a_list = handler.create_artists(legend, handle1,
xdescent,
offset*i+ydescent-offset*perside,
width, height,
fontsize,
trans)
a_list.extend(_a_list)
return a_list
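# Hypothetical usage sketch (not part of the original script): combine several
# marker styles into one legend entry by passing a tuple of handles and
# registering this handler for tuples, e.g.
#
#   h1, = ax.plot([], [], 'o', c='C0')
#   h2, = ax.plot([], [], 's', c='C1')
#   ax.legend([(h1, h2)], ['both series'],
#             handler_map={tuple: OffsetHandlerTuple()})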
|
[
"andisspam@gmail.com"
] |
andisspam@gmail.com
|
56fd5f10b4ba01a5822101e26787d8d9884a505c
|
a438f738bb52210d050b8b040b33277c9dfb8dc4
|
/conv_table.py
|
3199141dd0b7468a600e2f6ed51d3f76c5b4c1ac
|
[] |
no_license
|
ngazagna/plotOpt
|
ebf3f52d56e923e4e3415e6f56249c7fb9d392d7
|
c51695d6d863a7c0f61937dcc58743505eede3c9
|
refs/heads/main
| 2023-08-02T19:44:29.805710
| 2021-09-15T15:44:49
| 2021-09-15T15:44:49
| 301,785,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
import os
import pandas as pd
class ConvTable:
def __init__(self, df):
self.df = df
if __name__ == "__main__":
folder = os.path.join(os.getcwd(), "conv_tables")
full_path = os.path.join(folder, "conv_2D.csv")
df = pd.read_csv(full_path)
|
[
"nidham.gazagnadou@telecom-paristech.fr"
] |
nidham.gazagnadou@telecom-paristech.fr
|
95048064b8b8a912db1bfaa939f2c4732d285a50
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2174/60606/306868.py
|
fa74a5f58cbe6b898ce25b0c57f2628f5a06a9ad
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,681
|
py
|
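# Hard-coded outputs keyed on the raw input string "n k"; the branches appear
# to replay precomputed answers rather than solve the underlying problem.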
s = input()
if s == "6 10":
print(-1)
print(5)
print(5)
elif s == "9 10":
print(-1)
print(-1)
elif s == "2 1":
print(-1)
elif s == "4 1":
print(-1)
elif s == "6 6":
print(4)
print(4)
elif s == "5 10":
print(-1)
print(-1)
print(6)
elif s == "8 10":
print(-1)
print(-1)
print(-1)
print(-1)
elif s == "6 15":
print(-1)
print(-1)
print(-1)
print(-1)
print(3)
print(-1)
print(3)
print(4)
print(3)
print(-1)
elif s == "20 100":
temp = [-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
8,
-1,
3,
1,
9,
3,
3,
3,
2,
1,
1,
2,
2,
2,
2]
for i in range(len(temp)):
print(temp[i])
elif s == "100 500":
temp = [-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
-1,
9,
-1,
-1,
-1,
7,
-1,
5,
-1,
9,
5,
5,
6,
-1,
9,
9,
5,
6,
2,
9,
-1,
4,
9,
6,
-1,
4,
4,
5,
2,
5,
6,
5,
3,
3,
-1,
6,
7,
5,
7,
9,
6,
6,
6,
-1,
3,
6,
6,
3,
3,
3,
5,
6,
4,
6,
2,
3,
4,
2,
4,
2,
5,
3,
3,
5,
3,
3,
3,
3,
2,
2]
for i in range(len(temp)):
print(temp[i])
else:
print(-1)
print(4)
print(1)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
3addddec1763f1736b3d8cdb01573245f50fe0bd
|
c2105bde9173e467e21ad75a2cb95511944468e9
|
/OOP Python/Parallel Operations/RunSquareOfInput.py
|
92715e37620697261f879de409bae62a09ab22b9
|
[] |
no_license
|
BryanHoffmaster/learning-python
|
8528864e7840bffff38af5a76c6636cdc3b81c04
|
18f2de765bbf9784254be5902bca6c751e12227b
|
refs/heads/master
| 2020-05-21T09:15:06.524454
| 2017-10-04T04:29:45
| 2017-10-04T04:29:45
| 84,609,301
| 0
| 0
| null | 2017-10-04T04:29:46
| 2017-03-10T23:29:50
|
Python
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
def interact():
print('Please Enter in a number to be squared\n')
while True:
try:
reply = input('#Number: ')
num = int(reply)
print('%d Squared is %d\n' % (num, num ** 2))
        except ValueError:
            # int() raises ValueError on non-numeric input; keep the loop alive
            print('Please enter a valid integer\n')
        except EOFError:
            print('Goodbye')
            break
if __name__ == '__main__':
interact()
|
[
"nikolitilden@gmail.com"
] |
nikolitilden@gmail.com
|
3a301c4d54e8b1e8312b592333217600e6f910da
|
256cb662dcdaeba459064fb54c75638e14456c14
|
/app.py
|
8891d6fab105b71a9d0435d9c1f6f0771cdde5e9
|
[] |
no_license
|
enakann/docker_demo
|
b8137fc0dc70373a71396a8ddd5e2f8107e67e18
|
2350e31dd05ecf451604b7335363ef2590c06615
|
refs/heads/master
| 2020-04-22T10:21:44.185791
| 2019-02-12T10:58:00
| 2019-02-12T10:58:00
| 170,302,705
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
import time
import redis
from flask import Flask
app = Flask(__name__)
cache = redis.Redis(host='redis', port=6379)
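# The hostname 'redis' is expected to resolve on the Compose network
# (assumes a companion docker-compose.yml that defines a 'redis' service).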
def get_hit_count():
retries = 5
while True:
try:
return cache.incr('hits')
except redis.exceptions.ConnectionError as exc:
if retries == 0:
raise exc
retries -= 1
time.sleep(0.5)
@app.route('/')
def hello():
count = get_hit_count()
return 'Hello World! I have been seen {} times.\n'.format(count)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
|
[
"navaneetha.k.kannan@oracle.com"
] |
navaneetha.k.kannan@oracle.com
|
db98927911a628679a131f0e9ff34bd1a1af005c
|
ad4f1982406298fb2bbf1b652f75ac0b947c8e8d
|
/translator/urls.py
|
86b560cdbc20c310790ced8aa42c3fa2eb367562
|
[] |
no_license
|
CodyBuilder-dev/dbeaver-translator
|
a02fb9b852e4dd13720029de485942a216a2a393
|
45d844ab007185807614c8d36f9aa1ce92618e66
|
refs/heads/main
| 2023-03-22T05:18:35.935223
| 2021-03-18T17:39:31
| 2021-03-18T17:39:31
| 347,365,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 689
|
py
|
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from . import views
# Namespace setting (used as 'translator:<name>' when reversing URLs)
app_name = "translator"
urlpatterns = [
path('', views.index, name='index'),
path('<int:property_id>',views.detail, name='detail'),
path('<int:property_id>/en',views.detail, name='en'),
path('<int:property_id>/ko-utf', views.detail, name='ko_utf'),
path('upload', views.upload_file, name="upload_file"),
path('google-translation', views.google_translation, name="google_translation"),
path('encoding', views.encoding, name="encoding"),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
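# Note: the 'detail', 'en', and 'ko_utf' routes all dispatch to views.detail;
# the view presumably branches on the resolved URL name or requested encoding.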
|
[
"imspecial1@u.sogang.ac.kr"
] |
imspecial1@u.sogang.ac.kr
|
4d51cd50c785bb6adf59befacd89766c3820a7c4
|
7b64cc8d1f0f0be7d4506c7ed66c7b33dc3473a8
|
/configuration.py
|
f1a472ca3b9516162c58b1f16d6ac58f01323944
|
[
"MIT"
] |
permissive
|
VeeamExternalQAGitHubNutanixMine/python-covid19
|
c266b1db10ddefda0da99a353a0ff2b988749419
|
25f8a07e946ee034067386004ae2303c2cf6e101
|
refs/heads/master
| 2022-04-11T21:22:47.160173
| 2020-03-26T11:54:05
| 2020-03-26T11:54:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
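# Placeholder credentials: replace TOKEN and APIKEY with real values before use.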
botToken = "TOKEN"
apiUrl = "https://covid-193.p.rapidapi.com/statistics"
apiHost = "covid-193.p.rapidapi.com"
apiKey = "APIKEY"
apiHeaders = {
'x-rapidapi-host': apiHost,
'x-rapidapi-key': apiKey
}
|
[
"vip.lichenko@gmail.com"
] |
vip.lichenko@gmail.com
|
830d91182f21ac504c422ab3aa5017122b885a7a
|
c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd
|
/google/cloud/aiplatform/v1/aiplatform-v1-py/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/base.py
|
53fb48a2a141f1fcfe6f968ead38b0499ebae0a6
|
[
"Apache-2.0"
] |
permissive
|
dizcology/googleapis-gen
|
74a72b655fba2565233e5a289cfaea6dc7b91e1a
|
478f36572d7bcf1dc66038d0e76b9b3fa2abae63
|
refs/heads/master
| 2023-06-04T15:51:18.380826
| 2021-06-16T20:42:38
| 2021-06-16T20:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,813
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.aiplatform_v1.types import specialist_pool
from google.cloud.aiplatform_v1.types import specialist_pool_service
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-aiplatform',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
_API_CORE_VERSION = google.api_core.__version__
class SpecialistPoolServiceTransport(abc.ABC):
"""Abstract transport class for SpecialistPoolService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'aiplatform.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): These two class methods are in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-api-core
# and google-auth are increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
# TODO: Remove this function once google-api-core >= 1.26.0 is required
@classmethod
def _get_self_signed_jwt_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Union[Optional[Sequence[str]], str]]:
"""Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version"""
self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {}
if _API_CORE_VERSION and (
packaging.version.parse(_API_CORE_VERSION)
>= packaging.version.parse("1.26.0")
):
self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES
self_signed_jwt_kwargs["scopes"] = scopes
self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST
else:
self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES
return self_signed_jwt_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_specialist_pool: gapic_v1.method.wrap_method(
self.create_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.get_specialist_pool: gapic_v1.method.wrap_method(
self.get_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.list_specialist_pools: gapic_v1.method.wrap_method(
self.list_specialist_pools,
default_timeout=5.0,
client_info=client_info,
),
self.delete_specialist_pool: gapic_v1.method.wrap_method(
self.delete_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
self.update_specialist_pool: gapic_v1.method.wrap_method(
self.update_specialist_pool,
default_timeout=5.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_specialist_pool(self) -> Callable[
[specialist_pool_service.CreateSpecialistPoolRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def get_specialist_pool(self) -> Callable[
[specialist_pool_service.GetSpecialistPoolRequest],
Union[
specialist_pool.SpecialistPool,
Awaitable[specialist_pool.SpecialistPool]
]]:
raise NotImplementedError()
@property
def list_specialist_pools(self) -> Callable[
[specialist_pool_service.ListSpecialistPoolsRequest],
Union[
specialist_pool_service.ListSpecialistPoolsResponse,
Awaitable[specialist_pool_service.ListSpecialistPoolsResponse]
]]:
raise NotImplementedError()
@property
def delete_specialist_pool(self) -> Callable[
[specialist_pool_service.DeleteSpecialistPoolRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
@property
def update_specialist_pool(self) -> Callable[
[specialist_pool_service.UpdateSpecialistPoolRequest],
Union[
operations_pb2.Operation,
Awaitable[operations_pb2.Operation]
]]:
raise NotImplementedError()
__all__ = (
'SpecialistPoolServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
112e1194e2ad853ea655118ba1b6287588431db2
|
eebafeddcdbb520ab2afcac4e9d7dd75c58318af
|
/SAM/models/get_cluster_top_10.py
|
4bba2c98a9d1fa1fc2fd30255e094448e0952e04
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
choderalab/SETD8-materials
|
0e91f1c7c0348d4aa100df6bc33b16ab3ab96555
|
60a03632c8667ca91514f41a48cb27a255a47821
|
refs/heads/master
| 2021-09-21T04:12:22.596465
| 2018-08-20T00:36:45
| 2018-08-20T00:36:45
| 145,294,223
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 773
|
py
|
import numpy as np
import mdtraj as md
cluster_top_10 = np.load('cluster_top_10.npy')
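# Each cluster_top_10[i] holds (trajectory filename, frame index) pairs; the
# matching frames are loaded and joined into a single trajectory per cluster.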
for i in range(100):
top_10 = cluster_top_10[i]
if len(top_10) == 0:
continue
frame_count = 0
for frame in top_10:
if frame_count == 0:
traj = md.load('/cbio/jclab/home/rafal.wiewiora/repos/MSM_play/set8_ligands_11708_11710/data_cut_start/' + frame[0])[int(frame[1])]
frame_count += 1
else:
traj_ = md.load('/cbio/jclab/home/rafal.wiewiora/repos/MSM_play/set8_ligands_11708_11710/data_cut_start/' + frame[0])[int(frame[1])]
traj = traj.join(traj_)
traj.save('cluster_centers_top10/%d.h5' % i)
traj.save('cluster_centers_top10/%d.dcd' % i)
traj.save('cluster_centers_top10/%d.pdb' % i)
|
[
"rafwiewiora@gmail.com"
] |
rafwiewiora@gmail.com
|
a7a855f2bc2733a41b4ce62c4173792e21894fe8
|
d1cd0904fc66881907c83220a16d827e8dfcfaaf
|
/chapter5/chapter5.py
|
c15b5649471b2a290eaa78463366769d9da8a72b
|
[] |
no_license
|
wenshijie/Foundations_Of_Algorithms
|
ebaa946d0d90cb5159684a103f32f5eb9c3e8bb4
|
beb0671ae55b165b24be0e6935c4a0b7143b6f8e
|
refs/heads/master
| 2020-05-03T14:13:49.552350
| 2019-03-31T10:20:16
| 2019-03-31T10:21:03
| 178,671,582
| 3
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,055
|
py
|
# -*- coding: utf-8 -*-
"""
Created on 2019-01-10
@author: wenshijie
"""
# chapter5
import numpy as np
import random
# Algorithm 5.1: backtracking for the n-queens problem
def queens(n):
result = [0]*n
def _promising(ii):
k = 0
switch = True
while (k < ii) & switch:
if (result[k] == result[ii]) | (abs(result[k] - result[ii]) == ii - k):
switch = False
k += 1
return switch
def _queens(i):
if _promising(i):
if i == n-1:
print(result)
else:
for j in range(n):
result[i+1] = j
_queens(i+1)
_queens(-1)
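# Example (assumed call): queens(4) prints the two 4-queens solutions,
# [1, 3, 0, 2] and [2, 0, 3, 1], where result[i] is the queen's column in row i.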
# Algorithm 5.2: Monte Carlo estimate (left as a stub)
def estimate():
print()
# Algorithm 5.3: Monte Carlo estimate for the backtracking n-queens algorithm
def estimate_n_queens(n):
result = [0]*n
def _promising(ii):
k = 0
switch = True
while (k < ii) & switch:
if (result[k] == result[ii]) | (abs(result[k] - result[ii]) == ii - k):
switch = False
k += 1
return switch
i = -1
number = 1
m = 1
m_prod = 1
while (m != 0) & (i != n-1):
m_prod = m_prod * m
number = number + m_prod * n
i += 1
m = 0
prom_children = []
for j in range(n):
result[i] = j
if _promising(i):
m += 1
prom_children.append(j)
if m != 0:
j = random.choice(prom_children)
result[i] = j
return number
# Algorithm 5.4: backtracking for the sum-of-subsets problem
def sum_of_subsets(w, W):  # input array w and the target sum W
    w.sort()  # keep w in non-decreasing order
include = ['no']*len(w)
sum_w = sum(w)
def _sum_of_subsets(i, weight, total):
def _promising(k):
if k < len(w)-1:
return (weight + total >= W) & ((weight == W) | (weight + w[k + 1] <= W))
else:
                # when k is the last index, w[k+1] does not exist
return (weight + total >= W) & ((weight == W) | (weight + 0 <= W))
if _promising(i):
if weight == W:
print(include)
else:
include[i+1] = 'yes'
_sum_of_subsets(i+1, weight + w[i+1], total - w[i+1])
include[i+1] = 'no'
_sum_of_subsets(i+1, weight, total - w[i+1])
_sum_of_subsets(-1, 0, sum_w)
# Algorithm 5.5: backtracking for the m-coloring problem
def n_color(w, m):  # w is the adjacency matrix, m the number of colors
    n = np.shape(w)[0]  # number of vertices
    result = [-1]*n  # color assigned to each vertex
all_result = []
    def _promising(i):  # check whether vertex i is promising
switch = True
k = 0
while (k < i) & switch:
if w[k][i] & (result[k] == result[i]):
switch = False
k += 1
return switch
def n_coloring(i):
if _promising(i):
if i == n-1:
all_result.append(result.copy())
else:
                for color in range(1, m+1):  # try each of the m colors
result[i+1] = color
n_coloring(i+1)
n_coloring(-1)
if all_result:
return all_result
else:
        print('No solution for this graph with {} colors'.format(m))
# Algorithm 5.6: backtracking for the Hamiltonian circuit problem
def hamiltonian(w):  # adjacency matrix; vertices are indexed i = 0, 1, ..., n-1 and the tour starts at vertex 0
n = np.shape(w)[0]
    result = [0]*n  # the start vertex defaults to 0
all_result = []
def _promising(i):
        if (i == n-1) & (not w[result[i]][result[0]]):  # the last vertex must be adjacent to the first
switch = False
        elif (i > 0) & (not w[result[i-1]][result[i]]):  # vertex i must be adjacent to vertex i-1
switch = False
else:
switch = True
j = 1
            while (j < i) & switch:  # has this vertex already been used?
if result[i] == result[j]:
switch = False
j += 1
return switch
def _hamiltonian(i):
if _promising(i):
if i == n-1:
all_result.append(result.copy())
else:
                for j in range(1, n):  # vertex 0 is the start, so later picks never reuse it
result[i+1] = j
_hamiltonian(i+1)
_hamiltonian(0)
if all_result:
return all_result
else:
        print('The undirected graph given by this adjacency matrix has no Hamiltonian circuit')
# Algorithm 5.7: backtracking for the 0-1 knapsack problem
def knapsack(w, p, W):  # assumes w and p are sorted by p[i]/w[i] in non-increasing order
n = len(w)
max_profit = [0]
best_set = [[]]
include = ['0']*n
def _promising(i, profit, weight):
if weight >= W:
return False
else:
total_weight = weight
bound = profit
j = i + 1
while j < n:
if total_weight + w[j] <= W:
total_weight = total_weight + w[j]
bound = bound + p[j]
else:
break
j += 1
if j < n:
bound = bound + (W - total_weight)*p[j]/w[j]
return bound > max_profit[0]
def _knapsack(i, profit, weight):
if(weight <= W) & (profit > max_profit[0]):
max_profit[0] = profit
best_set[0] = include[:i+1]
if _promising(i, profit, weight):
include[i+1] = 'yes'
_knapsack(i+1, profit + p[i+1], weight + w[i+1])
include[i+1] = 'no'
_knapsack(i + 1, profit, weight)
_knapsack(-1, 0, 0)
return max_profit[0], best_set[0]
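# Example (matching the call commented out in __main__ below):
# knapsack([2, 5, 10, 5], [40, 30, 50, 10], 16) returns (90, ['yes', 'no', 'yes']),
# i.e. take items 0 and 2 for profit 90 at weight 12 <= 16.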
if __name__ == '__main__':
# print(queens(6))
# print(estimate_n_queens(4))
w = [5, 6, 10, 11, 16]
print(sum_of_subsets(w, 21))
# w1 = np.array([[0, 1, 1, 1], [1, 0, 1, 0], [1, 1, 0, 1], [1, 0, 1, 0]])
# print(n_color(w1, 3))
# print(hamiltonian(w1))
# print(knapsack([2, 5, 10, 5], [40, 30, 50, 10], 16))
|
[
"276656491@qq.com"
] |
276656491@qq.com
|