blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fd86f23ab21b460ee4463ec6469061d189cf97eb | ed31a4c17c88e39f8ac70f17f8efd057dc334d7e | /1-OOP/test3.py | 6fe4ab8cb0ff4132352025d277f050a36403e143 | [] | no_license | 1216225797/python | e31d4a75f28ef204cac244d8d3e15ca1054fdb07 | a8be63e39740705615000ed7b76e5f8b48c43b41 | refs/heads/master | 2020-04-15T09:10:16.003542 | 2019-07-18T08:19:35 | 2019-07-18T08:19:35 | 164,539,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | # 定义一个集合的操作类
# 包括的方法:
# 1.集合元素添加
# 2.集合元素的交集
# 3.集合元素的差集
# 4.集合元素的并集
class SetInfo():
    """Thin wrapper around a set exposing add / intersection /
    difference / union operations.

    The ``&``, ``-`` and ``|`` operators would also work, but the named
    set methods are kept because they additionally accept arbitrary
    iterables, not just sets.
    """
    def __init__(self, s):
        # The wrapped set; mutated in place by add_value().
        self.s = s
    def add_value(self, v):
        """Add *v* to the wrapped set and return the updated set."""
        self.s.add(v)
        return self.s
    def intersection_value(self, s2):
        """Return the elements common to the wrapped set and *s2*."""
        return self.s.intersection(s2)
    def difference_value(self, s2):
        """Return the elements of the wrapped set that are absent from *s2*."""
        return self.s.difference(s2)
    def union_value(self, s2):
        """Return every element found in the wrapped set or in *s2*."""
        return self.s.union(s2)
# Demo: wrap an initial set; the commented calls exercise the other methods.
setinfo = SetInfo({1, 2, 3, 4, 5})
# print(setinfo.add_value(6))
# print(setinfo.s)
print(setinfo.difference_value({5, 7, 8})) | [
"1216225797@qq.com"
] | 1216225797@qq.com |
18cb6da4a1dcaa779b3ef0b93d2dd0af8d8ec46b | e4eabccc6d971289cf13653d1b6f290e39b870ab | /1651-shuffle-string/shuffle-string.py | 806acf60e20549daab09a587a9cd68b2470fb226 | [] | no_license | HEroKuma/leetcode | 128b38a9f559dc9e3f21c86a47ede67ad72f7675 | b3045aaedbe98eddc7e4e518a03a9337a63be716 | refs/heads/master | 2023-01-03T12:12:31.018717 | 2020-11-01T16:56:47 | 2020-11-01T16:56:47 | 260,488,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # Given a string s and an integer array indices of the same length.
#
# The string s will be shuffled such that the character at the ith position moves to indices[i] in the shuffled string.
#
# Return the shuffled string.
#
#
# Example 1:
#
#
# Input: s = "codeleet", indices = [4,5,6,7,0,2,1,3]
# Output: "leetcode"
# Explanation: As shown, "codeleet" becomes "leetcode" after shuffling.
#
#
# Example 2:
#
#
# Input: s = "abc", indices = [0,1,2]
# Output: "abc"
# Explanation: After shuffling, each character remains in its position.
#
#
# Example 3:
#
#
# Input: s = "aiohn", indices = [3,1,4,2,0]
# Output: "nihao"
#
#
# Example 4:
#
#
# Input: s = "aaiougrt", indices = [4,0,2,6,7,3,1,5]
# Output: "arigatou"
#
#
# Example 5:
#
#
# Input: s = "art", indices = [1,0,2]
# Output: "rat"
#
#
#
# Constraints:
#
#
# s.length == indices.length == n
# 1 <= n <= 100
# s contains only lower-case English letters.
# 0 <= indices[i] < n
# All values of indices are unique (i.e. indices is a permutation of the integers from 0 to n - 1).
#
class Solution:
    def restoreString(self, s: str, indices: List[int]) -> str:
        """Return the shuffle of *s* in which the character at position
        i of *s* ends up at position indices[i]."""
        shuffled = [''] * len(s)
        # Walk each character together with its destination slot.
        for ch, target in zip(s, indices):
            shuffled[target] = ch
        return ''.join(shuffled)
| [
"zx8733520+github@gapp.nthu.edu.tw"
] | zx8733520+github@gapp.nthu.edu.tw |
6273169a24e167c2d6402bebc827c0e25f504dd3 | 82a736afc90d728530b5fdaf8f628e892b8f888c | /flash/pointcloud/detection/model.py | 155126d78555c4bc313965de8b4ef3a440bca26b | [
"Apache-2.0"
] | permissive | mahdipira/lightning-flash | e3b725c01e37de1a933b627017a3077400540bc7 | 3b88e74e54cf8269e41859e3d218973d9e1e9979 | refs/heads/master | 2023-08-11T00:33:00.510246 | 2021-09-14T22:51:38 | 2021-09-14T22:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,765 | py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Tuple, Type, Union
import torch
import torchmetrics
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader, Sampler
from flash.core.data.auto_dataset import BaseAutoDataset
from flash.core.data.data_source import DefaultDataKeys
from flash.core.data.process import Serializer
from flash.core.data.states import CollateFn
from flash.core.model import Task
from flash.core.registry import FlashRegistry
from flash.core.utilities.apply_func import get_callable_dict
from flash.core.utilities.imports import _POINTCLOUD_AVAILABLE
from flash.pointcloud.detection.backbones import POINTCLOUD_OBJECT_DETECTION_BACKBONES
# Marker substring of sys.argv[0]: when the running script's name contains it,
# PointCloudObjectDetector.__init__ skips replacing the classification head.
__FILE_EXAMPLE__ = "pointcloud_detection"
class PointCloudObjectDetectorSerializer(Serializer):
    """Serializer for point-cloud object detection outputs.

    No behaviour is overridden yet; presumably an extension point for
    task-specific serialization.
    """
    pass
class PointCloudObjectDetector(Task):
    """The ``PointCloudObjectDetector`` is a :class:`~flash.core.classification.ClassificationTask` that classifies
    pointcloud data.
    Args:
        num_classes: The number of classes (outputs) for this :class:`~flash.core.model.Task`.
        backbone: The backbone name (or a tuple of ``nn.Module``, output size) to use.
        backbone_kwargs: Any additional kwargs to pass to the backbone constructor.
        head: Optional replacement head module (currently unused by ``__init__``).
        loss_fn: The loss function to use. If ``None``, a default will be selected by the
            :class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
        optimizer: The optimizer or optimizer class to use.
        optimizer_kwargs: Additional kwargs to use when creating the optimizer (if not passed as an instance).
        scheduler: The scheduler or scheduler class to use.
        scheduler_kwargs: Additional kwargs to use when creating the scheduler (if not passed as an instance).
        metrics: Any metrics to use with this :class:`~flash.core.model.Task`. If ``None``, a default will be selected
            by the :class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
        learning_rate: The learning rate for the optimizer.
        serializer: The :class:`~flash.core.data.process.Serializer` to use for prediction outputs.
        lambda_loss_cls: The value to scale the loss classification.
        lambda_loss_bbox: The value to scale the bounding boxes loss.
        lambda_loss_dir: The value to scale the bounding boxes direction loss.
    """
    # Registry of available backbones and the pip extra required to use this task.
    backbones: FlashRegistry = POINTCLOUD_OBJECT_DETECTION_BACKBONES
    required_extras: str = "pointcloud"
    def __init__(
        self,
        num_classes: int,
        backbone: Union[str, Tuple[nn.Module, int]] = "pointpillars_kitti",
        backbone_kwargs: Optional[Dict] = None,
        head: Optional[nn.Module] = None,
        loss_fn: Optional[Callable] = None,
        optimizer: Union[Type[torch.optim.Optimizer], torch.optim.Optimizer] = torch.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        scheduler: Optional[Union[Type[_LRScheduler], str, _LRScheduler]] = None,
        scheduler_kwargs: Optional[Dict[str, Any]] = None,
        metrics: Union[torchmetrics.Metric, Mapping, Sequence, None] = None,
        learning_rate: float = 1e-2,
        serializer: Optional[Union[Serializer, Mapping[str, Serializer]]] = PointCloudObjectDetectorSerializer(),
        lambda_loss_cls: float = 1.0,
        lambda_loss_bbox: float = 1.0,
        lambda_loss_dir: float = 1.0,
    ):
        # model=None: the underlying network is built below from the backbone registry.
        super().__init__(
            model=None,
            loss_fn=loss_fn,
            optimizer=optimizer,
            optimizer_kwargs=optimizer_kwargs,
            scheduler=scheduler,
            scheduler_kwargs=scheduler_kwargs,
            metrics=metrics,
            learning_rate=learning_rate,
            serializer=serializer,
        )
        self.save_hyperparameters()
        if backbone_kwargs is None:
            backbone_kwargs = {}
        if isinstance(backbone, tuple):
            # NOTE(review): this branch leaves self.model and collate_fn unset,
            # so the set_state/loss lines below would fail — confirm whether the
            # tuple form is actually supported.
            self.backbone, out_features = backbone
        else:
            self.model, out_features, collate_fn = self.backbones.get(backbone)(**backbone_kwargs)
            self.backbone = self.model.backbone
            self.neck = self.model.neck
            # NOTE(review): set_state is invoked three times with the same
            # CollateFn — appears redundant; confirm whether once suffices.
            self.set_state(CollateFn(collate_fn))
            self.set_state(CollateFn(collate_fn))
            self.set_state(CollateFn(collate_fn))
            self.loss_fn = get_callable_dict(self.model.loss)
        # Replace the classification head to match num_classes, except when
        # running the bundled example script (detected via sys.argv[0]).
        if __FILE_EXAMPLE__ not in sys.argv[0]:
            self.model.bbox_head.conv_cls = self.head = nn.Conv2d(
                out_features, num_classes, kernel_size=(1, 1), stride=(1, 1)
            )
    def compute_loss(self, losses: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
        """Combine the classification, bbox and direction losses into one
        weighted scalar using the lambda_* hyperparameters.

        NOTE(review): the annotation says Tuple but a single tensor is
        returned — confirm intended return type.
        """
        losses = losses["loss"]
        return (
            self.hparams.lambda_loss_cls * losses["loss_cls"]
            + self.hparams.lambda_loss_bbox * losses["loss_bbox"]
            + self.hparams.lambda_loss_dir * losses["loss_dir"]
        )
    def compute_logs(self, logs: Dict[str, Any], losses: Dict[str, torch.Tensor]):
        """Add the combined loss to the logging dict and return it."""
        logs.update({"loss": self.compute_loss(losses)})
        return logs
    def training_step(self, batch: Any, batch_idx: int) -> Any:
        # The model consumes the batch as both input and target, hence (batch, batch).
        return super().training_step((batch, batch), batch_idx)
    def validation_step(self, batch: Any, batch_idx: int) -> Any:
        super().validation_step((batch, batch), batch_idx)
    def test_step(self, batch: Any, batch_idx: int) -> Any:
        # NOTE(review): delegates to validation_step (there is no test-specific
        # logic) — confirm this is intentional.
        super().validation_step((batch, batch), batch_idx)
    def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
        """Run inference and package inputs, predicted boxes and sample names."""
        results = self.model(batch)
        boxes = self.model.inference_end(results, batch)
        return {
            DefaultDataKeys.INPUT: getattr(batch, "point", None),
            DefaultDataKeys.PREDS: boxes,
            DefaultDataKeys.METADATA: [a["name"] for a in batch.attr],
        }
    def forward(self, x) -> torch.Tensor:
        """First call the backbone, then the model head."""
        # hack to enable backbone to work properly.
        self.model.device = self.device
        return self.model(x)
    def _process_dataset(
        self,
        dataset: BaseAutoDataset,
        batch_size: int,
        num_workers: int,
        pin_memory: bool,
        collate_fn: Callable,
        shuffle: bool = False,
        drop_last: bool = True,
        sampler: Optional[Sampler] = None,
    ) -> DataLoader:
        """Attach the model's preprocess/transform hooks to the dataset and
        wrap it in a DataLoader."""
        if not _POINTCLOUD_AVAILABLE:
            raise ModuleNotFoundError("Please, run `pip install flash[pointcloud]`.")
        dataset.preprocess_fn = self.model.preprocess
        dataset.transform_fn = self.model.transform
        return DataLoader(
            dataset,
            batch_size=batch_size,
            num_workers=num_workers,
            pin_memory=pin_memory,
            collate_fn=collate_fn,
            shuffle=shuffle,
            drop_last=drop_last,
            sampler=sampler,
        )
| [
"noreply@github.com"
] | noreply@github.com |
3a1d3308126fb1c080d42123898ce41d4a3cc0db | 9e49e30b70de98649149dafb711447ba52262d33 | /recommandation/forms/profileForm.py | a62ce88bb861cea5e7f6190dac7f90caf1f5bc42 | [] | no_license | EasyProjectGTID/Give-me-a-movie | 30cf5fbd5c6cdc76381a0a19fe009442abca1d13 | 0fa0f0a5d4b696d48d79c13727e69fd8b104ef05 | refs/heads/master | 2020-04-07T18:04:34.099339 | 2019-06-10T14:56:28 | 2019-06-10T14:56:28 | 158,595,866 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | from django import forms
class ProfilForm(forms.Form):
    """Form for the login page: a username and an e-mail field.

    Both widgets carry the 'form-control' CSS class; the username
    placeholder is French ("Compte utilisateur" = user account).
    """
    # Account name, rendered as a plain text input.
    username = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Compte utilisateur'}))
    # E-mail address; EmailField validates the format on submit.
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'class': "form-control", 'type': "text", 'placeholder': "Email"}))
| [
"hhoutmann@gmail.com"
] | hhoutmann@gmail.com |
8334d00aecbe667d6334e195d764962a73cfa895 | eee3c44c595ad2eccecff03a9be75fbcc52944a3 | /22/aoc-2020-22.py | 7f3bd2278e7e3b12b40a7f0eedd8ad93d519b25c | [] | no_license | wjholden/Advent-of-Code-2020 | 796ce0cc6e86a0edfeacf275b5321de61b30db60 | 072fe3ab7d29338c03ae9eb51ea2509da469b873 | refs/heads/master | 2023-02-13T04:11:34.765278 | 2020-12-27T13:46:25 | 2020-12-27T13:46:25 | 317,175,790 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | with open('input.txt') as f:
players = f.read().split('\n\n')
p1 = [int(i) for i in players[0].split()[2:]]
p2 = [int(i) for i in players[1].split()[2:]]
def round():
    """Play one round of Combat, mutating the module-level decks p1/p2.

    Each player draws their top card; the owner of the higher card puts
    both cards at the bottom of their deck, their own card first.
    NOTE: shadows the built-in ``round``; the name is kept so existing
    callers keep working.
    """
    global p1
    global p2
    m1 = p1.pop(0)
    m2 = p2.pop(0)
    if m1 > m2:
        # Player 1 wins the round: winner's card goes under the deck first.
        p1.append(m1)
        p1.append(m2)
    else:
        p2.append(m2)
        p2.append(m1)
    # All card values are distinct, so a tie (m1 == m2) cannot occur.
def play(deck1=None, deck2=None):
    """Play Combat to completion and return the winning player's score.

    Args:
        deck1, deck2: optional starting decks (top card first).  When
            supplied they replace the module-level ``p1``/``p2`` decks,
            which makes the function usable without the puzzle input
            file; when omitted the behaviour is unchanged.

    Returns:
        The winner's score: the sum of card value times its 1-based
        position counted from the bottom of the deck.
    """
    global p1
    global p2
    if deck1 is not None:
        p1 = list(deck1)
    if deck2 is not None:
        p2 = list(deck2)
    while p1 and p2:
        round()
    # The player whose deck emptied lost; the other deck holds every card.
    winner = p2 if not p1 else p1
    score = 0
    for i, card in enumerate(winner):
        score += card * (len(winner) - i)
    return score
print('Part 1:', play())
| [
"wjholden@gmail.com"
] | wjholden@gmail.com |
92149e98faa52eebca609f4efe75225452feba29 | 49b1671e85fe800d89faa15ed1499014c0b75fad | /toggle.py | a4bc209049b3575f06190ec75f047daf11c73377 | [] | no_license | NBPat/mightyC | 17f53fb22704b50f8a89df3fc5c254cb91c1d0fe | ace8218eb5f87fd9a3bd577c2a0f694b02eeb826 | refs/heads/master | 2021-02-28T08:37:37.046859 | 2020-03-09T15:41:15 | 2020-03-09T15:41:15 | 245,678,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | # toggle p0
from microbit import *
# Toggle pin 0 forever: hold it low for 500 ms, then high for 500 ms,
# i.e. a 1 Hz square wave on P0.
while True:
    pin0.write_digital(0)
    sleep(500)
    pin0.write_digital(1)
    sleep(500)
| [
"noreply@github.com"
] | noreply@github.com |
8c58420b17067a043d83a13e81e8f3ba80e49f63 | c12b491e9a5c855a1fe835acdbc86a3ee2062d88 | /clean_conllu.py | 9bcf3e859f1eaf0198d898825f38d32a1b5b2849 | [] | no_license | ikwattro/corenlp-swedish-depparse-model | fd8def78a8e60b8d77c6bcc0bed2e641136a95f8 | fd1629ad486d15fa08e21cabb4d0a21330475f93 | refs/heads/master | 2020-04-24T12:23:27.665258 | 2017-11-22T18:13:32 | 2017-11-22T18:13:32 | 171,953,162 | 1 | 0 | null | 2019-02-21T22:10:41 | 2019-02-21T22:10:41 | null | UTF-8 | Python | false | false | 210 | py | # clean conll files
# Strip CoNLL-U comment lines (those starting with '#') from the Swedish
# dev treebank and write the remaining lines to a new file.
with open('sv-ud-dev.conllu') as src:
    kept = [line for line in src if not line.startswith('#')]
with open('swedish-dev.conllu', 'w') as dst:
    dst.writelines(kept)
"andreas.klintberg@meltwater.com"
] | andreas.klintberg@meltwater.com |
234f2f9400de6d64a16b1c44c642412687a10389 | 75da525ace4ee9d994f37b25405cbf878523eb55 | /RLForTrade.py | f9752deb3a03a7232f89a44670188821e20d2bce | [] | no_license | wuter/RLTrading | 06c4a09cd9bb23cb2d0f30c0b53639405e149d1e | 1141059061804486e83498dd634edef9259089df | refs/heads/master | 2020-03-21T04:29:24.964724 | 2018-06-21T02:50:20 | 2018-06-21T02:50:20 | 138,110,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,680 | py | import numpy as np
import tensorflow as tf
import pandas as pd
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from Market import Market
# Fix the NumPy and TensorFlow RNG seeds so experiments are reproducible.
np.random.seed(5)
tf.set_random_seed(5)  # reproducible
# Superparameters
OUTPUT_GRAPH = False # write the TF graph for TensorBoard when True
MAX_EPISODE = 1000 # number of training episodes
RENDER = False # rendering wastes time
GAMMA = 0.9 # reward discount in TD error
LR_A = 0.0001 # learning rate for actor
LR_C = 0.001 # learning rate for critic
REG = 0.0001 # L1 regularization strength applied to every dense layer
class Actor(object):
    """Policy network: maps a single state to a softmax distribution over
    actions and is trained with the TD error provided by the Critic."""
    def __init__(self, sess, n_features, n_actions, lr=0.001):
        self.sess = sess
        # Note: the actor-critic framework updates one step at a time,
        # so the state placeholder has shape 1 * n_features.
        self.s = tf.placeholder(tf.float32, [1, n_features], "state") # 1*n_features
        self.a = tf.placeholder(tf.int32, None, "act") # index of the action taken at s
        self.td_error = tf.placeholder(tf.float32, None, "td_error") # TD_error
        # Three hidden ReLU layers followed by a softmax output over actions.
        with tf.variable_scope('Actor'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=32, # number of hidden units
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='l1',
                kernel_regularizer= tf.contrib.layers.l1_regularizer(REG)
            )
            l2 = tf.layers.dense(
                inputs=l1,
                units=32, # number of hidden units
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='l2',
                kernel_regularizer= tf.contrib.layers.l1_regularizer(REG)
            )
            l3 = tf.layers.dense(
                inputs=l2,
                units=32, # number of hidden units
                activation=tf.nn.relu,
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='l3',
                kernel_regularizer= tf.contrib.layers.l1_regularizer(REG)
            )
            self.acts_nextprob = tf.layers.dense(
                inputs=l3,
                units=n_actions, # output units
                activation=tf.nn.softmax, # get action probabilities
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='acts_nextprob',
                kernel_regularizer = tf.contrib.layers.l1_regularizer(REG)
            )
        with tf.variable_scope('exp_v'):
            # +1e-3 guards against log(0) when a probability collapses.
            log_prob = tf.log(self.acts_nextprob[0, self.a]+1e-3)
            self.exp_v = tf.reduce_mean(log_prob * self.td_error) # advantage (TD_error) guided loss
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v) # minimize(-exp_v) = maximize(exp_v)
    def learn(self, s, a, td):
        """One gradient step on (state s, action a) weighted by TD error td."""
        s = s[np.newaxis, :]
        feed_dict = {self.s: s, self.a: a, self.td_error: td}
        _, exp_v = self.sess.run([self.train_op, self.exp_v], feed_dict)
        return exp_v
    def choose_action(self, s):
        """Sample an action index from the current policy's distribution at s."""
        s = s[np.newaxis, :]
        probs = self.sess.run(self.acts_nextprob, {self.s: s}) # get probabilities for all actions
        return np.random.choice(np.arange(probs.shape[1]), p=probs.ravel()) # return a int
class Critic(object):
    """Value network: estimates V(s) and produces the TD error
    r + GAMMA * V(s') - V(s) used to train both itself and the Actor."""
    def __init__(self, sess, n_features, lr=0.01):
        self.sess = sess
        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')
        with tf.variable_scope('Critic'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=32, # number of hidden units
                activation=tf.nn.relu, # None
                # have to be linear to make sure the convergence of actor.
                # But linear approximator seems hardly learns the correct Q.
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='l1',
                kernel_regularizer=tf.contrib.layers.l1_regularizer(REG)
            )
            l2 = tf.layers.dense(
                inputs=l1,
                units=32, # number of hidden units
                activation=tf.nn.relu, # None
                # have to be linear to make sure the convergence of actor.
                # But linear approximator seems hardly learns the correct Q.
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='l2',
                kernel_regularizer=tf.contrib.layers.l1_regularizer(REG)
            )
            l3 = tf.layers.dense(
                inputs=l2,
                units=32, # number of hidden units
                activation=tf.nn.relu, # None
                # have to be linear to make sure the convergence of actor.
                # But linear approximator seems hardly learns the correct Q.
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='l3',
                kernel_regularizer=tf.contrib.layers.l1_regularizer(REG)
            )
            self.v = tf.layers.dense(
                inputs=l3,
                units=1, # single output: the value function of the current state
                activation=None,
                kernel_initializer=tf.random_normal_initializer(0., .1), # weights
                bias_initializer=tf.constant_initializer(0.1), # biases
                name='V',
                kernel_regularizer=tf.contrib.layers.l1_regularizer(REG)
            )
        with tf.variable_scope('squared_TD_error'):
            # self.v  : value of the current state
            # self.v_ : value of the next state
            # self.r  : reward received in the current state
            self.td_error = self.r + GAMMA * self.v_ - self.v
            self.loss = tf.square(self.td_error) # TD_error = (r+gamma*V_next) - V_eval
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)
    def learn(self, s, r, s_next):
        """Update V(s) towards r + GAMMA * V(s_next); return the TD error."""
        s, s_next = s[np.newaxis, :], s_next[np.newaxis, :]
        v_ = self.sess.run(self.v, {self.s: s_next})
        td_error, _ = self.sess.run([self.td_error, self.train_op],
                                    {self.s: s, self.v_: v_, self.r: r})
        return td_error
def test(env,actor,critic,op_path):
    """Run one evaluation episode with learning disabled.

    Appends (action, state, reward, close[t], close[t-1]) tuples to
    *op_path* and prints the episode's compounded return.
    NOTE: *critic* is unused here — the learning calls below are
    deliberately commented out for evaluation.
    """
    s = env.reset_for_test()
    t = 0
    track_r = []
    prob = []
    while True:
        a = actor.choose_action(s)
        s_next, r, done, info = env.step(a)
        op_path.append((a, env.state, r, env.close[env.time], env.close[env.time - 1]))
        track_r.append(r)
        # The actor hands r and s_next observed from state s to the critic,
        # which computes the values V and V_ of s and s_next; the resulting
        # TD error would replace tf_vt of plain policy gradient.
        # td_error = critic.learn(s, r, s_next) # gradient = grad[r + gamma * V(s_next) - V(s)]
        # actor.learn(s, a, td_error) # true_gradient = grad[logPi(s,a) * td_error]
        s = s_next
        t += 1
        if done :
            # Compounded return: product of (1 + per-step reward).
            prob.append((np.array(track_r)+1).prod())
            print('测试轮的收益是%f'%(prob[-1]))
            break
# --- Training script: build the environment, actor and critic, then train ---
env = Market()
N_F = env.observation_space.shape[1]+1
N_A = len(env.action_space)
sess = tf.Session()
actor = Actor(sess, n_features=N_F, n_actions=N_A, lr=LR_A)
critic = Critic(sess, n_features=N_F, lr=LR_C)  # we need a good teacher, so the teacher should learn faster than the actor
sess.run(tf.global_variables_initializer())
if OUTPUT_GRAPH:
    summary_writer = tf.summary.FileWriter("logs/", sess.graph)
prob = []
ep_rs_nextsum = []
for i_episode in range(MAX_EPISODE):
    s = env.reset()
    t = 0
    track_r = []
    path = []
    loss = []
    while True:
        a = actor.choose_action(s)
        s_next, r, done, info = env.step(a)
        # print(( a, env.state, r, env.close[env.time],env.close[env.time-1]))
        track_r.append(r)
        # The actor hands r and s_next observed from state s to the critic,
        # which computes the values V and V_ of s and s_next; the resulting
        # TD error is fed back to the actor in place of tf_vt of plain
        # policy gradient.
        td_error = critic.learn(s, r, s_next)  # gradient = grad[r + gamma * V(s_next) - V(s)]
        loss.append(td_error)
        actor.learn(s, a, td_error)  # true_gradient = grad[logPi(s,a) * td_error]
        s = s_next
        t += 1
        if done :
            ep_rs_nextsum.append(env.cash)
            # Compounded episode return: product of (1 + per-step reward).
            prob.append((np.array(track_r)+1).prod())
            print('loss', np.mean(td_error))
            print('第%d轮的收益是%f'%(i_episode, prob[-1]))
            break
# Evaluate the trained policy once, then plot the per-episode returns.
op_path = []
test(env,actor,critic,op_path)
plt.figure()
plt.plot(range(len(prob)),prob)
plt.show()
for x in op_path:
    print(x)
| [
"noreply@github.com"
] | noreply@github.com |
72b4b3a87f62dba1dfc842f832c5cdb8caf9ab99 | b68723013bdd990176625b9e4e5c0a61a89c1ce6 | /photoenv/lib/python3.5/types.py | 71566f81e684dcdbeb0c3e20ef4d235deadf3e64 | [] | no_license | TZhouEnigma/InstagramClone | 00ca37a1dc8212fb53ab978486d4d4357cc57118 | 4b3ae3381de54a969832fdb2dec14b82afcb591e | refs/heads/master | 2021-01-23T01:26:21.257975 | 2017-05-10T19:01:18 | 2017-05-10T19:01:18 | 85,908,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | /Users/chuxiangzhou/anaconda/lib/python3.5/types.py | [
"chuxiangzhou@CHUXIANGs-MacBook.local"
] | chuxiangzhou@CHUXIANGs-MacBook.local |
9876a9af35eb3649f4f3c68253359af8c252f427 | 54df8336b50e8f2d7dbe353f0bc51a2b3489095f | /Front End/Kivy/project8/pro8.py | ca78447ed453ab88b83ef4fdd5468ca01be6e9f2 | [] | no_license | SurendraKumarAratikatla/MyLenovolapCodes1 | 42d5bb7a14bfdf8d773ee60719380ee28ff4947a | 12c56200fcfd3e5229bfeec209fd03b5fc35b823 | refs/heads/master | 2023-06-17T15:44:18.312398 | 2021-07-19T10:28:11 | 2021-07-19T10:28:11 | 387,358,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,288 | py | from kivy.uix.screenmanager import ScreenManager, Screen
from kivymd.app import MDApp
from kivymd.uix.screen import Screen
from kivymd.uix.textfield import MDTextField
from kivy.lang import Builder
from kivymd.uix.label import MDLabel, MDIcon
from helpers8 import screen_help
from kivymd.uix.button import MDRectangleFlatButton
from kivy.core.window import Window
from kivymd.uix.list import OneLineListItem
from kivy.uix.scrollview import ScrollView
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
from kivymd.uix.button import MDFloatingActionButtonSpeedDial
from kivymd.theming import ThemableBehavior
from kivymd.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
import sqlite3 as sql
import re
from kivymd.uix.taptargetview import MDTapTargetView
KV = '''
Screen:
MDFloatingActionButton:
id: button
icon: "head-question"
pos: 10, 10
on_release: app.tap_target_start()
elevation_normal: 10
'''
Window.size = (350, 600)
# Thin Screen subclasses: the layouts are defined in the KV string
# (helpers8.screen_help); the classes only give each screen a distinct type.
class MenuScreen(Screen):
    pass
class UserCustomerScreen(Screen):
    pass
class ProfileScreen(Screen):
    """Profile entry screen: collects a mobile number, user name and
    address (bound from KV) and saves them into the 'user.db' SQLite
    database (table ``id``)."""
    # Real Kivy properties so KV `mobile: some_id` bindings work.  The
    # original declared these as bare annotations (`mobile: ObjectProperty()`),
    # which never actually creates the properties.
    mobile = ObjectProperty(None)
    user = ObjectProperty(None)
    address = ObjectProperty(None)
    # Exactly ten digits; the original pattern ("^[0-9]\\d{10}$") demanded
    # eleven, which no ten-digit mobile number could ever satisfy.
    _MOBILE_RE = re.compile(r"^\d{10}$")
    def get_started(self):
        print('here we go')
    def add_user(self):
        """Validate the mobile number, then persist the profile.

        Fixes vs. the original: validation now happens *before* the
        INSERT (the row used to be written unconditionally); the check
        was inverted (the error label was shown for *valid* numbers);
        and the error label is attached to this screen instead of a
        throwaway ``Screen()`` instance that was never displayed.
        """
        if self._MOBILE_RE.match(self.mobile.text) is None:
            # Invalid number: show an inline error and leave the DB untouched.
            label = MDLabel(text='*You entered incorrect mobile number,', theme_text_color='Custom',
                            text_color=(0, 1, 0, 1), font_style='H6', pos_hint={'center_x': 0.5, 'center_y': 0.3})
            self.add_widget(label)
            return
        con = sql.connect('user.db')
        try:
            cur = con.cursor()
            # Parameterized query: safe against SQL injection.
            cur.execute(""" INSERT INTO id (mobile,user,address) VALUES (?,?,?)""", (self.mobile.text, self.user.text, self.address.text))
            con.commit()
        finally:
            con.close()
# More thin Screen subclasses for the screen manager; layouts live in KV.
# (Note the typo in "FreshEggsScrren" — renaming it would also require
# updating every KV rule that references it.)
class AllItemsScreen(Screen):
    pass
class RationScreen(Screen):
    pass
class BellScreen(Screen):
    pass
class FreshEggsScrren(Screen):
    pass
class ContentNavigationDrawer(BoxLayout):
    # Content container for the navigation drawer (a BoxLayout, not a Screen).
    pass
class AboutScreen(Screen):
    pass
class NotificationScreen(Screen):
    pass
class AboutRationScreen(Screen):
    pass
# Create the screen manager
# NOTE(review): AllItemsScreen is registered under 'usercustomer', 'ration',
# 'eggs' and 'aboutration' even though dedicated classes (UserCustomerScreen,
# RationScreen, FreshEggsScrren, AboutRationScreen) exist — looks like a
# copy-paste slip; confirm which class each route should use.  Also confirm
# whether this `sm` is used at all: DemoApp.build() constructs its UI from
# the KV strings instead.
sm = ScreenManager()
sm.add_widget(MenuScreen(name='menu'))
sm.add_widget(AllItemsScreen(name='usercustomer'))
sm.add_widget(ProfileScreen(name='profile'))
sm.add_widget(AllItemsScreen(name='allitems'))
sm.add_widget(AllItemsScreen(name='ration'))
sm.add_widget(AllItemsScreen(name='eggs'))
sm.add_widget(AllItemsScreen(name='aboutration'))
class DrawerList(ThemableBehavior, MDList):
    # Theme-aware list used inside the navigation drawer.
    pass
class DemoApp(MDApp):
    # Labels for the speed-dial actions, keyed by icon name — presumably
    # consumed by an MDFloatingActionButtonSpeedDial in the KV markup; verify.
    data = {
        'basket': 'Today Offers',
        'offer': 'Discounts',
        'cart': 'Cart Page',
    }
    # Runs once at import time (class-body code): create the profile table.
    # NOTE(review): the bare except is meant to swallow "table already
    # exists", but it also hides every other sqlite error — consider
    # catching sql.OperationalError or using CREATE TABLE IF NOT EXISTS.
    try:
        con = sql.connect('user.db')
        cur = con.cursor()
        cur.execute(""" CREATE TABLE id(
        mobile text,
        user text,
        address text)
        """)
        con.commit()
        con.close()
    except:
        pass
    def build(self):
        """Build the root widget: the KV help-button screen with the
        helpers8 first page stacked on top, and wire up the tap-target
        tutorial overlay around the help button."""
        #self.theme_cls.theme_style = 'Dark'
        #screen = Screen()
        firstpage = Builder.load_string(screen_help)
        screen = Builder.load_string(KV)
        self.tap_target_view = MDTapTargetView(
            widget=screen.ids.button,
            title_text="VZM Store",
            description_text='''Anyone can login as a user and
            you can publish your products to customers''',
            widget_position="left_bottom",
            target_circle_color=(142/255.0, 172/255.0, 249/255.0),
        )
        screen.add_widget(firstpage)
        return screen
    def navigation_draw(self):
        # NOTE(review): this builds a local ScreenManager and immediately
        # discards it — apparently dead code; confirm before removing.
        sm = ScreenManager()
        sm.add_widget(AllItemsScreen(name='bell'))
    def tap_target_start(self):
        """Toggle the tap-target tutorial overlay on the help button."""
        if self.tap_target_view.state == "close":
            self.tap_target_view.start()
        else:
            self.tap_target_view.stop()
if __name__ == '__main__':
    DemoApp().run()
| [
"suendra.aratikatla1608@gmail.com"
] | suendra.aratikatla1608@gmail.com |
2847b874f3e1df1652fe93cb406913574a4bd82c | e7bfe1de1cb66bf82d1723896eb7a9fb9fc843ac | /cesar.py | 62401cbc475f87a05ef3ae21749e3e29f3a68e9a | [] | no_license | Derekas/module1_p2 | 14ea6e648a2189b05592a6053b2d91c7cc3d4631 | af8f6a8b44393a59bc87dc80905c511cc4607c69 | refs/heads/master | 2023-09-02T10:17:22.757017 | 2021-11-14T18:00:30 | 2021-11-14T18:00:30 | 428,001,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | from typing import Text
def encrypt(text, s):
    """Encrypt *text* with a Caesar cipher, shifting letters by *s*.

    The text is upper-cased first.  Whitespace is dropped from the
    output (preserving the original behaviour); other non-letter
    characters pass through unchanged.  Negative shifts decrypt.

    Bug fixes vs. the original: the shift now uses the *s* parameter
    instead of the global ``shift``, and letters are rotated within the
    A-Z alphabet instead of being re-indexed into the input string
    (``text.index`` returned the first occurrence, corrupting any text
    with repeated characters).
    """
    text = text.upper()
    result = ""
    for ch in text:
        if ch.isspace():
            # Skip whitespace entirely, as the original implementation did.
            continue
        if 'A' <= ch <= 'Z':
            # Rotate within the 26-letter alphabet, wrapping around.
            result += chr((ord(ch) - ord('A') + s) % 26 + ord('A'))
        else:
            result += ch
    return result
# Interactive entry point: read the plaintext and the shift, then print
# the cipher text.  (Prompts are Spanish: "enter a text" / "enter the shift".)
text=input("Introduce un texto")
shift=int(input("Introduce el shift"))
print(encrypt(text,shift))
| [
"derekinformatica619@gmail.com"
] | derekinformatica619@gmail.com |
b8adf479dd7dc6ab41347cb682ab8c219f906b4c | e98500bcaa07f164126c4c819e519ecb67b63049 | /scripts/python/travis-before-install.py | 2b96d96945d68acf49ba9ba29828911e72b345b1 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | drodri/core | 6cc05f6af4f725d19b1084bceeba42b3a7a267a8 | ffc52c9c2049546828b20440f176c36cb73e6be6 | refs/heads/master | 2021-01-20T18:58:05.981071 | 2015-03-12T14:49:49 | 2015-03-12T14:49:49 | 32,041,598 | 0 | 0 | null | 2015-03-11T21:15:30 | 2015-03-11T21:15:30 | null | UTF-8 | Python | false | false | 2,642 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from utility import make_executable
from utility import DownloadError
from utility import EnvironError
from utility import download
from utility import execute
from utility import getenv
from utility import which
from utility import exit
from os import mkdir
cmake_script_url = 'http://cmake.org/files/v3.1/cmake-3.1.3-Linux-x86_64.sh'
llvm_key_url = 'http://llvm.org/apt/llvm-snapshot.gpg.key'
repository = {
'3.4': 'deb http://llvm.org/apt/precise llvm-toolchain-precise-3.4 main',
'3.5': 'deb http://llvm.org/apt/precise llvm-toolchain-precise-3.5 main',
'3.6': 'deb http://llvm.org/apt/precise llvm-toolchain-precise-3.6 main',
}
# Used to build libcxx when USE_LIBCXX=ON
libcxx_svn = {
'3.4': 'http://llvm.org/svn/llvm-project/libcxx/tags/RELEASE_342/final/',
'3.5': 'http://llvm.org/svn/llvm-project/libcxx/tags/RELEASE_350/final/',
'3.6': 'http://llvm.org/svn/llvm-project/libcxx/tags/RELEASE_360/final/'
}
backport = 'ppa:ubuntu-toolchain-r/test'
# CI "before_install" phase: read build configuration from the environment,
# fetch a modern CMake, register the GCC (and, for clang builds, LLVM) APT
# repositories, optionally check out libc++, then refresh APT.  Any download
# or environment failure aborts via exit().
if __name__ == '__main__':
    print('Checking environment variables...')
    try:
        use_libcxx = getenv('USE_LIBCXX')
        version = getenv('PACKAGE')
        cxx = getenv('CXX')
    except EnvironError as e: exit(e)
    # Interpret the raw environment values.
    libcxx = use_libcxx == 'ON'
    clang = 'clang' in cxx
    # download the cmake install script, mark executable
    print('Downloading CMake Installer...')
    try: download(cmake_script_url, 'cmake-amd64.sh')
    except DownloadError as e: exit(e)
    make_executable('cmake-amd64.sh')
    # add gcc to repos
    print('Adding GCC Repository...')
    execute('sudo', 'add-apt-repository', '--yes', backport)
    if clang:
        # download the llvm package key for the repositories
        print('Downloading LLVM Key...')
        try: download(llvm_key_url, 'llvm.key')
        except DownloadError as e: exit(e)
        # add llvm key to apt
        print('Adding LLVM Key to APT...')
        execute('sudo', 'apt-key', 'add', 'llvm.key')
        # add the appropriate 'PPA'
        print('Adding Clang APT Repository...')
        execute('sudo', 'add-apt-repository', '--yes', repository[version])
        # NOTE: the libc++ checkout only happens for clang builds because it
        # is nested under `if clang`.
        if libcxx:
            # checkout libcxx if use_libcxx is set to ON
            print('Checking out libcxx...')
            execute('svn', 'co', libcxx_svn[version], 'libcxx')
            try: mkdir('libcxx-build')
            except OSError as e: exit(e)
    print('Removing system provided cmake...')
    execute('sudo', 'apt-get', 'purge', 'cmake')
    print('Updating APT...')
    execute('sudo', 'apt-get', 'update')
| [
"tres.walsh@mnmlstc.com"
] | tres.walsh@mnmlstc.com |
faf3b5ffc73b80f5cb5728f55014305a2b80da4e | 738aedb8035e49951f83ce3f4291eee149cad5fb | /OB Damage - Li-Hopfield Model/All the code/SLURM setup files/dir_setup_OI-flat_10_2D.py | 2e83d567ac9f3004eca045a3289376859981b1dd | [] | no_license | jkberry07/OB_PD_Model | fb453303bfa64c1a3a43c7d81d2b5373950e1f4d | 1ce30205354dc30cab4673e406988bfa76390238 | refs/heads/master | 2022-11-21T09:39:09.692654 | 2020-07-25T23:25:11 | 2020-07-25T23:25:11 | 282,358,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 23 16:09:06 2019
@author: wmmjk
"""
import os
f = open('dir_setup_OI-flat_10_2D.sh','w+')
here = os.path.dirname(os.path.realpath(__file__))
subdir1 = 'OI-flat_10_2D'
f.write('mkdir '+subdir1+'\n')
f.write('cp OI-flat_10_2D.py '\
+'H0_10_2D_65Hz.npy W0_10_2D_65Hz.npy '+subdir1+'\n')
| [
"noreply@github.com"
] | noreply@github.com |
89e6683e391279884270bae480df6b3a56146ac5 | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /comisr/lib/model.py | b3f2d2423bbd3b56d18ce8e090b7122e47b40d2c | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 4,846 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model functions to reconstruct models."""
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from comisr.lib import ops
# Definition of the fnet, more details can be found in TecoGAN paper
def fnet(fnet_input, reuse=False):
"""Flow net."""
def down_block(inputs, output_channel=64, stride=1, scope='down_block'):
with tf.variable_scope(scope):
net = ops.conv2(
inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
net = ops.lrelu(net, 0.2)
net = ops.conv2(
net, 3, output_channel, stride, use_bias=True, scope='conv_2')
net = ops.lrelu(net, 0.2)
net = ops.maxpool(net)
return net
def up_block(inputs, output_channel=64, stride=1, scope='up_block'):
with tf.variable_scope(scope):
net = ops.conv2(
inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
net = ops.lrelu(net, 0.2)
net = ops.conv2(
net, 3, output_channel, stride, use_bias=True, scope='conv_2')
net = ops.lrelu(net, 0.2)
new_shape = tf.shape(net)[1:-1] * 2
net = tf2.image.resize(net, new_shape)
return net
with tf.variable_scope('autoencode_unit', reuse=reuse):
net = down_block(fnet_input, 32, scope='encoder_1')
net = down_block(net, 64, scope='encoder_2')
net = down_block(net, 128, scope='encoder_3')
net = up_block(net, 256, scope='decoder_1')
net = up_block(net, 128, scope='decoder_2')
net1 = up_block(net, 64, scope='decoder_3')
with tf.variable_scope('output_stage'):
net = ops.conv2(net1, 3, 32, 1, scope='conv1')
net = ops.lrelu(net, 0.2)
net2 = ops.conv2(net, 3, 2, 1, scope='conv2')
net = tf.tanh(net2) * 24.0
# the 24.0 is the max Velocity, details can be found in TecoGAN paper
return net
def generator_f_encoder(gen_inputs, num_resblock=10, reuse=False):
"""Generator function encoder."""
# The Bx residual blocks
def residual_block(inputs, output_channel=64, stride=1, scope='res_block'):
with tf.variable_scope(scope):
net = ops.conv2(
inputs, 3, output_channel, stride, use_bias=True, scope='conv_1')
net = tf.nn.relu(net)
net = ops.conv2(
net, 3, output_channel, stride, use_bias=True, scope='conv_2')
net = net + inputs
return net
with tf.variable_scope('generator_unit', reuse=reuse):
# The input layer
with tf.variable_scope('input_stage'):
net = ops.conv2(gen_inputs, 3, 64, 1, scope='conv')
stage1_output = tf.nn.relu(net)
net = stage1_output
# The residual block parts
for i in range(1, num_resblock + 1,
1): # should be 16 for TecoGAN, and 10 for TecoGANmini
name_scope = 'resblock_%d' % (i)
net = residual_block(net, 64, 1, name_scope)
return net
def generator_f_decoder(net,
gen_inputs,
gen_output_channels,
vsr_scale,
reuse=False):
"""Generator function decoder."""
with tf.variable_scope('generator_unit', reuse=reuse):
with tf.variable_scope('conv_tran2highres'):
if vsr_scale == 2:
net = ops.conv2_tran(
net, kernel=3, output_channel=64, stride=2, scope='conv_tran1')
net = tf.nn.relu(net)
if vsr_scale == 4:
net = ops.conv2_tran(net, 3, 64, 2, scope='conv_tran1')
net = tf.nn.relu(net)
net = ops.conv2_tran(net, 3, 64, 2, scope='conv_tran2')
net = tf.nn.relu(net)
with tf.variable_scope('output_stage'):
net = ops.conv2(net, 3, gen_output_channels, 1, scope='conv')
low_res_in = gen_inputs[:, :, :, 0:3] # ignore warped pre high res
bicubic_hi = ops.bicubic_x(low_res_in, scale=vsr_scale) # can put on GPU
net = net + bicubic_hi
net = ops.preprocess(net)
return net
# Definition of the generator.
def generator_f(gen_inputs,
gen_output_channels,
num_resblock=10,
vsr_scale=4,
reuse=False):
net = generator_f_encoder(gen_inputs, num_resblock, reuse)
net = generator_f_decoder(net, gen_inputs, gen_output_channels, vsr_scale,
reuse)
return net
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
8685c3131ab6f05892fa4bb1a02fb52814f79a92 | 5332ecf5b6bed264bc8da450c14157f84aa5e5c8 | /server/models/user.py | aa63d51f62ab5b95a78fd99a8475ec51079acd59 | [] | no_license | marukosu/raspi-login-checker-server | 69878e9c863c0a3fe0d399c691c167cdb006600b | 14f6c0b0cb790b55dd51189bfd7f8f2747544ca4 | refs/heads/master | 2020-03-09T09:37:20.510592 | 2018-04-22T08:12:52 | 2018-04-22T08:12:52 | 128,716,909 | 1 | 1 | null | 2018-04-22T08:12:53 | 2018-04-09T05:02:17 | Python | UTF-8 | Python | false | false | 1,010 | py | from datetime import datetime
from server import db
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), index=True, nullable=False, unique=True)
cards = db.relationship('Card', backref='user', lazy=True)
logins = db.relationship('Login', backref='user', lazy=True)
created_at = db.Column(db.DateTime, nullable=False, default=datetime.now)
updated_at = db.Column(db.DateTime, nullable=False, default=datetime.now, onupdate=datetime.now)
def __init__(self, username):
self.username = username
def __repr__(self):
return f'<User {self.username}>'
def update(self, username):
self.username = username
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
@classmethod
def create(cls, username):
user = User(username)
db.session.add(user)
db.session.commit()
return user
| [
"ozeki.haru@gmail.com"
] | ozeki.haru@gmail.com |
0df5df687aaa9a0594fde9bd2120516095951eea | 94ce20de2c4fb82d5295c005541766400d64a8e9 | /cli/raft-tools/tools/ZAP/run.py | 8e2b6bd08dc365f1154e0dfd96d281f91ec2d02c | [
"LGPL-2.1-or-later",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | hookjr/rest-api-fuzz-testing | 27da0baab26472ede83609c6d3319a353dc9bf3a | d549b12f7213326e10d4cf5340c93d7803b0c839 | refs/heads/main | 2023-02-02T04:23:32.878185 | 2020-12-14T18:59:23 | 2020-12-14T18:59:23 | 318,024,547 | 0 | 0 | MIT | 2020-12-03T00:09:42 | 2020-12-03T00:02:58 | null | UTF-8 | Python | false | false | 2,068 | py | import json
import os
import subprocess
import sys
work_directory = os.environ['RAFT_WORK_DIRECTORY']
run_directory = os.environ['RAFT_TOOL_RUN_DIRECTORY']
def auth_token(init):
with open(os.path.join(work_directory, "task-config.json"), 'r') as task_config:
config = json.load(task_config)
auth_config = config.get("authenticationMethod")
if auth_config:
if auth_config.get("txtToken"):
token = os.environ.get(f"RAFT_{auth_config['txtToken']}") or os.environ.get(auth_config["txtToken"])
return token
elif auth_config.get("commandLine"):
subprocess.getoutput(auth_config.get("commandLine"))
elif auth_config.get("msal"):
msal_dir = os.path.join(run_directory, "..", "..", "auth", "python3", "msal")
if init:
print("Installing MSAL requirements")
subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", os.path.join(msal_dir, "requirements.txt")])
else:
print("Retrieving MSAL token")
sys.path.append(msal_dir)
authentication_environment_variable = auth_config["msal"]
import msal_token
token = msal_token.token_from_env_variable( authentication_environment_variable )
if token:
print("Retrieved MSAL token")
return token
else:
print("Failed to retrieve MSAL token")
return None
else:
print(f'Unhandled authentication configuration {auth_config}')
return None
if __name__ == "__main__":
if len(sys.argv) == 2 and sys.argv[1] == "install":
subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", os.path.join(run_directory, "requirements.txt")])
auth_token(True)
else:
token = auth_token(False)
import scan
scan.run(token) | [
"noreply@github.com"
] | noreply@github.com |
09228ff571e986a20c1511f785929ab92911067b | 53e44e08fe946dc9c648e570ba4fc7f0024b9dac | /apps/mascota/views.py | b4902ed20653e87d6e8a18ab58555f159f92a94c | [] | no_license | JPablo1997/Refugio_Django | 19079c30324aacb517132b05079336bf697d19a7 | 78044ae3249cf9ecccb642afbee6da821eda8a28 | refs/heads/master | 2021-05-01T15:03:12.817638 | 2018-02-13T19:50:23 | 2018-02-13T19:50:23 | 121,028,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core import serializers
from apps.mascota.forms import MascotaForm
from apps.mascota.models import Mascota
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
# Create your views here.
def index(request):
return render(request, 'mascota/index.html')
def listado(request):
lista = serializers.serialize('json', Mascota.objects.all(), fields=['nombre','sexo','edad_aproximada','fecha_rescate'])
return HttpResponse(lista, content_type='application/json')
def mascota_view(request):
if request.method == 'POST':
form = MascotaForm(request.POST)
if form.is_valid():
form.save()
return redirect('http://127.0.0.1:8000/mascota/listar/')
else:
form = MascotaForm()
return render(request, 'mascota/mascota_form.html', {'form':form})
def mascota_list(request):
mascota = Mascota.objects.all().order_by('id')
contexto = {'mascotas':mascota}
return render(request, 'mascota/mascota_list.html', contexto)
def mascota_edit(request, id_mascota):
mascota = Mascota.objects.get(id=id_mascota)
if request.method == 'GET':
form = MascotaForm(instance=mascota)
else:
form = MascotaForm(request.POST, instance=mascota)
if form.is_valid():
form.save()
return redirect('http://127.0.0.1:8000/mascota/listar/')
return render(request, 'mascota/mascota_form.html', {'form':form})
def mascota_delete(request, id_mascota):
mascota = Mascota.objects.get(id=id_mascota)
if request.method == 'POST':
mascota.delete()
return redirect('mascota_listar')
return render(request, 'mascota/mascota_delete.html', {'mascota':mascota})
class MascotaList(ListView):
model = Mascota
template_name = 'mascota/mascota_list.html'
ordering = ['id']
paginate_by = 2
class MascotaCreate(CreateView):
model = Mascota
form_class = MascotaForm
template_name = 'mascota/mascota_form.html'
success_url = reverse_lazy('mascota_listar')
class MascotaUpdate(UpdateView):
Model = Mascota
queryset = Mascota.objects.all()
form_class = MascotaForm
template_name = 'mascota/mascota_form.html'
success_url = reverse_lazy('mascota_listar')
class MascotaDelete(DeleteView):
Model = Mascota
queryset = Mascota.objects.all()
template_name = 'mascota/mascota_delete.html'
success_url = reverse_lazy('mascota_listar')
| [
"dc16009@ues.edu.sv"
] | dc16009@ues.edu.sv |
dd9ab3be8fc316483e7dbfc10219138f8cc4d984 | 2c41f838c5d5d43776a7520bc63b7b796f1618df | /is_prime/solution.py | 031c73627e158b20cf684a69ce1b81f13416a86f | [] | no_license | EmanuelStoyanov/homework1 | f1c4f849cb80ca1acb5c88bc7746747e8bf6398e | c693fcfb892fb126dbb40516c4f2711501c924bc | refs/heads/master | 2021-01-10T19:02:35.604292 | 2014-03-28T13:02:12 | 2014-03-28T13:02:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | def is_prime(n):
return sum_of_divisors(abs(n)) == abs(n) + 1
def sum_of_divisors(n):
return sum (divisors(n))
def divisors(n):
if n != 1:
arr = [1, n]
else:
return [1]
for i in range(2,n):
if n % i == 0:
arr.append(i)
return arr | [
"emanuel.stoyanov@gmail.com"
] | emanuel.stoyanov@gmail.com |
b46b95696facf08a3eaebf1ce39922c26140cb50 | f2b5ed13116eff2eecef88e941e2498585040fc2 | /WEEK4/reschedule_exams/submit_pass!.py | 3beb44193ced0705cbbeebc00976acfd4fa2e82b | [] | no_license | sinhalok/Coursera-Advanced-Algorithms-and-Complexity | 74cf0a6ee27a883e5fcfeedab3c01e5269dc63f6 | ae66df98cfa1aae772ac03e71c7022dedd96a59d | refs/heads/master | 2020-04-29T02:56:36.468546 | 2019-03-15T09:55:03 | 2019-03-15T09:55:03 | 175,789,466 | 12 | 9 | null | null | null | null | UTF-8 | Python | false | false | 7,717 | py | # python3
from enum import Enum
import collections
import sys
import threading
sys.setrecursionlimit(10 ** 6) # max depth of recursion
threading.stack_size(2 ** 26) # new thread will get stack of such size
class Ordered_Sets(collections.MutableSet):
def __init__( self, iterable=None ):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__( self ):
return len(self.map)
def __contains__( self, key ):
return key in self.map
def add( self, key ):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard( self, key ):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__( self ):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__( self ):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop( self, last=True ):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__( self ):
if not self:
return '%s()' % (self.__class__.__name__,)
def post_orders_( adjacents ):
"""
Order the nodes of the graph according to their
post order. Uses (possibly repeated) Depth First Search on
the graph.
"""
def dfs( node, order, traversed ):
traversed.add(node)
for adj in adjacents[node]:
if adj in traversed:
continue
dfs(adj, order, traversed)
if node in vertices:
vertices.remove(node)
order.add(node)
post_order = Ordered_Sets([])
traversed = set([])
vertices = set([node for node in range(len(adjacents))])
while True:
dfs(vertices.pop(), post_order, traversed)
if len(post_order) == len(adjacents):
break
assert len(post_order) == len(adjacents)
return list(post_order)
def connected_component_( adjacents, node, found ):
connected = set([])
def dfs( node, connected ):
connected.add(node)
found.add(node)
for adj in adjacents[node]:
if adj in found or adj in connected:
continue
dfs(adj, connected)
dfs(node, connected)
return connected
def analyse_connected_components_( n, adjacents, reverse ):
# Ensure topological ordering.
order = post_orders_(reverse)
# print('orders: {0}'.format(orders))
order_pointer = len(order) - 1
found = set([])
ccs = []
while order_pointer >= 0:
if order[order_pointer] in found:
order_pointer -= 1
continue
ccs.append(connected_component_(adjacents, order[order_pointer], found))
assert len(found) == len(adjacents), 'found {0} nodes, but {1} were specified'.format(len(found), n)
return ccs
class ImplicationGraph(object):
var_dict = {}
node_dict = {}
adjacents = None
reversed_adjs = None
def __init__( self, n, clauses ):
node_num = 0
self.adjacents = [[] for _ in range(2 * n)]
self.reversed_adjs = [[] for _ in range(2 * n)]
for clause in clauses:
left = clause[0]
right = clause[1]
for term in [left, right]:
if not term in self.node_dict:
self.var_dict[node_num] = term
self.node_dict[term] = node_num
node_num += 1
if not -term in self.node_dict:
self.var_dict[node_num] = -term
self.node_dict[-term] = node_num
node_num += 1
self.adjacents[self.node_dict[-left]].append(self.node_dict[right])
self.reversed_adjs[self.node_dict[right]].append(self.node_dict[-left])
# edges.append((node_dict[-left], node_dict[right]))
self.adjacents[self.node_dict[-right]].append(self.node_dict[left])
self.reversed_adjs[self.node_dict[left]].append(self.node_dict[-right])
# edges.append((node_dict[-right], node_dict[left]))
self.adjacents = self.adjacents[:node_num]
self.reversed_adjs = self.reversed_adjs[:node_num]
class Colour(Enum):
R = 0
G = 1
B = 2
def get_node_colour( var ):
node = (var - 1) // 3
c = var % 3
if c == 0:
return node, Colour(2)
if c == 2:
return node, Colour(1)
if c == 1:
return node, Colour(0)
def generate_2sat_clauses( n, edges, colours ):
"""
If C is the set of colours (R, G, B), the colour c of each node must change to one of the
colours in the set: C difference (c).
It must also be the case that the colour c of any two adjacent nodes is not the same.
"""
red = Colour(0)
green = Colour(1)
blue = Colour(2)
rgb = set([red, green, blue])
clauses = []
for node_ in range(1, n + 1):
node = node_ * 3 - 2
c1 = Colour[colours[node_ - 1]]
others = rgb.difference(set([c1]))
c2 = others.pop()
c3 = others.pop()
c1_var = node + c1.value
c2_var = node + c2.value
c3_var = node + c3.value
clauses += [[c2_var, c3_var], [-c2_var, -c3_var], [-c1_var, -c1_var]]
for edge in edges:
# Add adjacency conditions.
left = edge[0] * 3 - 2
right = edge[1] * 3 - 2
clauses += [[-left, -right], [-(left + 1), -(right + 1)], [-(left + 2), -(right + 2)]]
return clauses
def assign_new_colors( n, edges, colours ):
"""
Arguments: # * `n` - the number of vertices.
* `edges` - list of edges, each edge is a tuple (u, v), 1 <= u, v <= n.
* `colors` - list consisting of `n` characters, each belonging to the set {'R', 'G', 'B'}.
Return value:
* If there exists a proper recoloring, return value is a list containing new colors, similar to the `colors` argument.
* Otherwise, return value is None.
"""
num_vars = n * 3
clauses = generate_2sat_clauses(n, edges, colours[0])
graph = ImplicationGraph(num_vars, clauses)
ccs = analyse_connected_components_(num_vars, graph.adjacents, graph.reversed_adjs)
result = collections.defaultdict(lambda: None)
for cc in ccs:
cc_vars = set([])
for node in cc:
# Check valid solution.
litteral = graph.var_dict[node]
if abs(litteral) in cc_vars:
return None
else:
cc_vars.add(abs(litteral))
if result[abs(litteral)] is None:
if litteral < 0:
result[abs(litteral)] = 0
else:
result[abs(litteral)] = 1
result_colours = []
for key in sorted(result.keys()):
if result[key] == 1:
node, colour = get_node_colour(key)
result_colours.append(colour.name)
return result_colours
def main():
n, m = map(int, input().split())
colors = input().split()
edges = []
for i in range(m):
u, v = map(int, input().split())
edges.append((u, v))
new_colors = assign_new_colors(n, edges, colors)
if new_colors is None:
print("Impossible")
else:
print(''.join(new_colors))
main() | [
"aloksinha422@gmail.com"
] | aloksinha422@gmail.com |
8c28fb51601157fcd64fda227a3c3f719d5b5f4d | 69814c9c3881855e6a8981eb7fc3d869549d3cd0 | /sedfitter/sed/cube.py | 3eeab29c13a149243e1fb9efb4b41d7ba633b166 | [
"BSD-2-Clause"
] | permissive | xueyingtianhua/sedfitter | 83c2f94ba0fdba0af56ccc8a4ad6fd92b62085ab | ec8722ec423ac684e4930fe23a98cd7b2d5b9f50 | refs/heads/master | 2021-01-13T03:29:23.032572 | 2016-07-31T21:27:58 | 2016-07-31T21:27:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,222 | py | import abc
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy.extern import six
from astropy.table import Table
from ..utils.validator import validate_scalar, validate_array
from .helpers import parse_unit_safe, table_to_hdu, assert_allclose_quantity
__all__ = ['SEDCube', 'PolarizationCube']
@six.add_metaclass(abc.ABCMeta)
class BaseCube(object):
"""
A cube to represent a cube of models.
This consists of values and uncertainties as a function of wavelength,
aperture, and models.
Parameters
----------
names : 1-d iterable, optional
The names of all the models in the cube
distance : `~astropy.units.Quantity`, optional
The distance assumed for the values
wav : 1-d `~astropy.units.Quantity`, optional
The wavelengths at which the SEDs are defined (cannot be used with ``nu``)
nu : 1-d `~astropy.units.Quantity`, optional
The frequencies at which the SEDs are defined (cannot be used with ``wav``)
apertures : 1-d `~astropy.units.Quantity`, optional
The ap for which the SEDs are defined
val : 3-d `~astropy.units.Quantity`, optional
The values of the fluxes or polarization
unc : 3-d `~astropy.units.Quantity`, optional
The uncertainties in the fluxes or polarization
"""
_physical_type = None
def __init__(self, valid=None, names=None, distance=None, wav=None,
nu=None, apertures=None, val=None, unc=None):
# Which models are valid
self.valid = valid
# The names of all the models
self.names = names
# The distance at which the fluxes are defined
self.distance = distance
# The wavelengths and ap
self.wav = wav
self.nu = nu
self.apertures = apertures
# The value and uncertainties
self.val = val
self.unc = unc
def __eq__(self, other):
try:
assert np.all(self.valid == other.valid)
assert np.all(self.names == other.names)
assert_allclose_quantity(self.distance, other.distance)
assert_allclose_quantity(self.wav, other.wav)
assert_allclose_quantity(self.nu, other.nu)
assert_allclose_quantity(self.apertures, other.apertures)
assert_allclose_quantity(self.val, other.val)
assert_allclose_quantity(self.unc, other.unc)
except AssertionError:
raise
return False
else:
return True
@property
def valid(self):
"""
Which models are valid
"""
if self.n_models is None or self._valid is not None:
return self._valid
else:
return np.ones(self.n_models)
@valid.setter
def valid(self, value):
if value is None:
self._valid = None
else:
self._valid = validate_array('valid', value, ndim=1,
shape=None if self.n_models is None else (self.n_models,))
@property
def names(self):
"""
The names of the models
"""
return self._names
@names.setter
def names(self, value):
if value is None:
self._names = None
else:
if not isinstance(value, np.ndarray):
value = np.array(value)
self._names = value
@property
def wav(self):
"""
The wavelengths at which the SEDs are defined.
"""
if self._wav is None and self._nu is not None:
return self._nu.to(u.micron, equivalencies=u.spectral())
else:
return self._wav
@wav.setter
def wav(self, value):
if value is None:
self._wav = None
else:
self._nu = None
self._wav = validate_array('wav', value, domain='positive', ndim=1,
shape=None if self.nu is None else (len(self.nu),),
physical_type='length')
@property
def nu(self):
"""
The frequencies at which the SEDs are defined.
"""
if self._nu is None and self._wav is not None:
return self._wav.to(u.Hz, equivalencies=u.spectral())
else:
return self._nu
@nu.setter
def nu(self, value):
if value is None:
self._nu = None
else:
self._wav = None
self._nu = validate_array('nu', value, domain='positive', ndim=1,
shape=None if self.wav is None else (len(self.wav),),
physical_type='frequency')
@property
def apertures(self):
"""
The ap at which the SEDs are defined.
"""
return self._apertures
@apertures.setter
def apertures(self, value):
if value is None:
self._apertures = None
else:
self._apertures = validate_array('apertures', value, domain='positive',
ndim=1, physical_type='length')
@property
def distance(self):
"""
The distance at which the SEDs are defined.
"""
return self._distance
@distance.setter
def distance(self, value):
if value is None:
self._distance = None
else:
self._distance = validate_scalar('distance', value, domain='positive',
physical_type='length')
@property
def val(self):
"""
The fluxes or polarization values.
"""
return self._val
@val.setter
def val(self, value):
if value is None:
self._val = value
else:
self._val = validate_array('val', value, ndim=3,
shape=(self.n_models, self.n_ap, self.n_wav),
physical_type=self._physical_type)
@property
def unc(self):
"""
The uncertainties in the fluxes or polarization.
"""
return self._unc
@unc.setter
def unc(self, value):
if value is None:
self._unc = value
else:
self._unc = validate_array('unc', value, ndim=3,
shape=(self.n_models, self.n_ap, self.n_wav),
physical_type=self._physical_type)
@property
def n_ap(self):
if self.apertures is None:
return 1
else:
return len(self.apertures)
@property
def n_wav(self):
if self.wav is None:
return None
else:
return len(self.wav)
@property
def n_models(self):
if self.names is None:
return None
else:
return len(self.names)
@classmethod
def read(cls, filename, order='nu', memmap=True):
"""
Read models from a FITS file.
Parameters
----------
filename: str
The name of the file to read the cube from.
order: str, optional
Whether to sort the SED by increasing wavelength (`wav`) or
frequency ('nu').
"""
# Create class instance
cube = cls()
# Open FILE file
hdulist = fits.open(filename, memmap=memmap)
# Extract distance
cube.distance = hdulist[0].header['DISTANCE'] * u.cm
# Get validity
cube.valid = hdulist[0].data.astype(bool)
# Extract model names
cube.names = hdulist['MODEL_NAMES'].data['MODEL_NAME'].astype(str)
# Extract wavelengths
hdu_spectral = hdulist['SPECTRAL_INFO']
cube.wav = u.Quantity(hdu_spectral.data['WAVELENGTH'],
parse_unit_safe(hdu_spectral.columns[0].unit))
# Extract apertures
try:
hdu_apertures = hdulist['APERTURES']
except KeyError:
pass
else:
cube.apertures = u.Quantity(hdu_apertures.data['APERTURE'],
parse_unit_safe(hdu_apertures.columns[0].unit))
# Extract value
hdu_val = hdulist['VALUES']
cube.val = u.Quantity(hdu_val.data,
parse_unit_safe(hdu_val.header['BUNIT']),
copy=False)
# Extract uncertainty
try:
hdu_unc = hdulist['UNCERTAINTIES']
except KeyError:
pass
else:
cube.unc = u.Quantity(hdu_unc.data,
parse_unit_safe(hdu_unc.header['BUNIT']),
copy=False)
# The following should only use views and should therefore not be slow
if ((order == 'nu' and cube.nu[0] > cube.nu[-1]) or
(order == 'wav' and cube.wav[0] > cube.wav[-1])):
cube.wav = cube.wav[::-1]
cube.val = cube.val[:, ::-1, :]
cube.unc = cube.unc[:, ::-1, :]
return cube
def _check_all_set(self):
if self.wav is None:
raise ValueError("Wavelengths 'wav' are not set")
if self.nu is None:
raise ValueError("Frequencies 'nu' are not set")
if self.val is None:
raise ValueError("Values 'val' are not set")
if self.distance is None:
raise ValueError("Value 'distance' is not set")
def write(self, filename, overwrite=False, meta={}):
"""
Write the models to a FITS file.
Parameters
----------
filename: str
The name of the file to write the cube to.
"""
self._check_all_set()
hdulist = fits.HDUList()
# Create empty first HDU and add distance
hdu0 = fits.PrimaryHDU(data=self.valid.astype(int))
hdu0.header['distance'] = (self.distance.to(u.cm).value, 'Distance assumed for the values, in cm')
hdu0.header['NWAV'] = (self.n_wav, "Number of wavelengths")
if self.apertures is not None:
hdu0.header['NAP'] = (self.n_ap, "Number of apertures")
for key in meta:
hdu0.header[key] = meta[key]
hdulist.append(hdu0)
# Create names table
t1 = Table()
t1['MODEL_NAME'] = np.array(self.names, 'S')
hdu1 = table_to_hdu(t1)
hdu1.name = "MODEL_NAMES"
hdulist.append(hdu1)
# Create wavelength table
t2 = Table()
t2['WAVELENGTH'] = self.wav
t2['FREQUENCY'] = self.nu
hdu2 = table_to_hdu(t2)
hdu2.name = "SPECTRAL_INFO"
hdulist.append(hdu2)
# Create aperture table
if self.apertures is not None:
t3 = Table()
t3['APERTURE'] = self.apertures
hdu3 = table_to_hdu(t3)
hdu3.name = "APERTURES"
hdulist.append(hdu3)
# Create value HDU
hdu4 = fits.ImageHDU(self.val.value)
hdu4.header['BUNIT'] = self.val.unit.to_string()
hdu4.name = 'VALUES'
hdulist.append(hdu4)
# Create uncertainty HDU
if self.unc is not None:
hdu5 = fits.ImageHDU(self.unc.value)
hdu5.header['BUNIT'] = self.unc.unit.to_string()
hdu5.name = 'UNCERTAINTIES'
hdulist.append(hdu5)
# Write out HDUList
hdulist.writeto(filename, clobber=overwrite)
class SEDCube(BaseCube):
_physical_type = ('power', 'flux', 'spectral flux density')
def get_sed(self, model_name):
try:
sed_index = np.nonzero(self.names == model_name)[0][0]
except IndexError:
raise ValueError("Model '{0}' not found in SED cube".format(model_name))
from .sed import SED
sed = SED()
sed.name = model_name
sed.distance = self.distance
sed.wav = self.wav
sed.nu = self.nu
sed.apertures = self.apertures
sed.flux = self.val[sed_index, :,:]
sed.error = self.unc[sed_index, :,:]
return sed
class PolarizationCube(BaseCube):
_physical_type = ('dimensionless')
| [
"thomas.robitaille@gmail.com"
] | thomas.robitaille@gmail.com |
aed3eadc0c76d956c2c586e2a70d16eaec9aee78 | f36ea11d9055c4016f405ce4e59e63509a0200e7 | /FinancialBot/views.py | 4ea61f9985d100967fe78d69c8e3975582e78bd2 | [] | no_license | EricHu214/FinancialBot | 20136fbf1f59e1b5e700d63245adfa497cb90f73 | 9788d443b6b2b3c5335b2d19a4c8fc3b8f80363d | refs/heads/master | 2021-01-20T06:17:45.347025 | 2017-08-26T17:23:30 | 2017-08-26T17:23:30 | 101,499,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | """
Routes and views for the flask application.
"""
from datetime import datetime
from flask import render_template
from FinancialBot import app
@app.route('/')
@app.route('/home')
def home():
"""Renders the home page."""
return render_template(
'index.html',
title='Home Page',
year=datetime.now().year,
)
@app.route('/contact')
def contact():
"""Renders the contact page."""
return render_template(
'contact.html',
title='Contact',
year=datetime.now().year,
message='Your contact page.'
)
@app.route('/about')
def about():
"""Renders the about page."""
return render_template(
'about.html',
title='About',
year=datetime.now().year,
message='Your application description page.'
)
| [
"Eric.Hu214@outlook.com"
] | Eric.Hu214@outlook.com |
0109518040f1eac4f295b8006aa89a09b6316e58 | 57f1ceb6d03771ab7ce3ef50e423d69c97321e89 | /Top10StockBundle.py | 4f22b42c2fc3cdbfecc5d325008b19f37e97df2b | [] | no_license | SamSklar/EarningsCallTranscriptAlgo | 7f0a8d599780e297e392b47960c0f8923c50735e | 551eec0321958cc9f6407ff9b04f56e65d0478e1 | refs/heads/master | 2021-08-24T04:46:48.709751 | 2017-12-08T03:18:47 | 2017-12-08T03:18:47 | 113,525,491 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,508 | py | """
This is a template algorithm on Quantopian for you to adapt and fill in.
"""
from quantopian.algorithm import attach_pipeline, pipeline_output
from quantopian.pipeline import Pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import AverageDollarVolume
from quantopian.pipeline.data import morningstar
from quantopian.pipeline.filters.morningstar import Q1500US
from dateutil.relativedelta import relativedelta
from datetime import datetime
import pandas as pd
tickerCollection = []
dayCounter = 0
def preview(df):
log.info(df.head())
return df
def initialize(context):
    # Seed the holdings list and the replacement candidate so that
    # context.min_stock is always defined before the first trade.
    # sid(24) — presumably AAPL; confirm against the Quantopian sid table.
    context.stock_list = [sid(24)]
    context.min_stock = sid(24)
    """
    Called once at the start of the algorithm.
    """
    # External score file joined on 'Date'; rows expose the 'indicator'
    # and 'indicatordate' fields read in my_daily_trade.
    fetch_csv('https://www.dl.dropboxusercontent.com/s/jtxuk0152xp7m6o/NEWSCORES.csv?dl=0', date_column='Date', date_format='%m/%d/%y', pre_func=preview)
    context.stocks = symbols('AAPL', 'MSFT')
    #print context.tradeable.size
    # Rebalance every day, 1 hour after market open.
    schedule_function(my_daily_trade, date_rules.every_day(), time_rules.market_open(hours=1))
    # Record tracking variables at the end of each day.
    schedule_function(my_record_vars, date_rules.every_day(), time_rules.market_close())
    today = get_datetime()
    #one_year_ago = today - relativedelta(years=1)
    # Create our dynamic stock selector.
    pipe = attach_pipeline(make_pipeline(), 'pipe')
    # Morningstar fundamentals used to build the screen.
    ebitda = morningstar.income_statement.ebitda.latest
    long_term_debt = morningstar.balance_sheet.long_term_debt.latest
    #long_term_debty2 = morningstar.balance_sheet.long_term_debt.one_year_ago
    enterprise_value = morningstar.valuation.enterprise_value.latest
    market_cap = morningstar.valuation.market_cap.latest
    pipe.add(ebitda, 'ebitda')
    pipe.add(long_term_debt, 'long_term_debt')
    pipe.add(enterprise_value, 'enterprise_value')
    pipe.add(market_cap, 'market_cap')
    # Screen: non-zero EBITDA, EV/EBITDA in the cheaper half,
    # debt/EBITDA in the upper half, market cap in the middle 50%.
    nonzero = (ebitda != 0)
    ev_eb = enterprise_value/ebitda
    ev_eb_ratio = ev_eb.percentile_between(0,50)
    ltd_eb = long_term_debt/ebitda
    ltd_eb_ratio = ltd_eb.percentile_between(50,100)
    percentile = market_cap.percentile_between(25,75)
    pipe.set_screen(nonzero & ev_eb_ratio & ltd_eb_ratio & percentile, overwrite=True)
def make_pipeline():
    """Build the daily screening pipeline.

    Returns a Pipeline restricted to the Q1500US universe exposing a
    single 'close' column with the previous session's closing price.
    (initialize() later attaches more columns and a tighter screen.)
    """
    universe = Q1500US()
    prior_close = USEquityPricing.close.latest
    return Pipeline(
        columns={'close': prior_close},
        screen=universe,
    )
def before_trading_start(context, data):
    """Run once per day before the market opens.

    Intentionally a no-op: the pipeline output is pulled inside
    my_daily_trade instead, so the original lookup is left disabled.
    """
    # Disabled — my_daily_trade fetches the pipeline output itself:
    # context.output = pipeline_output('pipe')
    # context.security_list = context.output.index
    return None
def my_assign_weights(context, data):
    """Placeholder for a portfolio-weighting scheme; currently unused
    (my_daily_trade equal-weights the holdings directly)."""
    return None
def my_daily_trade(context,data):
    """
    Execute orders according to our schedule_function() timing.

    While fewer than 10 names are held (or on day 0): fill the book to
    10 stocks from the pipeline, tracking the *highest*-'indicator'
    holding in context.min_stock (the replacement candidate).
    Afterwards: when a pipeline stock's transcript date
    ('indicatordate') is today and its score is lower than the tracked
    holding's, swap it in and retarget the weights.
    """
    context.output = pipeline_output('pipe')
    context.security_list = context.output.index
    """
    print("stock list: ")
    for stock in context.security_list:
        print(stock)
    """
    """
    last_date_str = data.current(sid(24), 'indicatorDate')
    last_date = datetime.strptime(last_date_str, "%m/%d/%y")
    today = str(get_datetime(None))
    today_date = datetime.strptime(today[0:10], "%Y-%m-%d")
    difference = (today_date - last_date).days
    print difference
    """
    global dayCounter
    print len(context.security_list)
    #buy top 10 stocks to start
    if dayCounter == 0 or len(context.stock_list) < 10:
        minval = -1
        for stock in context.security_list:
            if len(context.stock_list) < 10:
                context.stock_list.append(stock)
                # Despite the name, min_stock tracks the MAXIMUM
                # indicator seen so far; '>=' lets later ties win.
                if data.current(stock, 'indicator') >= minval:
                    context.min_stock = stock
                    minval = data.current(stock, 'indicator')
            else:
                # Book is full: a higher-scoring stock displaces the
                # current candidate.
                if data.current(stock, 'indicator') >= minval:
                    context.stock_list.append(stock)
                    context.stock_list.remove(context.min_stock)
                    context.min_stock = stock
                    minval = data.current(stock, 'indicator')
        # Equal-weight all holdings at 10% each.
        for stock in context.stock_list:
            order_target_percent(stock, 1.0/10)
    else:
        today = str(get_datetime(None))
        today_date = datetime.strptime(today[0:10], "%Y-%m-%d")
        for stock in context.security_list:
            if data.can_trade(stock):
                # 'indicatordate' is NaN for rows with no fresh score.
                last_date_str = str(data.current(stock, 'indicatordate'))
                if(last_date_str != 'nan'):
                    last_date = datetime.strptime(last_date_str, "%m/%d/%y")
                    #check if new transcript came out today
                    if (today_date-last_date).days == 0:
                        new_score = data.current(stock, 'indicator')
                        # Lower score is better here: replace the worst
                        # (highest-indicator) holding with this stock.
                        if new_score < data.current(context.min_stock, 'indicator'):
                            order_target_percent(context.min_stock, 0)
                            context.stock_list.remove(context.min_stock)
                            order_target_percent(stock, 1.0/10)
                            context.stock_list.append(stock)
                            updateMin(context, data)
    print len(context.stock_list)
    dayCounter = dayCounter + 1
    pass
def updateMin(context, data):
    """Recompute context.min_stock over context.stock_list.

    NOTE(review): despite the name, this selects the stock with the
    *highest* 'indicator' score (ties resolved in favour of the later
    entry); callers treat that stock as the replacement candidate.
    Leaves min_stock untouched when the list is empty.
    """
    best_score = -1
    for candidate in context.stock_list:
        score = data.current(candidate, 'indicator')
        if score >= best_score:
            context.min_stock = candidate
            best_score = score
def my_record_vars(context, data):
    """End-of-day hook for record() plotting; intentionally a no-op."""
    return None
def handle_data(context,data):
    """Per-minute hook; all trading happens in scheduled functions."""
    return None
| [
"noreply@github.com"
] | noreply@github.com |
bb6067cb36c939472c10f118b81728af08833449 | fca51db47e2cea902a96bfee11275e2b5cb50978 | /dendrobium_inference.py | 4266ddaaebbe5edaaef15252d3faf44db6f8953a | [] | no_license | LeonHardt427/dendrobium_tensorflow | 5cbf97af3a721a39996db56432dd86e17bd69728 | e289aa178dcb5a7aa43b355352355fda33993e63 | refs/heads/master | 2021-08-09T03:04:05.702945 | 2017-11-12T02:36:19 | 2017-11-12T02:36:19 | 108,624,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/27 上午10:01
# @Author : LeonHardt
# @File : dendrobium_inference.py
import tensorflow as tf
# Set parameter
INPUT_NODE = 784   # input vector size (784 = 28*28 — presumably flattened images; confirm)
OUTPUT_NODE = 10   # number of output classes
LAYER1_NODE = 500  # width of the single hidden layer
# using 'tf.get_variable' to get variable.
def get_weight_variable(shape, regularizer):
    """Create (or reuse, under variable-scope reuse) a 'weights' variable
    of the given shape, truncated-normal initialized (stddev 0.1).
    If a regularizer is supplied, its penalty on the weights is added to
    the 'losses' collection for later summation into the total loss."""
    weights = tf.get_variable("weights", shape,initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights
# define the process of forward propagation
def inference(input_tensor, regularizer):
    """Forward propagation of a two-layer fully connected network.

    Parameters
    ----------
    input_tensor : tensor of shape [batch, INPUT_NODE]
        Flattened input batch.
    regularizer : callable or None
        Forwarded to get_weight_variable so weight penalties are added
        to the 'losses' collection.

    Returns
    -------
    Un-activated logits of the output layer, shape [batch, OUTPUT_NODE].
    """
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        # Bug fix: the original tf.constant_initializer(0, 0) passed 0 as
        # the *dtype* positional argument, which is not a valid TensorFlow
        # dtype and raises; the intent was a zero-valued bias initializer.
        biases = tf.get_variable("biases", [LAYER1_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases
    return layer2
| [
"leonhardt427@126.com"
] | leonhardt427@126.com |
59ca4aee3ddbe2bc9a8b423ae134935be38ca795 | b84ad47aac690d4f97b9948923785bb70ee9f8c1 | /settings.py | 6713816e3821d41a80ac35cd018ae987ad1cd75d | [] | no_license | shadabahmad01/alien_game | ec84b6c133f909e6304fd16272b0f5bab5bb2bdb | 616e695e4c8887082f8a47ac5bd2af7f6eb031d1 | refs/heads/main | 2023-06-27T04:42:38.138066 | 2021-07-22T18:00:33 | 2021-07-22T18:00:33 | 388,549,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py |
class Settings:
    """Container for every tunable constant in Alien Invasion."""

    def __init__(self):
        """Set up the static settings and seed the dynamic ones."""
        # Screen geometry and background colour.
        self.screen_width = 1400
        self.screen_height = 800
        self.bg_color = (230, 230, 230)

        # Ship: number of lives.
        self.ship_limit = 3

        # Bullet geometry, colour and on-screen cap.
        self.bullet_width = 100
        self.bullet_height = 15
        self.bullet_color = (60, 60, 60)
        self.bullets_allowed = 3

        # Alien fleet: vertical drop when an edge is hit.
        self.fleet_drop_speed = 10

        # Difficulty ramp: speed multiplier and score multiplier
        # applied by increase_speed().
        self.speedup_scale = 1.1
        self.score_scale = 1.5

        # Base score for shooting one alien.
        self.alien_points = 50

        self.initialize_dynamic_settings()

    def initialize_dynamic_settings(self):
        """Reset the values that grow as the game speeds up."""
        self.ship_speed = 1.5
        self.bullet_speed = 3.0
        # NOTE(review): 'alein_speed' is a typo, but it is the public
        # attribute name other modules read — kept for compatibility.
        self.alein_speed = 1.0
        # 1 means the fleet moves right; -1 means left.
        self.fleet_direction = 1

    def increase_speed(self):
        """Speed everything up and raise the per-alien score."""
        self.ship_speed *= self.speedup_scale
        self.bullet_speed *= self.speedup_scale
        self.alein_speed *= self.speedup_scale
        self.alien_points = int(self.alien_points * self.score_scale)
| [
"noreply@github.com"
] | noreply@github.com |
b2c5d5aea3e3c403e5e748fe961a49dd1e7d91d2 | b3e6b3e27a39ff953ccba31eddc12539c31a4e4c | /script/complex/complex_nogpu.py | ebefb03e4e5d7c21a05658de3b3ac1fe614e7027 | [
"Apache-2.0"
] | permissive | heitorsampaio/GMXSMDF | 7b7e02f35337513d01bb531e5f9eb80f28aaf521 | 530a5c56d9e79e9f582cf10ea3e8b0e6c80c9929 | refs/heads/master | 2020-03-27T18:29:46.280452 | 2019-04-14T02:29:08 | 2019-04-14T02:29:08 | 146,925,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,831 | py | #!/usr/bin/python3
from gmxsmdscript import *
import sys
import argparse
import os
# Command-line interface.  Note: -s opens the structure file immediately
# (type=lambda f: open(f)), so a bad path fails at argument parsing.
parser = argparse.ArgumentParser()
parser.add_argument("-s", dest="structure", required=True,
                    help="Structure filename directory '/home/patgen/Documentos/Dynamics/ProjectName'", type=lambda f: open(f))
parser.add_argument("-f", dest="folder", required=True,
                    help="Desired project folder name")
parser.add_argument("-mdt", dest="mdtime", required=True,
                    help="MD simulation time in nsteps (2 * 500000 = 1000 ps (1 ns)")
parser.add_argument("-l", dest="ligname", required=False,
                    help="Ligand filename .gro")
parser.add_argument("-grps", dest="grps", required=True,
                    help="TC-TGRPS for simulation (ligand name ex: DLG)")
args = parser.parse_args()
def yes_or_no(question):
    """Prompt the user until they answer yes or no.

    Returns 1 on an answer starting with 'y' (case-insensitive) and
    raises SystemExit on one starting with 'n'; any other input re-asks
    the same question.

    Bug fixes vs. the original: empty input no longer crashes with
    IndexError on reply[0], and the retry recurses with the caller's
    `question` instead of a hard-coded, unrelated prompt.
    """
    reply = str(input(question + ' (y/n): ')).lower().strip()
    if reply.startswith('y'):
        return 1
    if reply.startswith('n'):
        raise SystemExit
    return yes_or_no(question)
# Banner, confirmation prompt, then echo the parsed arguments.
# (User-facing strings, including the 'pourpose' typo, are left as-is.)
print("This Script was made by Heitor Sampaio")
while True:
    print("The pourpose of this script is to run a Simple MD simulation...")
    if(yes_or_no('Would you like to run the simulation? ')):
        break
print("done")
print(args.structure.name)
print(args.folder)
print(args.mdtime)
print(args.ligname)
print(args.grps)
# NOTE(review): `system` comes from gmxsmdscript — presumably a context
# manager that creates/enters the project folder; confirm there.
with system(args.folder):
    # System preparation: topology from the PDB structure.
    pdb2gmx(
        ff = 'gromos53a6',
        water = 'spce',
        f = [args.structure.name],
        o = 'protein.gro',
        p = 'topol.top',
        ignh = 'true'
    )
    # Local redefinition shadows the module-level yes_or_no; its retry
    # branch re-asks this stage's question.
    def yes_or_no(question):
        reply = str(input(question+' (y/n): ')).lower().strip()
        if reply[0] == 'y':
            return 1
        elif reply[0] == 'n':
            raise SystemExit
        else:
            return yes_or_no("Are you combine the protein and ligand to complex.gro and add the Protein-Ligand Topologies to topol.top? (y/n) ")
    print(
        "Please combine the protein.gro and the ligand.gro to a complex using the following command 'python combineGRO.py protein.gro ligand.gro' and then "
    )
    # Pause until the user has manually built complex.gro and merged the
    # ligand topology into topol.top.
    while True:
        print("Combine the Protein-Ligand Topologies to topol.top as showed in tutorial")
        if(yes_or_no('Are you combine the protein and ligand to complex.gro and add the Protein-Ligand Topologies to topol.top? ')):
            break
    print("done")
    # Periodic box and solvation.
    editconf(
        f = 'complex.gro',
        o = 'pbc.gro',
        bt = 'cubic',
        d = 1.0
    )
    solvate(
        cp = 'pbc.gro',
        cs = 'spc216.gro',
        o = 'sol.gro',
        p = 'topol.top'
    )
    grompp(
        maxwarn = 2,
        f = MDP['ions.mdp'],
        c = 'sol.gro',
        o = 'ions.tpr',
        p = 'topol.top'
    )
    # Replace solvent molecules with ions to neutralize the system.
    genion(
        s = 'ions.tpr',
        o = 'ions.gro',
        neutral = 1,
        p = "topol.top",
        stdin = """
        SOL
        """
    )
    # Steep descent energy minimization
    grompp(
        maxwarn = 2,
        f = MDP['em.mdp'],
        c = 'ions.gro',
        o = 'em.tpr',
        p = 'topol.top',
    )
    mdrun(deffnm = 'em' ,
          cpi = 'md.cpt',
          #append = 'true',
          #nt = 20,
          #pinoffset = 1,
          #pinstride = 2,
          #gpu_id = "01",
          v = 'true',
          #resethway = 'true',
          #pin = 'on',
          #nb = 'gpu'
          )
    # Extract the potential-energy term (menu entry 10).
    energy(
        f = 'em.edr',
        o = 'potential.xvg',
        stdin = """
        10 0
        """
    )
    # Index group of ligand heavy atoms (exclude hydrogens) for restraints.
    make_ndx(
        f = [args.ligname],
        o = 'index_lig.ndx',
        stdin = '''
        0 & ! a H*
        q | q
        '''
    )
    genrestr(
        f = [args.ligname],
        n = 'index_lig.ndx',
        o = 'posre_lig.itp',
        fc = [1000 , 1000 , 1000],
        stdin = """
        0
        """
    )
    # Another stage-specific shadowing redefinition of yes_or_no.
    def yes_or_no(question):
        reply = str(input(question+' (y/n): ')).lower().strip()
        if reply[0] == 'y':
            return 1
        elif reply[0] == 'n':
            raise SystemExit
        else:
            return yes_or_no("Are you add the ligand position restrain to topol.top? (y/n) ")
    print(
        "Please add the ligand position restrain to topol.top "
    )
    while True:
        print("Please add the ligand position restrain to topol.top as showed in tutorial")
        if(yes_or_no('Are you add the ligand position restrain to topol.top? ')):
            break
    print("done")
    # Merged index group (groups 1 and 13) used as tc-grps below.
    make_ndx(
        f = 'em.gro',
        o = 'index.ndx',
        stdin = '''
        1 | 13
        q | q
        '''
    )
    #nvt
    grompp(
        f = MDP['nvt.mdp', {
            'tc-grps' : [args.grps , 'Water_and_ions'] ,
        }],
        c = 'em.gro',
        o = 'nvt.tpr',
        p = 'topol.top',
        r = 'em.gro',
        n = 'index.ndx',
        maxwarn = 2
    )
    mdrun(deffnm = 'nvt',
          cpi = 'md.cpt',
          #append = 'true',
          #nt = 20,
          #pinoffset = 1,
          #pinstride = 2,
          #gpu_id = "01",
          v = 'true',
          #resethway = 'true',
          #pin = 'on',
          #nb = 'gpu'
          )
    # Temperature term (menu entry 15).
    energy(
        f = 'nvt.edr',
        o = 'temperature.xvg',
        stdin = """
        15 0
        """
    )
    #npt
    grompp(
        f = MDP['npt.mdp', {
            'tc-grps' : [args.grps , 'Water_and_ions'] ,
        }],
        c = 'nvt.gro',
        o = 'npt.tpr',
        p = 'topol.top',
        t = 'nvt.cpt',
        r = 'nvt.gro',
        n = 'index.ndx',
        maxwarn = 2
    )
    mdrun(deffnm = 'npt' ,
          cpi = 'md.cpt',
          #append = 'true',
          #nt = 20,
          #pinoffset = 1,
          #pinstride = 2,
          #gpu_id = "01",
          v = 'true',
          #resethway = 'true',
          #pin = 'on',
          #nb = 'gpu'
          )
    # Pressure (16) and density (22) terms.
    energy(
        f = 'npt.edr',
        o = 'pressure.xvg',
        stdin = """
        16 0
        """
    )
    energy(
        f = 'npt.edr',
        o = 'density.xvg',
        stdin = """
        22 0
        """
    )
    # Molecular dynamics
    grompp(
        f = MDP['md.mdp', {
            'nsteps' : args.mdtime ,
            'tc-grps' : [args.grps , 'Water_and_ions'] ,
        }],
        c = 'npt.gro',
        o = 'md.tpr',
        p = 'topol.top',
        t = 'npt.cpt',
        r = 'npt.gro',
        n = 'index.ndx',
        maxwarn = 2
    )
    mdrun(deffnm = 'md',
          cpi = 'md.cpt',
          #append = 'true',
          #nt = 20,
          #pinoffset = 1,
          #pinstride = 2,
          #gpu_id = "01",
          v = 'true',
          #resethway = 'true',
          #pin = 'on',
          #nb = 'gpu'
          )
    # Post-processing: remove periodic-boundary jumps, then analyses.
    trjconv(
        s = 'md.tpr',
        f = 'md.xtc',
        o = 'md_noPBC.xtc',
        pbc = 'mol',
        ur = 'compact',
        stdin = """
        0
        """
    )
    rms(
        s = 'md.tpr',
        f = 'md_noPBC.xtc',
        o = 'rmsd.xvg',
        tu = 'ns',
        stdin = """
        4 4
        """
    )
    gyrate(
        s = 'md.tpr',
        f = 'md_noPBC.xtc',
        o = 'gyrate.xvg',
        stdin = """
        1
        """
    )
    sasa(
        s = 'md.tpr',
        f = 'md_noPBC.xtc',
        o = 'sasa.xvg',
        stdin = """
        1
        """
    )
    rmsf(
        s = 'md.tpr',
        f = 'md_noPBC.xtc',
        o = 'rmsf.xvg',
        res = 'true',
        stdin = """
        1
        """
    )
    rmsf(
        s = 'md.tpr',
        f = 'md_noPBC.xtc',
        o = 'bfactor.xvg',
        oq = 'bfactor.pdb',
        stdin = """
        1
        """
    )
    hbond(
        s = 'md.tpr',
        f = 'md_noPBC.xtc',
        dist = 'hbond.xvg',
        g = 'hbond.log',
        stdin = """
        1 1
        """
    )
| [
"heitorsampaio@Heitors-MacBook-Pro.local"
] | heitorsampaio@Heitors-MacBook-Pro.local |
03247b25e26aeb726a52530199ae1a3d33719eff | 9f457c291a17e8dc6e43b26cdcb6cc18d128ad81 | /print_array_in_pendulum_form.py | f82d2a10b17d435a1165d232fb4f1d8e97904d04 | [] | no_license | agvaibhav/arrays | 2d19254bf3983602644dd3978721ade4ca3981d5 | 857857ccca04ce6384330a7fd2ef529b8430d5f6 | refs/heads/master | 2020-03-26T03:10:43.912686 | 2019-08-07T05:04:21 | 2019-08-07T05:04:21 | 144,442,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | '''
ques
Write a program to input a list of n integers in an array and arrange them
in a way similar to the to-and-fro movement of a Pendulum.
The minimum element out of the list of integers, must come
in center position of array. If there are even elements,
then minimum element should be moved to (n-1)/2 index (considering that indexes start from 0)
The next number (next to minimum) in the ascending order,
goes to the right, the next to next number goes to
the left of minimum number and it continues like a Pendulum.
'''
#code
# Number of test cases.
t = int(input())
for _ in range(t):
    n = int(input())
    a = list(map(int, input().split()))
    # Output slots, filled outward from the centre position (n-1)//2.
    array = [None]*n
    a.sort()
    # Take sorted values two at a time: a[i] goes one step further left
    # of centre, a[i+1] (guarded for odd n) mirrors it on the right —
    # producing the pendulum (to-and-fro) arrangement.
    for i in range(0,n,2):
        array[(n-1-i)//2] = a[i]
        if n+1+i<2*n:
            array[(n+1+i)//2] = a[i+1]
    for j in array:
        print(j,end=' ')
    print()
| [
"noreply@github.com"
] | noreply@github.com |
7f86da3f8807c7398288210caa1a6b17f88d9d2d | 474afaa013b85973039d5f158921c419538193b3 | /obselete/8healthDebug.py | 531384b6324ce9bf436125d0a0366f9dfd77324e | [] | no_license | killeronline/Madagaskar | db04b07e78d728e55f4264a8ae247cd41e793bc1 | 6683248824fb4526a57baff930ad158702fd2b4a | refs/heads/master | 2020-05-18T07:25:52.553129 | 2019-05-28T20:48:17 | 2019-05-28T20:48:17 | 184,264,565 | 1 | 0 | null | 2019-05-02T22:36:11 | 2019-04-30T13:12:39 | Python | UTF-8 | Python | false | false | 1,540 | py | # Load libraries
import os
import datetime
import pandas as pd
def get_health(filename, percent=1):
    """Count "healthy" trading days in a BSE price-history CSV.

    The file is assumed newest-first: rows are reversed into
    chronological order and the first (oldest) row is skipped, matching
    the original behaviour.  A day counts as healthy when it closes
    above its open by strictly more than `percent` percent.

    Parameters
    ----------
    filename : str
        Path to a CSV with 'Open', 'High', 'Low', 'Close' and
        'No. of Shares' columns.
    percent : float, optional
        Minimum percentage gain for a day to count.  Defaults to 1,
        the previously hard-coded threshold, so existing callers are
        unaffected.

    Returns
    -------
    list
        [healthy_days, examined_days].
    """
    df = pd.read_csv(filename)
    open_col = 'Open'
    close_col = 'Close'
    # High/Low/volume are selected (and validated to exist) but not
    # used in the health computation itself.
    df = df[[open_col, 'High', 'Low', close_col, 'No. of Shares']]
    # Newest-first on disk -> chronological order for the scan below.
    df = df.iloc[::-1].reset_index(drop=True)
    p = 0
    c = 0
    for i in range(1, df.shape[0]):
        c += 1
        oprice = df[open_col][i]
        cprice = df[close_col][i]
        change = (cprice - oprice) * 100 / oprice
        if cprice > oprice and change > percent:
            p += 1
    return [p, c]
# Scan each BSE code's CSV (if present) and collect "code,ratio,count"
# lines; NOTE(review): chn is built but never written anywhere.
chn = []
st = datetime.datetime.now()
i = 0  # NOTE(review): unused
codes = [
    'BOM531562',
    'BOM538652',
    'BOM505585',
    'BOM512014',
    'BOM512309',
    'BOM536993',
    'BOM507952',
    'BOM531628',
    'BOM532068',
    'BOM537867',
    'BOM539304',
    'BOM501368',
    'BOM512063',
    'BOM537648'
]
for code in codes :
    filename = os.path.join('datasets',code+'.csv')
    if os.path.exists(filename):
        p,c = get_health(filename)
        if c > 0 :
            # Fraction of examined days that were "healthy".
            pt = p/c
        else :
            pt = 0
        chn.append(code+","+str(pt)+","+str(c)+"\n")
et = datetime.datetime.now()
tt = (et-st).seconds
print("Completed",tt)
| [
"KILLA"
] | KILLA |
8acd740c72e0419c3de95ec78794587b7345aea1 | e6ab93f7f58ba12318959b4f415c9f042e1e9bec | /Test3/test3-3-3.py | 5665c7529b884146f945c074c2415b627f3e849b | [] | no_license | dlvguo/SklearnTests | 3281c961faece67012b9e0fed652a64637e248a4 | 785ec9259b8d0a2bd962b8b0df41e44a06ca7c2d | refs/heads/master | 2022-11-15T12:05:37.405873 | 2020-07-10T05:42:54 | 2020-07-10T05:42:54 | 272,647,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | import numpy as np
import time
from sklearn import preprocessing
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import silhouette_score
from Test3.S_Dbw import S_Dbw
from sklearn.cluster import AgglomerativeClustering
# Experiment 3, step (3)-3: cluster the mobile-customer table with
# agglomerative (hierarchical) clustering and report quality metrics.
mobilePath = "../实验数据/移动客户数据表.tsv"
np.set_printoptions(precision=2, suppress=True)
min_max_scaler = preprocessing.MinMaxScaler()
# Min-max scale the feature columns (from column 4 onward) to [0, 1].
x_feature = min_max_scaler.fit_transform(np.genfromtxt(mobilePath, skip_header=1, delimiter='\t')[:, 4:])
selector = VarianceThreshold(0)
selector.fit(x_feature)
# Feature indices sorted by variance, descending; the 20 most variable
# columns are re-read from the file.
# NOTE(review): variances_ is indexed relative to column 4 (the fit used
# [:, 4:]) but usecols indexes the whole file — possible off-by-4; verify.
arr = np.argsort(-selector.variances_)
row_tag = np.genfromtxt(mobilePath, max_rows=1, dtype=str, delimiter='\t', usecols=arr[:20])
x_feature = min_max_scaler.fit_transform(np.genfromtxt(mobilePath, skip_header=1, delimiter='\t', usecols=arr[:20]))
time_start = time.time()
clunum = 10
acl = AgglomerativeClustering(n_clusters=10)
acl.fit(x_feature)
pre = acl.labels_
# Per-cluster membership counts.
arr = [0 for i in range(clunum)]
for i in range(len(pre)):
    arr[pre[i]] += 1
print('聚类质量SSE:')
for i in range(clunum):
    print(i, '\t', arr[i], '\t', '{:.0%}'.format(arr[i] / len(pre)))
time_end = time.time()
print('聚类运算时间 {:.2f}'.format(time_end - time_start), 's')
print('Silhouette:', silhouette_score(x_feature, pre, metric='euclidean'))
s_dbw = S_Dbw(x_feature, pre)
print('S_Dbw', s_dbw.result())
| [
"dlvguo@qq.com"
] | dlvguo@qq.com |
61d10d9f7cf9442710afea9bf8178f84460771bd | 93935f5d37641fa5b76ff709188a5a2fc5c908d7 | /test.py | 1f5fcfd87656659d7140f1a1aa129eb3debf5433 | [] | no_license | br4bit/TDP_Queue | d32812edee004535b06f4f11793cfdfc4832d2e4 | f13621c62578b543846917e69fd3ce588252c163 | refs/heads/master | 2021-07-08T18:11:34.174147 | 2017-10-07T22:24:39 | 2017-10-07T22:24:39 | 106,134,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from Collections.ArrayQueue import ArrayQueue
# Exercise ArrayQueue wrap-around: fill, drain completely so the
# internal front index advances, then enqueue again and inspect state.
f = ArrayQueue()
f.enqueue(1)
f.enqueue(2)
f.enqueue(3)
f.enqueue(4)
f.enqueue(5)
f.enqueue(6)
f.dequeue()
f.dequeue()
f.dequeue()
f.dequeue()
f.dequeue()
f.dequeue()
f.enqueue(6)
f.enqueue(5)
f.enqueue(4)
print(len(f))
print(f)
# Peek at the private front index to observe the wrap-around.
print(f._front)
"luxifero@live.it"
] | luxifero@live.it |
c7f104841f354af7596f72b7daaf1092f49b6f9f | 5b0ad97cb6924e487c0921965dacb3dd18f19a1a | /src/FourierTransformation/src/utils.py | 1f58c19f5b54c36b5d15affd8b83a12457005f7f | [] | no_license | sinoyou/ImageProcess_and_PatternRecognition | bcde442b00a86f3dcb6c49f5f1e2857b48c41ad5 | dffee2c21ac789066a3f4ceddf69172e417f84e7 | refs/heads/master | 2023-01-28T04:24:10.700353 | 2020-11-30T05:35:55 | 2020-11-30T05:35:55 | 247,293,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | import numpy as np
def mirror(array):
    """
    Mirror-transform a 2D array.

    The array is cut into four quadrants at the row/column midpoints
    (floor division); each quadrant is flipped both up-down and
    left-right, and the quadrants are reassembled in their original
    positions.  Applying the transform twice restores the input.
    Application area: frequency-domain visualization, preprocessing for
    frequency filters, etc.
    :return: the transformed array
    """
    half_h = array.shape[0] // 2
    half_w = array.shape[1] // 2
    # [::-1, ::-1] flips a block vertically and horizontally in one step.
    top = np.concatenate(
        [array[:half_h, :half_w][::-1, ::-1],
         array[:half_h, half_w:][::-1, ::-1]], axis=1)
    bottom = np.concatenate(
        [array[half_h:, :half_w][::-1, ::-1],
         array[half_h:, half_w:][::-1, ::-1]], axis=1)
    return np.concatenate([top, bottom], axis=0)
| [
"youzn99@qq.com"
] | youzn99@qq.com |
721ccccfd423803b244512a2e06e67214b3cbb31 | cdc770bb64d90010f503df093436403395d54896 | /python/sklearn/metrics/ranking.py | 48344a767436b07f5dc58d36dda4da57387d280a | [
"MIT"
] | permissive | Decoder996/entity_resolution | 5a43f5f5f95ba7cd281ce9489e6e5f9bee1694ce | 4fe98701422bbceebc0dfbfc2733add2b9695f2a | refs/heads/master | 2023-05-24T20:57:29.800001 | 2015-10-02T18:49:49 | 2015-10-02T18:49:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,114 | py | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer
from ..utils import check_consistent_length
from ..utils import deprecated
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.stats import rankdata
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule.

    This works on arbitrary curve points; to compute the area under an
    ROC curve from labels and scores, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.

    y : array, shape = [n]
        y coordinates.

    reorder : boolean, optional (default=False)
        If True, sort the points by x (breaking ties with y), assuming
        the curve is ascending in the case of ties, as for an ROC
        curve.  If the curve is non-ascending, the result will be
        wrong.

    Returns
    -------
    auc : float

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)

    if reorder:
        # Sort by x, using y to break ties.
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
        direction = 1
    else:
        dx = np.diff(x)
        if np.any(dx < 0):
            # A monotonically decreasing x is integrated backwards;
            # anything else is an error.
            if not np.all(dx <= 0):
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
            direction = -1
        else:
            direction = 1

    return direction * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores.

    AP is the area under the precision-recall curve.  This
    implementation is restricted to the binary classification task or
    multilabel classification task.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, per-class scores are returned.  'micro' pools every
        element of the label indicator matrix, 'macro' is the unweighted
        mean over labels, 'weighted' weights labels by support, and
        'samples' averages over instances.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
            <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    def _ap(y_true, y_score, sample_weight=None):
        # AP for one binary problem: area under its PR curve.
        precision, recall, _ = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)

    return _average_binary_score(_ap, y_true, y_score, average,
                                 sample_weight=sample_weight)
@deprecated("Function 'auc_score' has been renamed to "
            "'roc_auc_score' and will be removed in release 0.16.")
def auc_score(y_true, y_score):
    """Deprecated alias for :func:`roc_auc_score`.

    Computes the Area Under the Curve (AUC) from prediction scores,
    restricted to the binary classification task.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels.

    y_score : array, shape = [n_samples]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.

    Returns
    -------
    auc : float

    See also
    --------
    roc_auc_score : the replacement for this function
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)
    """
    return roc_auc_score(y_true, y_score)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores.

    Restricted to the binary classification task or multilabel
    classification task in label indicator format.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, per-class scores are returned.  'micro' pools every
        element of the label indicator matrix, 'macro' is the unweighted
        mean over labels, 'weighted' weights labels by support, and
        'samples' averages over instances.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    def _binary_score(y_true, y_score, sample_weight=None):
        # AUC is undefined unless both classes are present.
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(_binary_score, y_true, y_score, average,
                                 sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification

    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).

    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).

    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)

    # ensure binary classification if pos_label is not specified
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector
    y_true = (y_true == pos_label)

    # sort scores and corresponding truth values
    # (stable mergesort, then reversed for descending score order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # Scalar weight: every sample counts once.
        weight = 1.

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # Unweighted: positives + negatives above a threshold is just
        # the (1-based) sample count, so fps falls out by subtraction.
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different score thresholds.

    Restricted to binary classification.  Precision is ``tp / (tp + fp)``
    and recall is ``tp / (tp + fn)``.  The outputs are reversed so recall
    decreases, and an extra final point ``(precision=1, recall=0)`` without
    a matching threshold is appended so the curve starts on the x axis.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary targets in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function values.
    pos_label : int, optional (default=None)
        The label of the positive class.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        precision[i] is the precision of predictions with
        score >= thresholds[i]; the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values; the last element is 0.
    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function.
    """
    # cumulative false/true positive counts at each distinct score threshold
    fps, tps, scores = _binary_clf_curve(
        y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight)

    # tps + fps is the number of samples predicted positive at each threshold
    precision = tps / (tps + fps)
    # tps[-1] is the total number of positives
    recall = tps / tps[-1]

    # stop once full recall is attained; reverse so recall is decreasing
    cut = tps.searchsorted(tps[-1])
    rev = slice(cut, None, -1)
    return (np.r_[precision[rev], 1],
            np.r_[recall[rev], 0],
            scores[rev])
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute the Receiver Operating Characteristic (ROC) curve.

    Restricted to binary classification.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}.  If labels are not
        binary, pos_label should be explicitly given.
    y_score : array, shape = [n_samples]
        Target scores: probability estimates of the positive class or
        confidence values.
    pos_label : int
        Label considered as positive; all others are negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates; fpr[i] is the FPR of predictions
        with score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates, aligned with ``thresholds``.
    thresholds : array, shape = [n_thresholds]
        Decreasing decision-function thresholds; ``thresholds[0]`` is an
        artificial ``max(y_score) + 1`` representing "no instance predicted".

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    # Make sure the curve starts at the origin: prepend an artificial
    # threshold whenever the first point already has false positives.
    if tps.size == 0 or fps[0] != 0:
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    def _rate(counts, kind, value_name):
        # Normalize a cumulative count by its final value; when the class is
        # entirely absent, warn and return NaNs so callers can detect it.
        if counts[-1] <= 0:
            warnings.warn("No %s samples in y_true, "
                          "%s value should be meaningless"
                          % (kind, value_name),
                          UndefinedMetricWarning)
            return np.repeat(np.nan, counts.shape)
        return counts / counts[-1]

    fpr = _rate(fps, "negative", "false positive")
    tpr = _rate(tps, "positive", "true positive")
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision
    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.
    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.
    The obtained score is always strictly greater than 0 and
    the best value is 1.
    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    Returns
    -------
    score : float
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator"
            and not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))
    # CSR gives cheap per-row access to the indices of the relevant labels
    y_true = csr_matrix(y_true)
    # rankdata ranks ascending; negate so the highest scores get rank 1
    y_score = -y_score
    n_samples, n_labels = y_true.shape
    out = 0.
    # indptr[i]:indptr[i+1] delimits the column indices of sample i's
    # relevant (true) labels in the CSR indices array
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            out += 1.
            continue
        scores_i = y_score[i]
        # rank of each relevant label among ALL labels ('max' resolves ties
        # pessimistically, i.e. ties get the worst shared rank)
        rank = rankdata(scores_i, 'max')[relevant]
        # rank of each relevant label among the RELEVANT labels only
        L = rankdata(scores_i[relevant], 'max')
        # L / rank is precision-at-rank for each relevant label
        out += np.divide(L, rank, dtype=float).mean()
    return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure.

    Compute how far we need to go through the ranked scores to cover all
    true labels.  The best value equals the average number of labels in
    ``y_true`` per sample.  Ties in ``y_score`` are broken by assigning the
    maximal rank that all tied values would have received.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores: probability estimates of the positive class,
        confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Mask out scores of the irrelevant labels, then take, per sample, the
    # lowest score among the relevant ones.
    relevant_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    lowest_relevant = relevant_scores.min(axis=1).reshape((-1, 1))
    # Coverage = how many labels score at least as high as that minimum.
    per_sample = (y_score >= lowest_relevant).sum(axis=1)
    # Samples with no relevant labels are fully masked: count them as 0.
    per_sample = per_sample.filled(0)
    return np.average(per_sample, weights=sample_weight)
| [
"barnes.matt.j@gmail.com"
] | barnes.matt.j@gmail.com |
06d28ed6d203c6790e5e808bd8033beb090b6c7d | 9dc6f8d91dc56523b9688990d4ae413b0bcbd4e1 | /examples/mcscf/31-cr2_scan/cr2-scan.py | cd23eb7028ad7e19891993db6645713ad6ae6e11 | [
"Apache-2.0"
] | permissive | sunqm/pyscf | 566bc2447d8072cff442d143891c12e6414de01c | dd179a802f0a35e72d8522503172f16977c8d974 | refs/heads/master | 2023-08-15T18:09:58.195953 | 2023-03-27T21:02:03 | 2023-03-27T21:02:03 | 159,149,096 | 80 | 26 | Apache-2.0 | 2022-02-05T00:19:24 | 2018-11-26T10:10:23 | Python | UTF-8 | Python | false | false | 2,329 | py | #!/usr/bin/env python
'''
Scan Cr2 molecule singlet state dissociation curve.
Simliar tthe example mcscf/30-hf_scan, we need to control the CASSCF initial
guess using functions project_init_guess and sort_mo. In this example,
sort_mo function is replaced by the symmetry-adapted version
``sort_mo_by_irrep``.
'''
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
ehf = []
emc = []
def run(b, dm, mo, ci=None):
    """RHF + CASSCF(12,12) single point for Cr2 at bond length *b* (Angstrom).

    *dm*, *mo* and *ci* are the previous point's density matrix, MO
    coefficients and CI vector; they seed this calculation so the scan
    follows one solution smoothly.  Appends the HF and CASSCF energies to
    the module-level ``ehf`` and ``emc`` lists as a side effect and returns
    (dm, mo, ci) for chaining into the next point.
    """
    mol = gto.Mole()
    mol.verbose = 5
    # one output file per geometry, e.g. cr2-1.5.out
    mol.output = 'cr2-%2.1f.out' % b
    mol.atom = [
        ['Cr',( 0.000000, 0.000000, -b/2)],
        ['Cr',( 0.000000, 0.000000, b/2)],
    ]
    mol.basis = 'cc-pVTZ'
    mol.symmetry = 1
    mol.build()
    mf = scf.RHF(mol)
    # level shift helps SCF convergence for this strongly correlated dimer
    mf.level_shift = .4
    mf.max_cycle = 100
    mf.conv_tol = 1e-9
    ehf.append(mf.scf(dm))
    mc = mcscf.CASSCF(mf, 12, 12)
    mc.fcisolver.conv_tol = 1e-9
    # FCI solver with multi-threads is not stable enough for this system
    mc.fcisolver.threads = 1
    if mo is None:
        # the initial guess for b = 1.5
        ncore = {'A1g':5, 'A1u':5} # Optional. Program will guess if not given
        ncas = {'A1g':2, 'A1u':2,
                'E1ux':1, 'E1uy':1, 'E1gx':1, 'E1gy':1,
                'E2ux':1, 'E2uy':1, 'E2gx':1, 'E2gy':1}
        mo = mcscf.sort_mo_by_irrep(mc, mf.mo_coeff, ncas, ncore)
    else:
        # project the previous geometry's orbitals onto the new geometry
        mo = mcscf.project_init_guess(mc, mo)
    emc.append(mc.kernel(mo, ci)[0])
    mc.analyze()
    return mf.make_rdm1(), mc.mo_coeff, mc.ci
# Scan the curve in both directions, reusing each point's converged solution
# as the next point's initial guess; hysteresis between the two sweeps
# exposes multiple SCF/CASSCF solutions along the curve.
dm = mo = ci = None
for b in numpy.arange(1.5, 3.01, .1):
    dm, mo, ci = run(b, dm, mo, ci)
# backward sweep: 3.0 -> 1.5
for b in reversed(numpy.arange(1.5, 3.01, .1)):
    dm, mo, ci = run(b, dm, mo, ci)
x = numpy.arange(1.5, 3.01, .1)
# first half of the accumulators is the forward sweep, second half backward
ehf1 = ehf[:len(x)]
ehf2 = ehf[len(x):]
emc1 = emc[:len(x)]
emc2 = emc[len(x):]
# backward results were collected from 3.0 down to 1.5; realign with x
ehf2.reverse()
emc2.reverse()
with open('cr2-scan.txt', 'w') as fout:
    fout.write('     HF 1.5->3.0     CAS(12,12)      HF 3.0->1.5     CAS(12,12)\n')
    for i, xi in enumerate(x):
        fout.write('%2.1f  %12.8f  %12.8f  %12.8f  %12.8f\n'
                   % (xi, ehf1[i], emc1[i], ehf2[i], emc2[i]))
import matplotlib.pyplot as plt
plt.plot(x, ehf1, label='HF,1.5->3.0')
plt.plot(x, ehf2, label='HF,3.0->1.5')
plt.plot(x, emc1, label='CAS(12,12),1.5->3.0')
plt.plot(x, emc2, label='CAS(12,12),3.0->1.5')
plt.legend()
plt.show()
| [
"osirpt.sun@gmail.com"
] | osirpt.sun@gmail.com |
8022f12b6bdc80820f79b896a18b71ea2aca5b72 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03844/s016048975.py | 61fa23d2d0067a90a0b12e2ac15e028d6958db57 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a,b,c = input().split()
print(int(a) + int(c)) if b =="+" else print(int(a) - int(c)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d40be46a3520e3def20e3f28cbda8d6a19e18912 | 10d748be435d190fe15ce4e3080bba5fffd2156d | /blogapp/views.py | 71c08d5e2093ef9600656c92d903cc373285240d | [] | no_license | liomotolani/Blog | 9c35883d0dcdbd5634617c9446991d4755353754 | 4c6301f430354389096380907e1cc012e4c7ffd5 | refs/heads/master | 2020-09-04T07:58:39.069406 | 2019-11-05T08:13:57 | 2019-11-05T08:13:57 | 219,690,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,610 | py | from django.shortcuts import render
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from django.shortcuts import redirect
from .models import Post
from .forms import PostForm
# Create your views here.
def post_list(request):
    """Render all posts whose publication date is not in the future."""
    published = Post.objects.filter(published_date__lte=timezone.now())
    context = {'posts': published}
    return render(request, 'blogapp/post_list.html', context)
def post_detail(request, pk):
    """Render a single post; respond with 404 when *pk* does not exist."""
    target = get_object_or_404(Post, pk=pk)
    context = {'post': target}
    return render(request, 'blogapp/post_detail.html', context)
def post_new(request):
    """Show an empty post form; a valid POST publishes and shows the post."""
    if request.method != "POST":
        # plain GET: render a blank form
        form = PostForm()
        return render(request, 'blogapp/post_edit.html', {'form': form})
    form = PostForm(request.POST)
    if form.is_valid():
        new_post = form.save(commit=False)
        new_post.author = request.user
        new_post.published_date = timezone.now()
        new_post.save()
        # go to post_detail to display the new post
        return redirect('post_detail', pk=new_post.pk)
    # invalid submission: re-render the bound form with errors
    return render(request, 'blogapp/post_edit.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post; a valid POST republishes and shows it."""
    existing = get_object_or_404(Post, pk=pk)
    if request.method != "POST":
        # plain GET: pre-fill the form with the current post
        form = PostForm(instance=existing)
        return render(request, 'blogapp/post_edit.html', {'form': form})
    form = PostForm(request.POST, instance=existing)
    if form.is_valid():
        edited = form.save(commit=False)
        edited.author = request.user
        edited.published_date = timezone.now()
        edited.save()
        return redirect('post_detail', pk=edited.pk)
    # invalid submission: re-render the bound form with errors
    return render(request, 'blogapp/post_edit.html', {'form': form})
| [
"ligaliomotolani@gmail.com"
] | ligaliomotolani@gmail.com |
f239f2736f85aeccab8749fde1c583519836fcc6 | 95d33d03cdf8414ad5f76c6d5be28f9ea40998bb | /06-dynamic-programming-and-recursion/memoization.py | c54413fcfdcdb81ee5fe99e82422a2bf22376797 | [] | no_license | dominiquecuevas/dominiquecuevas | b93326f8cc9f720aa61b7ee4004dde56706e4551 | 10674e86ed50693fbbb3371a52dfa83f45bf088e | refs/heads/master | 2021-05-21T08:57:37.439066 | 2020-04-22T20:26:17 | 2020-04-22T20:26:17 | 252,626,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | class Fibber:
def __init__(self):
        """Create a Fibber with an empty memoization cache."""
        self.memo = {}
def fib(self, n):
        """Return the n-th Fibonacci number, memoizing results in self.memo.

        Prints the cache after computing each new entry (debug output).
        """
        # base cases: fib(0) == 0, fib(1) == 1
        if n == 0 or n == 1:
            return n
        # reuse a previously computed value when available
        if n in self.memo:
            return self.memo[n]
        result = self.fib(n-1) + self.fib(n-2)
        self.memo[n] = result
        print('memo:', self.memo)
        return result
# demo: compute fib(5), printing the growing memo dict along the way
fibber = Fibber()
fibber.fib(5)
"cuevasdominique@gmail.com"
] | cuevasdominique@gmail.com |
eac538b8e2eeb2e15edebc2f6e8215ce91a5b457 | d682f7ada09b47205af2d633cdcd2e301bec4f02 | /web2py/applications/dctools/controllers/appadmin.py | 86282a23097ccfd8a2a3f9bd32738de6244638e9 | [] | no_license | Arsylk/destiny-child-forum | f15ef9f8cf7d151bab41e352983d012e2bd2dc49 | 35398a0f83175b1eed97c508c999adfa37685b45 | refs/heads/master | 2021-06-29T20:51:08.871312 | 2020-12-01T12:59:49 | 2020-12-01T12:59:49 | 186,180,761 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,938 | py | # -*- coding: utf-8 -*-
# ##########################################################
# ## make sure administrator is on localhost
# ###########################################################
import os
import socket
import datetime
import copy
import gluon.contenttype
import gluon.fileutils
from gluon._compat import iteritems
from gluon import DAL
is_gae = request.env.web2py_runtime_gae or False
# ## critical --- make a copy of the environment
global_env = copy.copy(globals())
global_env['datetime'] = datetime
import applications.dctools.controllers.api as api
remote_db = api.DestinyChildWiki()
global_env['remote_db'] = remote_db
global_env['remote_db'].define_table('Equipment',
Field('idx', 'string', default='0', length=10, requires=IS_NOT_EMPTY()),
Field('view_idx', 'string', length=10, default=''),
Field('icon', 'string', length=200, requires=IS_NOT_EMPTY(),
represent=lambda id, row: IMG(_src=row.icon, _style='width: 50px; height: 50px;')),
Field('name', 'string', length=50, requires=IS_NOT_EMPTY()),
Field('category', 'string', default='weapon', requires=IS_IN_SET(['Weapon', 'Armor', 'Accessory'])),
Field('grade', 'integer', default=0, requires=IS_NOT_EMPTY()),
Field('rare_level', 'integer', default=0, requires=IS_NOT_EMPTY(),
represent=lambda id, row: ['F', 'E', 'D', 'C', 'B', 'A'][row.rare_level]),
Field('hp', 'integer', default=0, requires=IS_NOT_EMPTY()),
Field('atk', 'integer', default=0, requires=IS_NOT_EMPTY()),
Field('def', 'integer', default=0, requires=IS_NOT_EMPTY()),
Field('agi', 'integer', default=0, requires=IS_NOT_EMPTY()),
Field('cri', 'integer', default=0, requires=IS_NOT_EMPTY()),
)
http_host = request.env.http_host.split(':')[0]
remote_addr = request.env.remote_addr
try:
hosts = (http_host, socket.gethostname(),
socket.gethostbyname(http_host),
'::1', '127.0.0.1', '::ffff:127.0.0.1')
except:
hosts = (http_host, )
if request.is_https:
session.secure()
elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \
(request.function != 'manage'):
raise HTTP(200, T('appadmin is disabled because insecure channel'))
if request.function == 'manage':
if not 'auth' in globals() or not request.args:
redirect(URL(request.controller, 'index'))
manager_action = auth.settings.manager_actions.get(request.args(0), None)
if manager_action is None and request.args(0) == 'auth':
manager_action = dict(role=auth.settings.auth_manager_role,
heading=T('Manage Access Control'),
tables=[auth.table_user(),
auth.table_group(),
auth.table_permission()])
manager_role = manager_action.get('role', None) if manager_action else None
if not (gluon.fileutils.check_credentials(request) or auth.has_membership(manager_role)):
raise HTTP(403, "Not authorized")
menu = False
elif (request.application == 'admin' and not session.authorized) or \
(request.application != 'admin' and not gluon.fileutils.check_credentials(request)):
redirect(URL('admin', 'default', 'index',
vars=dict(send=URL(args=request.args, vars=request.vars))))
else:
response.subtitle = T('Database Administration (appadmin)')
menu = True
ignore_rw = True
response.view = 'appadmin.html'
if menu:
response.menu = [[T('design'), False, URL('admin', 'default', 'design',
args=[request.application])], [T('db'), False,
URL('index')], [T('state'), False,
URL('state')], [T('cache'), False,
URL('ccache')]]
# ##########################################################
# ## auxiliary functions
# ###########################################################
if False and request.tickets_db:
from gluon.restricted import TicketStorage
ts = TicketStorage()
ts._get_table(request.tickets_db, ts.tablename, request.application)
def get_databases(request):
    """Return a dict mapping variable name -> DAL instance for every
    database object found in the captured global environment.

    The *request* argument is unused; it is kept for signature
    compatibility with the existing call sites.
    """
    dbs = {}
    for (key, value) in global_env.items():
        # GQLDB only exists on Google App Engine deployments; when that
        # name is missing (NameError) fall back to the generic SQLDB check.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        try:
            cond = isinstance(value, GQLDB)
        except Exception:
            cond = isinstance(value, SQLDB)
        if cond:
            dbs[key] = value
    return dbs
databases = get_databases(None)
def eval_in_global_env(text):
    """Evaluate *text* as a Python expression inside the captured global
    environment and return its value.

    NOTE(review): this exec's an arbitrary string; appadmin is only
    reachable by the administrator, but the input must be treated as
    trusted-admin-only.
    """
    # the expression result is smuggled out through the '_ret' key
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']
def get_database(request):
    """Resolve the database named by the first URL arg, or bail out to index.

    ``redirect`` raises an HTTP exception, so the failure path never returns.
    """
    dbname = request.args[0] if request.args else None
    if dbname not in databases:
        session.flash = T('invalid request')
        redirect(URL('index'))
    return eval_in_global_env(dbname)
def get_table(request):
    """Resolve ``(db, tablename)`` from the first two URL args, or bail
    out to the index page when the table is unknown."""
    db = get_database(request)
    tablename = request.args[1] if len(request.args) > 1 else None
    if tablename in db.tables:
        return (db, tablename)
    session.flash = T('invalid request')
    redirect(URL('index'))
def get_query(request):
    """Evaluate the ``query`` request variable into a DAL query object;
    return None when it is absent or cannot be evaluated."""
    result = None
    try:
        result = eval_in_global_env(request.vars.query)
    except Exception:
        pass
    return result
def query_by_table_type(tablename, db, request=request):
    """Build the default "select everything" query string for *tablename*.

    Keyed tables filter on their first primary-key field (``!=""`` for
    text-like keys, ``>0`` otherwise); id-based tables simply use ``id>0``.
    """
    table = db[tablename]
    if not hasattr(table, '_primarykey'):
        return '%s.%s.id>0' % tuple(request.args[:2])
    first = table[table._primarykey[0]]
    cond = '!=""' if first.type in ['string', 'text'] else '>0'
    return '%s.%s.%s%s' % (
        request.args[0], request.args[1], first.name, cond)
# ##########################################################
# ## list all databases and tables
# ###########################################################
def index():
    """List every database known to appadmin."""
    return {'databases': databases}
# ##########################################################
# ## insert a new record
# ###########################################################
def insert():
    """Display (and process) an insert form for the requested table."""
    db, tablename = get_table(request)
    table = db[tablename]
    form = SQLFORM(table, ignore_rw=ignore_rw)
    accepted = form.accepts(request.vars, session)
    if accepted:
        response.flash = T('new record inserted')
    return dict(form=form, table=table)
# ##########################################################
# ## list all records in table and insert new record
# ###########################################################
def download():
    """Stream an uploaded file referenced by the URL back to the client."""
    import os
    db = get_database(request)
    # response.download handles content-type, range requests and streaming
    return response.download(request, db)
def csv():
    """Export the rows matched by the current query as a CSV attachment.

    Returns None (empty response) when no valid query is supplied.
    """
    import gluon.contenttype
    response.headers['Content-Type'] = \
        gluon.contenttype.contenttype('.csv')
    db = get_database(request)
    query = get_query(request)
    if not query:
        return None
    # filename: <dbname>_<tablename>.csv taken from the query string
    response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\
        % tuple(request.vars.query.split('.')[:2])
    # DAL Rows stringify directly to CSV
    return str(db(query, ignore_common_filters=True).select())
def import_csv(table, file):
    """Bulk-load rows into *table* from an open CSV *file* object."""
    table.import_from_csv_file(file)
def select():
    """Browse/update/delete records of a database via a free-form query.

    Shows a query form, executes the resulting DAL query with paging, and
    optionally applies a bulk update or delete.  Also offers a CSV import
    form once a target table is known.
    """
    import re
    db = get_database(request)
    dbname = request.args[0]
    # IMAP adapters get special treatment (restricted fields, small pages)
    try:
        is_imap = db._uri.startswith("imap://")
    except (KeyError, AttributeError, TypeError):
        is_imap = False
    # shorthand `table.field=value` is expanded into a full DAL equality
    # query; keyed tables may have non-numeric key values, hence `.+`
    regex = re.compile(r'(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)')
    if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'):
        regex = re.compile(r'(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)')
    if request.vars.query:
        match = regex.match(request.vars.query)
        if match:
            request.vars.query = '%s.%s.%s==%s' % (request.args[0],
                    match.group('table'), match.group('field'),
                    match.group('value'))
    else:
        # no query supplied: fall back to the last one stored in the session
        request.vars.query = session.last_query
    query = get_query(request)
    if request.vars.start:
        start = int(request.vars.start)
    else:
        start = 0
    nrows = 0
    step = 100
    fields = []
    if is_imap:
        step = 3
    stop = start + step
    table = None
    rows = []
    # clicking the same column header twice toggles ascending/descending
    # order by prefixing/stripping '~'
    orderby = request.vars.orderby
    if orderby:
        orderby = dbname + '.' + orderby
        if orderby == session.last_orderby:
            if orderby[0] == '~':
                orderby = orderby[1:]
            else:
                orderby = '~' + orderby
    session.last_orderby = orderby
    session.last_query = request.vars.query
    form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px',
                _name='query', _value=request.vars.query or '', _class="form-control",
                requires=IS_NOT_EMPTY(
                error_message=T("Cannot be empty")))), TR(T('Update:'),
                INPUT(_name='update_check', _type='checkbox',
                value=False), INPUT(_style='width:400px',
                _name='update_fields', _value=request.vars.update_fields
                or '', _class="form-control")), TR(T('Delete:'), INPUT(_name='delete_check',
                _class='delete', _type='checkbox', value=False), ''),
                TR('', '', INPUT(_type='submit', _value=T('submit'), _class="btn btn-primary"))),
                _action=URL(r=request, args=request.args))
    tb = None
    if form.accepts(request.vars, formname=None):
        # extract the table name from the submitted query string
        regex = re.compile(request.args[0] + r'\.(?P<table>\w+)\..+')
        match = regex.match(form.vars.query.strip())
        if match:
            table = match.group('table')
        try:
            nrows = db(query, ignore_common_filters=True).count()
            if form.vars.update_check and form.vars.update_fields:
                # bulk update: the fields string is evaluated as kwargs
                db(query, ignore_common_filters=True).update(
                    **eval_in_global_env('dict(%s)' % form.vars.update_fields))
                response.flash = T('%s %%{row} updated', nrows)
            elif form.vars.delete_check:
                db(query, ignore_common_filters=True).delete()
                response.flash = T('%s %%{row} deleted', nrows)
            # recount: update/delete may have changed the matching set
            nrows = db(query, ignore_common_filters=True).count()
            if is_imap:
                fields = [db[table][name] for name in
                          ("id", "uid", "created", "to",
                           "sender", "subject")]
            if orderby:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop),
                    orderby=eval_in_global_env(orderby))
            else:
                rows = db(query, ignore_common_filters=True).select(
                    *fields, limitby=(start, stop))
        except Exception as e:
            import traceback
            tb = traceback.format_exc()
            (rows, nrows) = ([], 0)
            response.flash = DIV(T('Invalid Query'), PRE(str(e)))
    # begin handle upload csv
    csv_table = table or request.vars.table
    if csv_table:
        formcsv = FORM(str(T('or import from csv file')) + " ",
                       INPUT(_type='file', _name='csvfile'),
                       INPUT(_type='hidden', _value=csv_table, _name='table'),
                       INPUT(_type='submit', _value=T('import'), _class="btn btn-primary"))
    else:
        formcsv = None
    if formcsv and formcsv.process().accepted:
        try:
            import_csv(db[request.vars.table],
                       request.vars.csvfile.file)
            response.flash = T('data uploaded')
        except Exception as e:
            response.flash = DIV(T('unable to parse csv file'), PRE(str(e)))
    # end handle upload csv
    return dict(
        form=form,
        table=table,
        start=start,
        stop=stop,
        step=step,
        nrows=nrows,
        rows=rows,
        query=request.vars.query,
        formcsv=formcsv,
        tb=tb
    )
# ##########################################################
# ## edit delete one record
# ###########################################################
def update():
    """Edit or delete a single record of the requested table.

    The record is located by primary-key request vars (keyed tables) or by
    the third URL arg as id.  Missing records and successful submissions
    both redirect back to the select page with a default query.
    """
    (db, table) = get_table(request)
    keyed = hasattr(db[table], '_primarykey')
    record = None
    # appadmin must see every record, so disable any common filter
    db[table]._common_filter = None
    if keyed:
        key = [f for f in request.vars if f in db[table]._primarykey]
        if key:
            record = db(db[table][key[0]] == request.vars[key[
                        0]]).select().first()
    else:
        record = db(db[table].id == request.args(
            2)).select().first()
    if not record:
        qry = query_by_table_type(table, db)
        session.flash = T('record does not exist')
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    if keyed:
        # primary-key fields must not be edited once the record exists
        for k in db[table]._primarykey:
            db[table][k].writable = False
    form = SQLFORM(
        db[table], record, deletable=True, delete_label=T('Check to delete'),
        ignore_rw=ignore_rw and not keyed,
        linkto=URL('select',
                   args=request.args[:1]), upload=URL(r=request,
                                                      f='download', args=request.args[:1]))
    if form.accepts(request.vars, session):
        session.flash = T('done!')
        qry = query_by_table_type(table, db)
        redirect(URL('select', args=request.args[:1],
                     vars=dict(query=qry)))
    return dict(form=form, table=db[table])
# ##########################################################
# ## get global variables
# ###########################################################
def state():
    """Render the state page; the appadmin state view inspects the
    request/session/response objects directly, so no data is passed."""
    return dict()
def ccache():
    """Show cache statistics and offer buttons to clear RAM/disk caches.

    On Google App Engine only a single memcache-backed cache exists; the
    non-GAE branch reports RAM and disk caches separately plus a combined
    total.  Per-entry byte sizes are only available when pympler is
    installed.
    """
    if is_gae:
        form = FORM(
            P(TAG.BUTTON(T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")))
    else:
        cache.ram.initialize()
        cache.disk.initialize()
        form = FORM(
            P(TAG.BUTTON(
                T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")),
            P(TAG.BUTTON(
                T("Clear RAM"), _type="submit", _name="ram", _value="ram")),
            P(TAG.BUTTON(
                T("Clear DISK"), _type="submit", _name="disk", _value="disk")),
        )
    if form.accepts(request.vars, session):
        session.flash = ""
        if is_gae:
            if request.vars.yes:
                cache.ram.clear()
                session.flash += T("Cache Cleared")
        else:
            # "yes" clears both caches; "ram"/"disk" clear one each
            clear_ram = False
            clear_disk = False
            if request.vars.yes:
                clear_ram = clear_disk = True
            if request.vars.ram:
                clear_ram = True
            if request.vars.disk:
                clear_disk = True
            if clear_ram:
                cache.ram.clear()
                session.flash += T("Ram Cleared")
            if clear_disk:
                cache.disk.clear()
                session.flash += T("Disk Cleared")
        redirect(URL(r=request))
    try:
        from pympler.asizeof import asizeof
    except ImportError:
        # sentinel: object sizes unavailable without pympler
        asizeof = False
    import shelve
    import os
    import copy
    import time
    import math
    from pydal.contrib import portalocker
    # accumulator template shared by the ram/disk/total report dicts
    ram = {
        'entries': 0,
        'bytes': 0,
        'objects': 0,
        'hits': 0,
        'misses': 0,
        'ratio': 0,
        'oldest': time.time(),
        'keys': []
    }
    disk = copy.copy(ram)
    total = copy.copy(ram)
    # shallow copies above share the 'keys' list; give each its own
    disk['keys'] = []
    total['keys'] = []
    def GetInHMS(seconds):
        # convert a duration in seconds to an (hours, minutes, seconds) tuple
        hours = math.floor(seconds / 3600)
        seconds -= hours * 3600
        minutes = math.floor(seconds / 60)
        seconds -= minutes * 60
        seconds = math.floor(seconds)
        return (hours, minutes, seconds)
    if is_gae:
        gae_stats = cache.ram.client.get_stats()
        try:
            gae_stats['ratio'] = ((gae_stats['hits'] * 100) /
                (gae_stats['hits'] + gae_stats['misses']))
        except ZeroDivisionError:
            gae_stats['ratio'] = T("?")
        gae_stats['oldest'] = GetInHMS(time.time() - gae_stats['oldest_item_age'])
        total.update(gae_stats)
    else:
        # get ram stats directly from the cache object
        ram_stats = cache.ram.stats[request.application]
        ram['hits'] = ram_stats['hit_total'] - ram_stats['misses']
        ram['misses'] = ram_stats['misses']
        try:
            ram['ratio'] = ram['hits'] * 100 / ram_stats['hit_total']
        except (KeyError, ZeroDivisionError):
            ram['ratio'] = 0
        # each cache entry value is a (timestamp, object) pair
        for key, value in iteritems(cache.ram.storage):
            if asizeof:
                ram['bytes'] += asizeof(value[1])
            ram['objects'] += 1
            ram['entries'] += 1
            if value[0] < ram['oldest']:
                ram['oldest'] = value[0]
            ram['keys'].append((key, GetInHMS(time.time() - value[0])))
        for key in cache.disk.storage:
            value = cache.disk.storage[key]
            # the disk shelve keeps its own hit statistics under this key
            if key == 'web2py_cache_statistics' and isinstance(value[1], dict):
                disk['hits'] = value[1]['hit_total'] - value[1]['misses']
                disk['misses'] = value[1]['misses']
                try:
                    disk['ratio'] = disk['hits'] * 100 / value[1]['hit_total']
                except (KeyError, ZeroDivisionError):
                    disk['ratio'] = 0
            else:
                if asizeof:
                    disk['bytes'] += asizeof(value[1])
                disk['objects'] += 1
                disk['entries'] += 1
                if value[0] < disk['oldest']:
                    disk['oldest'] = value[0]
                disk['keys'].append((key, GetInHMS(time.time() - value[0])))
    # sum additive stats; 'ratio' and 'oldest' need special handling below
    ram_keys = list(ram) # ['hits', 'objects', 'ratio', 'entries', 'keys', 'oldest', 'bytes', 'misses']
    ram_keys.remove('ratio')
    ram_keys.remove('oldest')
    for key in ram_keys:
        total[key] = ram[key] + disk[key]
    try:
        total['ratio'] = total['hits'] * 100 / (total['hits'] +
                                                total['misses'])
    except (KeyError, ZeroDivisionError):
        total['ratio'] = 0
    if disk['oldest'] < ram['oldest']:
        total['oldest'] = disk['oldest']
    else:
        total['oldest'] = ram['oldest']
    # convert the raw timestamps to ages for display
    ram['oldest'] = GetInHMS(time.time() - ram['oldest'])
    disk['oldest'] = GetInHMS(time.time() - disk['oldest'])
    total['oldest'] = GetInHMS(time.time() - total['oldest'])
    def key_table(keys):
        # render a (key, age-tuple) list as an HTML table
        return TABLE(
            TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))),
            *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys],
            **dict(_class='cache-keys',
                   _style="border-collapse: separate; border-spacing: .5em;"))
    if not is_gae:
        ram['keys'] = key_table(ram['keys'])
        disk['keys'] = key_table(disk['keys'])
        total['keys'] = key_table(total['keys'])
    return dict(form=form, total=total,
                ram=ram, disk=disk, object_stats=asizeof != False)
def table_template(table):
    """Render *table*'s schema as a graphviz HTML-like node label.

    Produces one header row plus a (name, type, key-kind) row per field,
    wrapped in ``< ... >`` as graphviz expects for HTML labels.
    """
    from gluon.html import TR, TD, TABLE, TAG
    def FONT(*args, **kwargs):
        return TAG.font(*args, **kwargs)
    def types(field):
        # third column: length for strings, pk/fk markers, blank otherwise
        f_type = field.type
        if not isinstance(f_type,str):
            return ' '
        elif f_type == 'string':
            return field.length
        elif f_type == 'id':
            return B('pk')
        elif f_type.startswith('reference') or \
                f_type.startswith('list:reference'):
            return B('fk')
        else:
            return ' '
    # This is horrible HTML but the only kind graphviz understands
    rows = []
    cellpadding = 4
    color = "#000000"
    bgcolor = "#FFFFFF"
    face = "Helvetica"
    face_bold = "Helvetica Bold"
    border = 0
    # header row: table name, inverted colors
    rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor),
                      _colspan=3, _cellpadding=cellpadding,
                      _align="center", _bgcolor=color)))
    for row in db[table]:
        rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold),
                          _align="left", _cellpadding=cellpadding,
                          _border=border),
                       TD(FONT(row.type, _color=color, _face=face),
                          _align="left", _cellpadding=cellpadding,
                          _border=border),
                       TD(FONT(types(row), _color=color, _face=face),
                          _align="center", _cellpadding=cellpadding,
                          _border=border)))
    return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1,
                                          _cellborder=0, _cellspacing=0)
                            ).xml()
def manage():
    """Generic CRUD management view driven by the global ``manager_action``
    dict (keys: 'tables', optional 'db', 'heading', 'smartgrid_args').

    Without args it returns the tab-list context; with args it builds a
    SQLFORM.smartgrid for the selected table.  The 'auth' action specializes
    labels/ordering for the auth tables.  Relies on web2py globals
    (``request``, ``auth``, ``T``, ``SQLFORM``).
    """
    tables = manager_action['tables']
    if isinstance(tables[0], str):
        # Table names were given as strings: resolve the db object (by name
        # if necessary) and look each table up on it.
        db = manager_action.get('db', auth.db)
        db = globals()[db] if isinstance(db, str) else db
        tables = [db[table] for table in tables]
    if request.args(0) == 'auth':
        # Friendlier plural names for the built-in auth tables.
        auth.table_user()._plural = T('Users')
        auth.table_group()._plural = T('Roles')
        auth.table_membership()._plural = T('Memberships')
        auth.table_permission()._plural = T('Permissions')
    if request.extension != 'load':
        # Full-page request: render the container with one tab per table;
        # the grid itself is ajax-loaded with extension 'load'.
        return dict(heading=manager_action.get('heading',
                    T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())),
                    tablenames=[table._tablename for table in tables],
                    labels=[table._plural.title() for table in tables])
    table = tables[request.args(1, cast=int)]
    formname = '%s_grid' % table._tablename
    linked_tables = orderby = None
    if request.args(0) == 'auth':
        # Hide surrogate ids and relabel FK columns for readability.
        auth.table_group()._id.readable = \
            auth.table_membership()._id.readable = \
            auth.table_permission()._id.readable = False
        auth.table_membership().user_id.label = T('User')
        auth.table_membership().group_id.label = T('Role')
        auth.table_permission().group_id.label = T('Role')
        auth.table_permission().name.label = T('Permission')
        if table == auth.table_user():
            linked_tables = [auth.settings.table_membership_name]
        elif table == auth.table_group():
            orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None
        elif table == auth.table_permission():
            orderby = 'group_id'
    kwargs = dict(user_signature=True, maxtextlength=1000,
                  orderby=orderby, linked_tables=linked_tables)
    # Per-table smartgrid overrides layered over the DEFAULT ones.
    smartgrid_args = manager_action.get('smartgrid_args', {})
    kwargs.update(**smartgrid_args.get('DEFAULT', {}))
    kwargs.update(**smartgrid_args.get(table._tablename, {}))
    grid = SQLFORM.smartgrid(table, args=request.args[:2], formname=formname, **kwargs)
    return grid
def hooks():
    """List every DAL table callback (_before/_after x insert/update/delete)
    registered on any table of any database, as a collapsible UL tree.

    For each hook callable it resolves the source file and line number via
    ``inspect`` so the UI can link straight into the admin editor.  Relies
    on the globals ``databases``, ``request``, ``URL``, ``IS_SLUG`` and the
    gluon HTML helpers.
    """
    import functools
    import inspect
    # The six hook attribute names defined by the DAL.
    list_op = ['_%s_%s' %(h,m) for h in ['before', 'after'] for m in ['insert','update','delete']]
    tables = []
    with_build_it = False
    for db_str in sorted(databases):
        db = databases[db_str]
        for t in db.tables:
            method_hooks = []
            for op in list_op:
                functions = []
                for f in getattr(db[t], op):
                    if hasattr(f, '__call__'):
                        try:
                            if isinstance(f, (functools.partial)):
                                # Unwrap partials so inspect sees the real function.
                                f = f.func
                            filename = inspect.getsourcefile(f)
                            details = {'funcname':f.__name__,
                                       'filename':filename[len(request.folder):] if request.folder in filename else None,
                                       'lineno': inspect.getsourcelines(f)[1]}
                            if details['filename']: # Built in functions as delete_uploaded_files are not editable
                                details['url'] = URL(a='admin',c='default',f='edit', args=[request['application'], details['filename']],vars={'lineno':details['lineno']})
                            if details['filename'] or with_build_it:
                                functions.append(details)
                        # compiled app and windows build don't support code inspection
                        except:
                            pass
                if len(functions):
                    method_hooks.append({'name': op, 'functions':functions})
            if len(method_hooks):
                tables.append({'name': "%s.%s" % (db_str, t), 'slug': IS_SLUG()("%s.%s" % (db_str,t))[0], 'method_hooks':method_hooks})
    # Render: one clickable entry per table, expanding to its hook lists.
    ul_main = UL(_class='nav nav-list')
    for t in tables:
        ul_main.append(A(t['name'], _onclick="collapse('a_%s')" % t['slug']))
        ul_t = UL(_class='nav nav-list', _id="a_%s" % t['slug'], _style='display:none')
        for op in t['method_hooks']:
            ul_t.append(LI(op['name']))
            ul_t.append(UL([LI(A(f['funcname'], _class="editor_filelink", _href=f['url']if 'url' in f else None, **{'_data-lineno':f['lineno']-1})) for f in op['functions']]))
        ul_main.append(ul_t)
    return ul_main
# ##########################################################
# d3 based model visualizations
# ###########################################################
def d3_graph_model():
    """ See https://www.facebook.com/web2py/posts/145613995589010 from Bruno Rocha
    and also the app_admin bg_graph_model function
    Create a list of table dicts, called "nodes"

    Also collects "links" (one per reference/list:reference field, pointing
    source table -> referenced table) so the template can draw a d3
    force-directed model graph.  Uses the globals ``databases``,
    ``eval_in_global_env``, ``response`` and ``URL``.
    """
    nodes = []
    links = []
    for database in databases:
        db = eval_in_global_env(database)
        for tablename in db.tables:
            fields = []
            for field in db[tablename]:
                # Display marker mirrors table_template: length / PK / FK.
                f_type = field.type
                if not isinstance(f_type, str):
                    disp = ' '
                elif f_type == 'string':
                    disp = field.length
                elif f_type == 'id':
                    disp = "PK"
                elif f_type.startswith('reference') or \
                        f_type.startswith('list:reference'):
                    disp = "FK"
                else:
                    disp = ' '
                fields.append(dict(name=field.name, type=field.type, disp=disp))
                if isinstance(f_type, str) and (
                        f_type.startswith('reference') or
                        f_type.startswith('list:reference')):
                    # The referenced table name follows the keyword, e.g.
                    # "reference auth_user.id" -> "auth_user".
                    referenced_table = f_type.split()[1].split('.')[0]
                    links.append(dict(source=tablename, target = referenced_table))
            nodes.append(dict(name=tablename, type="table", fields = fields))
    # d3 v4 allows individual modules to be specified. The complete d3 library is included below.
    response.files.append(URL('admin','static','js/d3.min.js'))
    response.files.append(URL('admin','static','js/d3_graph.js'))
    return dict(databases=databases, nodes=nodes, links=links)
| [
"darkangelice6@gmail.com"
] | darkangelice6@gmail.com |
b61514eb1142df5237a51fdb02a3262b95cadbd4 | dfe65f843d446493ca43afe6b054595c977c953b | /store/migrations/0001_initial.py | 29feddddb5fa60a54b9b68f57d59e49cb56159e4 | [] | no_license | omprakashn27/djnagoecom | a7d40066ec93b3244dc6c7a736f8c288147d5f46 | 7a1061c39f04a35c1dc95fcde352041a48285475 | refs/heads/main | 2023-07-21T21:35:46.498539 | 2021-09-07T10:25:52 | 2021-09-07T10:25:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,354 | py | # Generated by Django 3.1.7 on 2021-08-15 12:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import store.models
class Migration(migrations.Migration):
    """Initial schema for the store app (auto-generated by Django 3.1.7):
    creates Category, Order, Profile, Product, OrderItem and Cart tables.
    Do not hand-edit generated operations."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.CharField(max_length=150)),
                ('name', models.CharField(max_length=150)),
                ('category_image', models.ImageField(blank=True, null=True, upload_to=store.models.get_file_path)),
                ('description', models.CharField(max_length=150)),
                ('status', models.BooleanField(default=False, help_text='0=default,1=hidden')),
                ('trending', models.BooleanField(default=False, help_text='0=default,1=trending')),
                ('meta_title', models.CharField(max_length=500)),
                ('meta_keywords', models.CharField(max_length=500)),
                ('meta_description', models.CharField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('fname', models.CharField(max_length=150)),
                ('lname', models.CharField(max_length=150)),
                ('email', models.EmailField(max_length=254)),
                ('phone', models.CharField(max_length=150)),
                ('address1', models.CharField(max_length=150)),
                ('address2', models.CharField(max_length=150)),
                ('city', models.CharField(max_length=150)),
                ('state', models.CharField(max_length=150)),
                ('country', models.CharField(max_length=150)),
                ('pincode', models.CharField(max_length=150)),
                ('total_price', models.FloatField()),
                ('payment_mode', models.CharField(default='0', max_length=150)),
                ('payment_id', models.CharField(blank=True, max_length=150, null=True)),
                ('status', models.CharField(choices=[('Pending', 'Pending'), ('Out For Shipping', 'Out For Shipping'), ('Completed', 'Completed')], default='Pending', max_length=150)),
                ('message', models.TextField(blank=True, null=True)),
                ('tracking_no', models.CharField(max_length=150)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone', models.CharField(max_length=150)),
                ('address1', models.CharField(max_length=150)),
                ('address2', models.CharField(max_length=150)),
                ('city', models.CharField(max_length=150)),
                ('state', models.CharField(max_length=150)),
                ('country', models.CharField(max_length=150)),
                ('pincode', models.CharField(max_length=150)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.CharField(max_length=150)),
                ('name', models.CharField(max_length=150)),
                ('product_image', models.ImageField(blank=True, null=True, upload_to=store.models.get_file_path)),
                ('small_description', models.CharField(max_length=150)),
                ('quantity', models.IntegerField()),
                ('description', models.CharField(max_length=150)),
                ('original_price', models.FloatField()),
                ('selling_price', models.FloatField()),
                ('trending', models.BooleanField(default=False, help_text='0=default,1=trending')),
                ('tag', models.CharField(blank=True, max_length=150, null=True)),
                ('status', models.BooleanField(default=False, help_text='0=default,1=hidden')),
                ('meta_title', models.CharField(max_length=500)),
                ('meta_keywords', models.CharField(max_length=500)),
                ('meta_description', models.CharField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.category')),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.FloatField()),
                ('quantity', models.IntegerField()),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.order')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
            ],
        ),
        migrations.CreateModel(
            name='Cart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_qty', models.IntegerField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.product')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"omprakash95844@gmail.com"
] | omprakash95844@gmail.com |
0b5b7f88519fa9b9b26e3ad6652ff1a4672c1541 | f2c773e7ccdd60caf5a7c062305cfcd14d11beec | /AR_Scripts_1.0.16_R21_Deprecated/AR_SwapObjects.py | 85e406b4c862dc6ca5dea0e0ae5157af60259cd3 | [] | no_license | aturtur/cinema4d-scripts | 4ccfbc3403326a79076d9bcf001189cd5427f46a | a87fc6c835db5d205f8428cc67ccd30fdd4b4d4b | refs/heads/master | 2023-07-03T13:34:58.735879 | 2023-06-19T09:57:22 | 2023-06-19T09:57:22 | 63,731,563 | 316 | 49 | null | 2022-04-24T02:31:17 | 2016-07-19T22:15:05 | Python | UTF-8 | Python | false | false | 2,091 | py | """
AR_SwapObjects
Author: Arttu Rautio (aturtur)
Website: http://aturtur.com/
Name-US: AR_SwapObjects
Version: 1.0
Description-US: Swaps selected objects between each other. Holding SHIFT while executing script swaps also objects place in hierarchy.
Written for Maxon Cinema 4D R21.207
Python version 2.7.14
"""
# Libraries
import c4d
# Functions
def swapObjects():
    """Swap the global transforms of the two selected objects.

    If SHIFT is held, the objects also trade places in the Object Manager
    hierarchy; two temporary Null placeholders mark each object's original
    slot so the re-insertion is order-safe.

    Returns:
        bool: True on success, False if fewer than two objects are selected.
    """
    doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
    bc = c4d.BaseContainer() # Container used to poll the keyboard state
    selection = doc.GetActiveObjects(c4d.GETACTIVEOBJECTFLAGS_NONE) # Get selection
    if len(selection) < 2: # Robustness: swapping needs two selected objects
        return False
    tempNullA = c4d.BaseObject(c4d.Onull) # Temporary placeholder Nulls
    tempNullB = c4d.BaseObject(c4d.Onull)
    objA = selection[0] # Get object A
    objB = selection[1] # Get object B
    matA = objA.GetMg() # Get object A's global matrix
    matB = objB.GetMg() # Get object B's global matrix
    doc.AddUndo(c4d.UNDOTYPE_CHANGE, objA) # Add undo for changing object A
    doc.AddUndo(c4d.UNDOTYPE_CHANGE, objB) # Add undo for changing object B
    tempNullA.InsertBefore(objA) # Insert temp Null A before object A
    tempNullB.InsertBefore(objB) # Insert temp Null B before object B
    if c4d.gui.GetInputState(c4d.BFM_INPUT_KEYBOARD,c4d.BFM_INPUT_CHANNEL,bc):
        if bc[c4d.BFM_INPUT_QUALIFIER] & c4d.QSHIFT: # If 'shift' key is pressed
            # Swap hierarchy slots via the placeholder Nulls.
            objA.InsertAfter(tempNullB) # Move object
            objB.InsertAfter(tempNullA) # Move object
    objA.SetMg(matB) # Set new matrix to object A
    objB.SetMg(matA) # Set new matrix to object B
    tempNullA.Remove() # Delete temporary objects
    tempNullB.Remove()
    return True # Everything is fine
def main():
    """Entry point: run the swap wrapped in a single undo step, then
    refresh the Cinema 4D UI."""
    import traceback
    try:
        doc = c4d.documents.GetActiveDocument() # Get active Cinema 4D document
        doc.StartUndo() # Start recording undos
        swapObjects() # Run the script
        doc.EndUndo() # Stop recording undos
        c4d.EventAdd() # Refresh Cinema 4D
    except Exception: # Report failures instead of silently swallowing them
        traceback.print_exc()
# Execute main()
if __name__=='__main__':
    main()
"rautio.arttu@gmail.com"
] | rautio.arttu@gmail.com |
8bab3f142dc7c1a3aad90c1a9a629d069ef615f9 | b952f8b08eca66765c2063a5ad54039fd46884fd | /erp/apps/simplewiki/views.py | 0ac320929d7bb22ceca59c57ccf77e9ca77d5fa3 | [] | no_license | Marine-22/ERP | 5585ea21ec1880dbbd9e0154574c1c0ba3e5a095 | b55d6f3cc5010f4231f8859a61579b3aa42cdec6 | refs/heads/master | 2020-05-27T09:27:54.877448 | 2014-10-09T15:18:21 | 2014-10-09T15:18:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,842 | py | # -*- coding: utf-8 -*-
import types
from django.core.urlresolvers import get_callable
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseServerError, HttpResponseForbidden, HttpResponseNotAllowed
from django.utils import simplejson
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, Context, loader
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Q
from django.conf import settings
from django.contrib import messages
from erp.apps.simplewiki.models import *
from erp.apps.simplewiki.settings import *
def view(request, slug):
    """Render the article identified by ``slug``.

    Returns an error page if the slug does not resolve or the user lacks
    read permission; otherwise renders simplewiki_view.html with write/
    attachment capability flags for the current user.
    """
    (article, err) = fetch_from_url(request, slug)
    if err:
        return err
    perm_err = check_permissions(request, article, check_read=True)
    if perm_err:
        return perm_err
    c = RequestContext(request, {'wiki_article': article,
                                 'wiki_write': article.can_write_l(request.user),
                                 'wiki_attachments_write': article.can_attach(request.user),
                                 } )
    return render_to_response('simplewiki_view.html', c)
def root_redirect(request):
    """
    Reason for redirecting:
    The root article needs to to have a specific slug
    in the URL, otherwise pattern matching in urls.py will get confused.
    I've tried various methods to avoid this, but depending on Django/Python
    versions, regexps have been greedy in two different ways.. so I just
    skipped having problematic URLs like '/wiki/_edit' for editing the main page.
    #benjaoming
    """
    try:
        root = Article.get_root()
    except RootArticleNotFound, e:
        # No root article exists yet -- show the generic "not found" page.
        err = not_found(request, WIKI_ROOT_SLUG)
        return err
    return HttpResponseRedirect(reverse('wiki_view', args=(root.slug,)))
@transaction.commit_on_success
def create(request, slug):
    """Create a new article at ``slug``.

    POST: validate CreateArticleForm, save the Article and its first
    Revision (stamping the user when authenticated), then redirect to the
    new page.  GET: show a blank creation form; the title may be pre-filled
    from the ``wiki_article_name`` query parameter.  Duplicate slugs
    (case-insensitive) bounce back to the root with an error message.
    """
    url_path = get_url_path(slug)
    if request.method == 'POST':
        f = CreateArticleForm(request.POST)
        if f.is_valid():
            article = Article()
            # The article's slug is the last element of the requested path.
            article.slug = url_path[-1]
            if not request.user.is_anonymous():
                article.created_by = request.user
            article.title = f.cleaned_data.get('title')
            a = article.save()
            new_revision = f.save(commit=False)
            if not request.user.is_anonymous():
                new_revision.revision_user = request.user
            new_revision.article = article
            new_revision.save()
            return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
    else:
        if Article.objects.filter(slug__iexact=url_path[-1]).exists():
            messages.add_message(request, messages.ERROR,
                "An article with the '%s' slug already exists." % url_path[-1])
            return HttpResponseRedirect(reverse('wiki_view', args=(Article.get_root(),)))
        f = CreateArticleForm(initial={'title':request.GET.get('wiki_article_name', url_path[-1]),
                                       'contents':_('Headline\n===\n\n')})
    c = RequestContext(request, {'wiki_form': f,
                                 'wiki_write': True,
                                 })
    return render_to_response('simplewiki_create.html', c)
@transaction.commit_on_success
def edit(request, slug):
    """Edit an article's content (and title, if WIKI_ALLOW_TITLE_EDIT).

    Requires write permission on an unlocked article.  POST saves a new
    Revision (skipped when the diff is empty) and redirects back to the
    article; GET shows the form pre-filled with the current revision.
    """
    (article, err) = fetch_from_url(request, slug)
    if err:
        return err
    # Check write permissions
    perm_err = check_permissions(request, article, check_write=True, check_locked=True)
    if perm_err:
        return perm_err
    if WIKI_ALLOW_TITLE_EDIT:
        EditForm = RevisionFormWithTitle
    else:
        EditForm = RevisionForm
    if request.method == 'POST':
        f = EditForm(request.POST)
        if f.is_valid():
            new_revision = f.save(commit=False)
            new_revision.article = article
            # Check that something has actually been changed...
            if not new_revision.get_diff():
                # BUGFIX: previously returned a (None, response) tuple,
                # which is not a valid Django view return value.
                return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
            if not request.user.is_anonymous():
                new_revision.revision_user = request.user
            new_revision.save()
            if WIKI_ALLOW_TITLE_EDIT:
                new_revision.article.title = f.cleaned_data['title']
                new_revision.article.save()
            return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
    else:
        f = EditForm({'contents': article.current_revision.contents, 'title': article.title})
    c = RequestContext(request, {'wiki_form': f,
                                 'wiki_write': True,
                                 'wiki_article': article,
                                 'wiki_attachments_write': article.can_attach(request.user),
                                 })
    return render_to_response('simplewiki_edit.html', c)
@transaction.commit_on_success
def history(request, slug, page=1):
    """Show the revision history of an article, ten revisions per page.

    A POST containing a ``revision`` id reverts the article to that
    revision (requires write permission on an unlocked article) and
    redirects back to the article view.
    """
    (article, err) = fetch_from_url(request, slug)
    if err:
        return err
    perm_err = check_permissions(request, article, check_read=True)
    if perm_err:
        return perm_err
    page_size = 10
    try:
        p = int(page)
    except ValueError:
        p = 1
    history = Revision.objects.filter(article__exact = article).order_by('-counter')
    if request.method == 'POST':
        if request.POST.__contains__('revision'):
            # Reverting to an old revision needs write access.
            perm_err = check_permissions(request, article, check_write=True, check_locked=True)
            if perm_err:
                return perm_err
            try:
                r = int(request.POST['revision'])
                article.current_revision = Revision.objects.get(id=r)
                article.save()
            except:
                # Invalid or missing revision id: silently keep the current one.
                pass
            finally:
                return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
    # Ceiling division (Python 2 integer '/') to count pages.
    page_count = (history.count()+(page_size-1)) / page_size
    if p > page_count:
        p = 1
    beginItem = (p-1) * page_size
    next_page = p + 1 if page_count > p else None
    prev_page = p - 1 if p > 1 else None
    c = RequestContext(request, {'wiki_page': p,
                                 'wiki_next_page': next_page,
                                 'wiki_prev_page': prev_page,
                                 'wiki_write': article.can_write_l(request.user),
                                 'wiki_attachments_write': article.can_attach(request.user),
                                 'wiki_article': article,
                                 'wiki_history': history[beginItem:beginItem+page_size],})
    return render_to_response('simplewiki_history.html', c)
def search_articles(request, slug):
    """Full-text-ish search over article titles and current contents.

    Each whitespace-separated word narrows the queryset; a leading '-'
    negates (excludes) that word.  A single hit redirects straight to the
    article; otherwise the result list is rendered.  Non-POST requests
    fall through to the normal article view.
    """
    # blampe: We should check for the presence of other popular django search
    # apps and use those if possible. Only fall back on this as a last resort.
    # Adding some context to results (eg where matches were) would also be nice.
    # todo: maybe do some perm checking here
    if request.method == 'POST':
        querystring = request.POST['value'].strip()
        if querystring:
            results = Article.objects.all()
            for queryword in querystring.split():
                # Basic negation is as fancy as we get right now
                if queryword[0] == '-' and len(queryword) > 1:
                    results._search = lambda x: results.exclude(x)
                    queryword = queryword[1:]
                else:
                    results._search = lambda x: results.filter(x)
                # _search is called immediately, so the late-binding closure
                # over `results` is safe here.
                results = results._search(Q(current_revision__contents__icontains = queryword) | \
                                          Q(title = queryword))
        else:
            # Need to throttle results by splitting them into pages...
            results = Article.objects.all()
        if results.count() == 1:
            messages.add_message(request, messages.INFO,
                "Showing this page because it was the only page found for the entered query.")
            return HttpResponseRedirect(reverse('wiki_view', args=(results[0].get_url(),)))
        else:
            c = RequestContext(request, {'wiki_search_results': results,
                                         'wiki_search_query': querystring})
            return render_to_response('simplewiki_searchresults.html', c)
    return view(request, slug)
def search_related(request, slug):
    """AJAX autocomplete endpoint for the "related articles" widget.

    Returns at most ten articles (as JSON id/value/info records) whose
    title starts with the ``query`` GET parameter, excluding the article
    itself (``self`` GET parameter) and any already-related articles.
    """
    (article, err) = fetch_from_url(request, slug)
    if err:
        return err
    perm_err = check_permissions(request, article, check_read=True)
    if perm_err:
        return perm_err
    search_string = request.GET.get('query', None)
    self_pk = request.GET.get('self', None)
    if search_string:
        results = []
        related = Article.objects.filter(title__istartswith = search_string)
        others = article.related.all()
        if self_pk:
            related = related.exclude(pk=self_pk)
        if others:
            related = related.exclude(related__in = others)
        related = related.order_by('title')[:10]
        for item in related:
            results.append({'id': str(item.id),
                            'value': item.title,
                            'info': item.get_url()})
    else:
        results = []
    json = simplejson.dumps({'results': results})
    return HttpResponse(json, mimetype='application/json')
@transaction.commit_on_success
def add_related(request, slug):
    """POST handler: mark another article (POST ``id``) as related.

    The link is added only once and never to the article itself; any
    failure is swallowed and the user is redirected back to the article.
    """
    (article, err) = fetch_from_url(request, slug)
    if err:
        return err
    perm_err = check_permissions(request, article, check_write=True, check_locked=True)
    if perm_err:
        return perm_err
    try:
        related_id = request.POST['id']
        rel = Article.objects.get(id=related_id)
        has_already = article.related.filter(id=related_id).count()
        if has_already == 0 and not rel == article:
            article.related.add(rel)
            article.save()
    except:
        # Missing/invalid id: ignore and fall through to the redirect.
        pass
    finally:
        return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
@transaction.commit_on_success
def remove_related(request, slug, related_id):
    """Remove the related-article link ``related_id`` from this article.

    Mirrors add_related: requires write permission on an unlocked article,
    silently ignores bad ids, and always redirects back to the article.
    """
    (article, err) = fetch_from_url(request, slug)
    if err:
        return err
    perm_err = check_permissions(request, article, check_write=True, check_locked=True)
    if perm_err:
        return perm_err
    try:
        rel_id = int(related_id)
        rel = Article.objects.get(id=rel_id)
        article.related.remove(rel)
        article.save()
    except:
        # Invalid id or link not present: nothing to remove.
        pass
    finally:
        return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def random_article(request, slug):
    """Redirect to a randomly chosen article, or to the root when the
    wiki has no articles at all."""
    from random import randint
    total = Article.objects.count()
    if not total:
        return root_redirect(request)
    pick = randint(0, total - 1)
    chosen = Article.objects.all()[pick]
    return HttpResponseRedirect(reverse('wiki_view', args=(chosen.get_url(),)))
def encode_err(request, url):
    """Render the generic error page flagging a URL-encoding problem."""
    context = RequestContext(request, {'wiki_err_encode': True})
    return render_to_response('simplewiki_error.html', context)
def not_found(request, slug):
    """Render the error page for a wiki slug that matched no article."""
    context = RequestContext(request, {'wiki_err_notfound': True,
                                       'wiki_url': slug})
    return render_to_response('simplewiki_error.html', context)
def get_url_path(url):
    """Return a list of the non-empty path elements of ``url``.

    Splitting on '/' yields empty strings for leading, trailing or doubled
    slashes; those are dropped.  A concrete list is returned (not a lazy
    ``filter`` object) because callers index into the result, e.g.
    ``url_path[-1]`` in create() -- the original ``filter(...)`` breaks
    that under Python 3.
    """
    return [part for part in url.split('/') if part != '']
def fetch_from_url(request, url):
    """Analyze URL, returning the article and the articles in its path
    If something goes wrong, return an error HTTP response

    Returns a 2-tuple: (article, None) on success, (None, error_response)
    when the lookup fails.
    """
    try:
        article = Article.objects.get(slug__iexact=url)
        return (article, None)
    except:
        # NOTE(review): the bare except treats any failure (missing article,
        # multiple case-insensitive matches, db errors) as "not found" --
        # consider narrowing to Article.DoesNotExist.
        err = not_found(request, url)
        return (None, err)
def check_permissions(request, article, check_read=False, check_write=False, check_locked=False):
    """Return a rendered error response if the user lacks the requested
    access to ``article``, or None when all requested checks pass.

    ``check_read``/``check_write`` consult the article's per-user ACL
    methods; ``check_locked`` fails whenever the article is locked.
    """
    read_err = check_read and not article.can_read(request.user)
    write_err = check_write and not article.can_write(request.user)
    locked_err = check_locked and article.locked
    if read_err or write_err or locked_err:
        c = RequestContext(request, {'wiki_article': article,
                                     'wiki_err_noread': read_err,
                                     'wiki_err_nowrite': write_err,
                                     'wiki_err_locked': locked_err,})
        # TODO: Make this a little less jarring by just displaying an error
        # on the current page? (no such redirect happens for an anon upload yet)
        # benjaoming: I think this is the nicest way of displaying an error, but
        # these errors shouldn't occur, but rather be prevented on the other pages.
        return render_to_response('simplewiki_error.html', c)
    else:
        return None
####################
# LOGIN PROTECTION #
####################
# Conditionally wrap the view functions in Django's login_required
# decorator, driven by the simplewiki settings: read-only views and
# editing views can each be restricted to authenticated users
# independently.
if WIKI_REQUIRE_LOGIN_VIEW:
    view = login_required(view)
    history = login_required(history)
    search_related = login_required(search_related)
    encode_err = login_required(encode_err)
if WIKI_REQUIRE_LOGIN_EDIT:
    create = login_required(create)
    edit = login_required(edit)
    add_related = login_required(add_related)
    remove_related = login_required(remove_related)
# Append any extra template context processors declared by simplewiki to
# the global Django setting.
if WIKI_CONTEXT_PREPROCESSORS:
    settings.TEMPLATE_CONTEXT_PROCESSORS = settings.TEMPLATE_CONTEXT_PROCESSORS + WIKI_CONTEXT_PREPROCESSORS
| [
"root@Debian-60-squeeze-32-minimal.(none)"
] | root@Debian-60-squeeze-32-minimal.(none) |
66eee5b3e6193fdd3fbf93572531c18f032831fc | 5905ed0409c332492409d7707528452b19692415 | /google-cloud-sdk/lib/googlecloudsdk/command_lib/artifacts/print_settings/gradle.py | 82a99b6bd2e49073fe4da73c767a02d9c12bb651 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | millerthomasj/google-cloud-sdk | c37b7ddec08afadec6ee4c165153cd404f7dec5e | 3deda6696c3be6a679689b728da3a458c836a24e | refs/heads/master | 2023-08-10T16:03:41.819756 | 2021-09-08T00:00:00 | 2021-09-08T15:08:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,051 | py | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for forming settings for gradle."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
SERVICE_ACCOUNT_TEMPLATE = """\
// Move the secret to ~/.gradle.properties
def artifactRegistryMavenSecret = "{password}"
// Insert following snippet into your build.gradle
// see docs.gradle.org/current/userguide/publishing_maven.html
plugins {{
id "maven-publish"
}}
publishing {{
repositories {{
maven {{
url "https://{location}-maven.pkg.dev/{repo_path}"
credentials {{
username = "{username}"
password = "$artifactRegistryMavenSecret"
}}
}}
}}
}}
repositories {{
maven {{
url "https://{location}-maven.pkg.dev/{repo_path}"
credentials {{
username = "{username}"
password = "$artifactRegistryMavenSecret"
}}
authentication {{
basic(BasicAuthentication)
}}
}}
}}
"""
NO_SERVICE_ACCOUNT_TEMPLATE = """\
// Insert following snippet into your build.gradle
// see docs.gradle.org/current/userguide/publishing_maven.html
plugins {{
id "maven-publish"
id "com.google.cloud.artifactregistry.gradle-plugin" version "{extension_version}"
}}
publishing {{
repositories {{
maven {{
url "artifactregistry://{location}-maven.pkg.dev/{repo_path}"
}}
}}
}}
repositories {{
maven {{
url "artifactregistry://{location}-maven.pkg.dev/{repo_path}"
}}
}}
"""
| [
"gcloud@google.com"
] | gcloud@google.com |
83c234db09049cf8803edc2703c52326a9257c03 | 6023d9958b0d03ba905d8052f6fff8bbeb43bd8a | /ch01Sort/HeapSort.py | 1b082d94d87602575ef3fb6b41c4f1218c59cd65 | [] | no_license | EchoLLLiu/Basic_Algorithm | bd9a4f20803b951420411b2acdfa701cd73e1aa9 | f6acb797a9ae3b23dddd8a49bec4478f1a4cd6c4 | refs/heads/master | 2020-03-19T20:10:39.492699 | 2018-06-11T07:39:12 | 2018-06-11T07:39:12 | 136,890,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,674 | py | # -*- coding:utf-8 -*-
__author__ = 'LY'
__time__ = '2018/6/11'
# 堆排(完全二叉树:直接利用列表)
# (1)从最后一个非叶子节点开始,构建初始大根堆:根比孩子大
# (2)将第一个元素(最大)与最后一个元素交换,此时[0,...,n-1]为无序,[n]为有序,对[0,...,n-1]进行(1)
# 再将第一个元素与无序列表中最后一个交换,此时[0,...,n-2]无序,[n-1,n]有序,对[0,...,n-2]进行(2),以此类推
class HeapSort:
    """In-place max-heap sort on a plain Python list.

    The list is treated as a complete binary tree (children of index i
    live at 2*i+1 and 2*i+2).  Note: Heap_Sort returns the values in
    DESCENDING order, because the ascending in-place result is reversed
    before being returned.
    """
    def Max_Heapify(self, heap, heapsize, root):
        """Sift the value at ``root`` down until the subtree rooted there
        satisfies the max-heap property (parent >= children).  Only the
        first ``heapsize`` slots of ``heap`` are considered part of the
        heap.  Iterative equivalent of the classic recursive sift-down."""
        node = root
        while True:
            left = 2 * node + 1
            right = left + 1
            biggest = node
            if left < heapsize and heap[left] > heap[biggest]:
                biggest = left
            if right < heapsize and heap[right] > heap[biggest]:
                biggest = right
            if biggest == node:
                break
            heap[node], heap[biggest] = heap[biggest], heap[node]
            node = biggest
    def Build_Max_Heap(self, heap):
        """Turn ``heap`` into a max-heap by sifting down every internal
        node, starting from the last non-leaf and working toward the root."""
        heapsize = len(heap)
        for node in reversed(range((heapsize - 2) // 2 + 1)):
            self.Max_Heapify(heap, heapsize, node)
    def Heap_Sort(self, heap):
        """Heap-sort ``heap`` in place, then return a reversed copy
        (i.e. the elements in descending order)."""
        self.Build_Max_Heap(heap)
        for end in reversed(range(len(heap))):
            # Move the current maximum to the end of the live region,
            # then restore the heap property on what remains.
            heap[0], heap[end] = heap[end], heap[0]
            self.Max_Heapify(heap, end, 0)
        return heap[::-1]
if __name__ == '__main__':
    # Demo: sort a sample list and print it before and after.
    # NOTE: Heap_Sort returns the values in descending order (heap[::-1]).
    heap = [30, 50, 57, 77, 62, 78, 94, 80, 84]
    hs = HeapSort()
    print("待排序大根堆:", end=' ')
    print(heap)
    print("大根堆排序 :", end=' ')
    sort_heap = hs.Heap_Sort(heap)
    print(sort_heap)
"echoliu61@foxmail.com"
] | echoliu61@foxmail.com |
9abb36093dd4eb32eac15ae3bed936f6cee0244f | 840c4a703bcf96346cf52f4ea01d98c915939246 | /src/models/train_model.py | 2db599f10b412ff3bbc113e405cb8fa0b439f208 | [] | no_license | alphagov-mirror/tagging-suggester | 4b5b8c4e67ffc65e5e8b588b30b5725b0086182b | 1d5a6d54cefbf03efb32f67ae779eedd2f3d0071 | refs/heads/master | 2020-07-13T23:34:12.543645 | 2019-12-16T10:49:10 | 2019-12-16T10:49:10 | 205,177,279 | 0 | 0 | null | 2019-08-29T14:02:20 | 2019-08-29T14:02:19 | null | UTF-8 | Python | false | false | 146 | py | from models.apex_node_predictor import *
if __name__ == "__main__":
apex_node_predictor = ApexNodePredictor()
apex_node_predictor.train() | [
"oscar.wyatt@digital.cabinet-office.gov.uk"
] | oscar.wyatt@digital.cabinet-office.gov.uk |
49fa812649395ceaca60266b1cbbfc310e8da3bf | cd839f096fff4d14709841de4ceb25bb38224075 | /vertualizor/vertualizor/rest_api/dev.py | 309c8c408e044cae1f991eeb74da55088db7a118 | [] | no_license | aliscie/Tweet-clone-Tweetme-2 | 2506f92d9b10f17f26594d524395f569c7e79b82 | b8c9c8d66c8de3daf33d43d8e8236fc4afecdd29 | refs/heads/master | 2023-01-31T01:12:56.647684 | 2020-12-15T07:13:20 | 2020-12-15T07:13:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | from django.contrib.auth import get_user_model
from rest_framework import authentication
User = get_user_model()
class DevAuthentication(authentication.BasicAuthentication):
    """Development-only DRF authentication backend.

    Ignores any supplied credentials and authenticates every request as a
    user fetched from the database -- presumably for local testing only;
    do NOT enable in production.
    """
    def authenticate(self, request):
        # Only the user with id=1 can match; order_by('?') is a random
        # ordering over that (at most single-element) queryset.
        qs = User.objects.filter(id=1)
        # first() may be None if no such user exists; DRF receives the
        # (user, auth) pair either way.
        user = qs.order_by('?').first()
        return(user, None)
| [
"weplutus.1@gmail.com"
] | weplutus.1@gmail.com |
463eaab959122d3d7a4c548f6fb1790870651e6d | d75baa992299c5547f92fcebcfcf35d314e5b010 | /app/models/Estado.py | 94cd9c094892fee9dc2c1d89f8c901bfc61c725b | [
"Apache-2.0"
] | permissive | GeovaneCavalcante/Jeffson | f5af44cbdcca433a9b216d89316f3e490fc85fac | efb0232a9d79106a07b01655c82ec56572373a59 | refs/heads/master | 2021-06-13T03:39:21.142367 | 2017-04-17T15:23:05 | 2017-04-17T15:23:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from django.db import models
class Estado(models.Model):
    """State record: a two-letter abbreviation plus its full name
    (``uf`` suggests Brazilian "unidade federativa" -- confirm)."""
    # Explicit integer primary key (no auto-generated id column).
    id = models.IntegerField(primary_key=True)
    # Two-letter state abbreviation.
    uf = models.CharField(max_length = 2)
    nome = models.TextField()
| [
"geovanefeitosacavalcante@gmail.com/"
] | geovanefeitosacavalcante@gmail.com/ |
3f0f5477db60e2c10a868ce2c3f8c576f327c1dc | 0a345f37466570e8e85829c8d89b2c6337fe8633 | /src/products/migrations/0002_product_pricechangedate.py | 5b288de16e90b4756ef0a7b37b9008e07edaf9b2 | [] | no_license | priyankarenty/ecommercev1 | 4cc30d892497d57b68349a5f0395bd8e0b9e8c61 | 93115db1072d5fb51deb57bac8dc383b1e686e6e | refs/heads/master | 2022-12-09T04:28:22.326099 | 2018-04-17T10:51:01 | 2018-04-17T10:51:01 | 129,889,874 | 0 | 0 | null | 2022-12-08T01:00:14 | 2018-04-17T10:49:00 | Python | UTF-8 | Python | false | false | 455 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-19 05:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the optional PriceChangeDate field to Product."""
    dependencies = [
        ('products', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='product',
            name='PriceChangeDate',
            field=models.DateField(blank=True, null=True),
        ),
    ]
| [
"priyanka@Priyankas-MacBook-Air.local"
] | priyanka@Priyankas-MacBook-Air.local |
a68135e8c64df81ffe1e024edfa94cffcb8b9e5c | d911c8f76968a9d6e5bce9824ed922ece1778199 | /src/ieee754/part_ass/assign.py | 00cebc6dab33a25f93d4e790dd7883fc859bae9e | [] | no_license | ChipFlow/ieee754fpu | 1d85bf9907c7e456b4e1c281dfcbd7dc2b7e9016 | 0cdf4be4df5c0fbae476442c1a91b0e8140e2104 | refs/heads/master | 2023-08-16T11:31:05.619598 | 2021-10-02T09:39:43 | 2021-10-02T09:39:43 | 412,763,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,401 | py | # SPDX-License-Identifier: LGPL-2.1-or-later
# See Notices.txt for copyright information
"""
Copyright (C) 2021 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
dynamically-partitionable "assign" class, directly equivalent
to nmigen Assign
See:
* http://libre-riscv.org/3d_gpu/architecture/dynamic_simd/assign
* http://bugs.libre-riscv.org/show_bug.cgi?id=709
"""
from nmigen import Signal, Module, Elaboratable, Cat, Const, signed
from nmigen.back.pysim import Simulator, Settle
from nmutil.extend import ext
from ieee754.part_mul_add.partpoints import PartitionPoints
from ieee754.part.partsig import PartitionedSignal
def get_runlengths(pbit, size):
    """Decode a partition bitmask into a list of run lengths.

    Each set bit in ``pbit`` marks the start of a new partition, so it
    closes the partition currently being counted.  The final partition is
    closed after the scan, so the result always has (popcount + 1) entries
    whose sum is ``size + 1``.
    """
    runs = []
    current = 1
    for bit in range(size):
        if (pbit >> bit) & 1:
            # a set bit ends the running partition and opens a new one
            runs.append(current)
            current = 1
        else:
            current += 1
    # the last (possibly only) partition is still open: close it now
    runs.append(current)
    print ("get_runlengths", bin(pbit), size, runs)
    return runs
class PartitionedAssign(Elaboratable):
    """Dynamically-partitionable assignment operator.

    Mirrors nmigen's Assign: the input (``assign``) is copied onto
    ``output``, but both sides are split into runtime-selectable
    partitions controlled by ``mask``.  For every possible mask value a
    Case is generated that concatenates the correspondingly-sized (and
    extended/truncated) chunks of the input onto the output.
    """

    def __init__(self, shape, assign, mask):
        """Create a ``PartitionedAssign`` operator

        shape  -- shape (width/signedness) of the output
        assign -- source to copy from: a PartitionedSignal or plain Signal
        mask   -- partition-point enable bits (dict or list)
        """
        # work out the length (total of all PartitionedSignals)
        self.assign = assign
        if isinstance(mask, dict):
            mask = list(mask.values())
        self.mask = mask
        self.shape = shape
        self.output = PartitionedSignal(mask, self.shape, reset_less=True)
        self.partition_points = self.output.partpoints
        # number of partitions is one more than the number of break points
        self.mwidth = len(self.partition_points)+1

    def get_chunk(self, y, numparts):
        """Return the next ``numparts``-partitions-wide slice of the input.

        y -- single-element list holding the current partition index;
             mutated here so successive calls walk through the input
        """
        x = self.assign
        if not isinstance(x, PartitionedSignal):
            # assume Scalar. totally different rules
            end = numparts * (len(x) // self.mwidth)
            return x[:end]
        # PartitionedSignal: start at partition point
        keys = [0] + list(x.partpoints.keys()) + [len(x)]
        # get current index and increment it (for next Assign chunk)
        upto = y[0]
        y[0] += numparts
        print ("getting", upto, numparts, keys, len(x))
        # get the partition point as far as we are up to
        start = keys[upto]
        end = keys[upto+numparts]
        print ("start end", start, end, len(x))
        return x[start:end]

    def elaborate(self, platform):
        """Build the Switch/Case tree covering every mask combination."""
        m = Module()
        comb = m.d.comb
        keys = list(self.partition_points.keys())
        print ("keys", keys, "values", self.partition_points.values())
        print ("mask", self.mask)
        outpartsize = len(self.output) // self.mwidth
        # NOTE: `signed` here shadows the nmigen `signed` import for the
        # rest of this method
        width, signed = self.output.shape()
        print ("width, signed", width, signed)
        with m.Switch(Cat(self.mask)):
            # for each partition possibility, create a Assign sequence
            for pbit in range(1<<len(keys)):
                # set up some indices pointing to where things have got
                # then when called below in the inner nested loop they give
                # the relevant sequential chunk
                output = []
                y = [0]
                # get a list of the length of each partition run
                runlengths = get_runlengths(pbit, len(keys))
                print ("pbit", bin(pbit), "runs", runlengths)
                for i in runlengths: # for each partition
                    thing = self.get_chunk(y, i) # sequential chunks
                    # now check the length: truncate, extend or leave-alone
                    outlen = i * outpartsize
                    tlen = len(thing)
                    thing = ext(thing, (tlen, signed), outlen)
                    output.append(thing)
                with m.Case(pbit):
                    # direct access to the underlying Signal
                    comb += self.output.sig.eq(Cat(*output))
        return m

    def ports(self):
        """Simulation/trace ports: the input (lowered when partitioned)
        plus the lowered output."""
        if isinstance(self.assign, PartitionedSignal):
            return [self.assign.lower(), self.output.lower()]
        return [self.assign, self.output.lower()]
if __name__ == "__main__":
    from ieee754.part.test.test_partsig import create_simulator

    def demo(partitioned):
        """Build and simulate one PartitionedAssign demonstration.

        partitioned -- True drives the input as a PartitionedSignal, False
                       as a plain scalar Signal; this exercises both code
                       paths of PartitionedAssign.get_chunk.
        The two runs previously existed as near-identical copy-pasted
        module-level code; this helper removes that duplication.
        """
        m = Module()
        mask = Signal(3)  # 3 partition-point enables -> up to 4 partitions
        if partitioned:
            a = PartitionedSignal(mask, 32)
        else:
            a = Signal(32)
        m.submodules.ass = ass = PartitionedAssign(signed(48), a, mask)
        omask = (1 << len(ass.output)) - 1  # mask output down to its width
        traces = ass.ports()
        sim = create_simulator(m, traces, "partass")

        def process():
            # drive a fixed input value, then observe the output under
            # every mask pattern used by the original tests
            if partitioned:
                yield a.sig.eq(0xa12345c7)
            else:
                yield a.eq(0xa12345c7)
            for pattern in (0b000, 0b010, 0b110, 0b111):
                yield mask.eq(pattern)
                yield Settle()
                out = yield ass.output.sig
                print("out {:03b}".format(pattern), bin(out & omask),
                      hex(out & omask))

        sim.add_process(process)
        with sim.write_vcd("partition_ass.vcd", "partition_ass.gtkw",
                           traces=traces):
            sim.run()

    # once with a PartitionedSignal input, once with a scalar Signal
    demo(True)
    demo(False)
| [
"lkcl@lkcl.net"
] | lkcl@lkcl.net |
ed79ebe24351df128ab470b7dd0dec754bb1d13a | 86634c50729039cc31a36bc077f8184d518b5129 | /significant_qso_pairs.py | 5d392b6866969e7c26791e07c7463e50584c8af0 | [
"MIT"
] | permissive | yishayv/lyacorr | 6332df2c534e9ac7d139023674ecd279181181c2 | deed114b4cadd4971caec68e2838a5fac39827b1 | refs/heads/master | 2021-01-23T00:06:41.776274 | 2017-05-09T13:25:32 | 2017-05-09T13:25:32 | 85,701,601 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | import numpy as np
class SignificantQSOPairs:
    """Fixed-size store of the QSO pairs with the largest values.

    Keeps three parallel arrays (two QSO identifiers and one value per
    slot); a new entry replaces the current minimum whenever its value is
    larger.  Unfilled slots hold ``initial_value`` (NaN by default).
    """

    def __init__(self, num_elements=20, dtype=float, initial_value=np.nan):
        # parallel arrays: pair identifiers and their associated value
        self.ar_qso1 = np.zeros(num_elements, dtype=int)
        self.ar_qso2 = np.zeros(num_elements, dtype=int)
        self.ar_values = np.full(num_elements, initial_value, dtype=dtype)
        # cached location of the smallest stored value, so replacement
        # candidates can be checked in O(1)
        self.current_index_of_minimum = 0

    def add_if_larger(self, qso1, qso2, value):
        """Store (qso1, qso2, value) if value beats the current minimum."""
        slot = self.current_index_of_minimum
        # negated comparison so NaN placeholders are always overwritten
        if not self.ar_values[slot] >= value:
            self.ar_qso1[slot] = qso1
            self.ar_qso2[slot] = qso2
            self.ar_values[slot] = value
            # re-derive the minimum slot (np.argmin returns the index of a
            # NaN if one is present, so empty slots fill up first)
            self.current_index_of_minimum = self.ar_values.argmin()

    def save(self, filename):
        """Write the three arrays as one stacked matrix via np.save."""
        np.save(filename, np.vstack((self.ar_qso1, self.ar_qso2,
                                     self.ar_values)))
| [
"yishayvadai@mail.tau.ac.il"
] | yishayvadai@mail.tau.ac.il |
c78bd46d107941971e5ccbb87a5f346b8584d902 | 05a70c12df808455100598d8a6fdb5635c641ab8 | /Ago-Dic-2019/Luis Llanes/Practica1/ejercicio4-6.py | 947b7bb18c11b7094451454ce8f7d6b447784ac4 | [
"MIT"
] | permissive | Jonathan-aguilar/DAS_Sistemas | 991edcc929c33ba9bb8bc84e741b55c10a8420a3 | 4d02efc64161871084df1bff258112351e5d1241 | refs/heads/development | 2023-07-24T12:26:54.698452 | 2021-09-02T20:52:26 | 2021-09-02T20:52:26 | 289,764,892 | 1 | 0 | MIT | 2021-09-02T20:52:27 | 2020-08-23T20:54:55 | Python | UTF-8 | Python | false | false | 113 | py | #del 1 al 20 mostrando pares
# Collect and print the even numbers between 1 and 20.
pares = []
for par in range(2, 21, 2):  # 2, 4, ..., 20 directly, no i+1 needed
    pares.append(par)
    print(par)
print(pares)
"luis_llanesn@hotmail.com"
] | luis_llanesn@hotmail.com |
3ff91aed860feace941ae6655b6fa87c1da6c541 | 7ab1948dd28fa099ac115f5799440896e1ef7359 | /DiscerningBot.py | 00b57af996e64669181e72b478d144f38f7f6f9b | [] | no_license | gliliumho/halite-bot | 7ea2a265fe7c712b1050f31ba0bbad63add9411d | b3cfaf2133e77fee7dcd58f29a97b06110546013 | refs/heads/master | 2021-01-19T07:35:23.194349 | 2016-12-21T03:20:36 | 2016-12-21T03:20:36 | 77,011,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | import hlt
from hlt import NORTH, EAST, SOUTH, WEST, STILL, Move, Square
import random, math
myID, game_map = hlt.get_init()
hlt.send_init("DiscerningBot")
def nearest_enemy_direction(square):
min_direction = WEST
max_dist = min(game_map.width, game_map.height)/2
for direction, neighbor in enumerate(game_map.neighbors(square)):
distance = 0
current = neighbor
while current.owner == myID and distance < max_dist:
distance += 1
current = game_map.get_target(current, direction)
if distance < max_dist:
min_direction = direction
max_dist = distance
return min_direction
def heuristic(square):
if square.strength:
return square.production/square.strength
else:
return square.production
def assign_move(square):
target, direction = max(((neighbor, direction) for direction, neighbor in enumerate(game_map.neighbors(square))
if neighbor.owner != myID),
default = (None, None),
key = lambda t: heuristic(t[0]))
if (target != None) and (target.strength < square.strength):
return Move(square, direction)
elif square.strength < 5 * square.production:
return Move(square, STILL)
border = any(neighbor.owner != myID for neighbor in game_map.neighbors(square))
if (not border):
return Move(square, nearest_enemy_direction(square))
# return Move(square, random.choice((NORTH, WEST)))
else:
return Move(square, STILL)
while True:
game_map.get_frame()
moves = [assign_move(square) for square in game_map if square.owner == myID]
hlt.send_frame(moves)
| [
"gliliumho@gmail.com"
] | gliliumho@gmail.com |
86afbabd7bc5ba70376460b1ae08e5e9179e22d3 | af060eff34662f1140ab1869c2024f01f5a01f10 | /backend/config/settings.py | 46629ccce45910002fcf746b3201c8a7e0b2ce72 | [] | no_license | pnowosie/containers-play | 3a93f468cb526f7eb5498e10ab1af4f460a8fc2c | cf762cc97bdb20d34d1b20147214842aa9063c5e | refs/heads/master | 2023-05-26T23:43:28.415436 | 2020-06-02T14:41:12 | 2020-06-02T14:41:12 | 268,602,257 | 0 | 0 | null | 2023-05-22T23:29:15 | 2020-06-01T18:35:00 | Python | UTF-8 | Python | false | false | 144 | py | import os
DEBUG = True
# SQLAlchemy
db_uri = os.getenv('DATABASE_URL')
SQLALCHEMY_DATABASE_URI = db_uri
SQLALCHEMY_TRACK_MODIFICATIONS = False | [
"Pawel.Nowosielski@imapp.pl"
] | Pawel.Nowosielski@imapp.pl |
0203ec0c496d3d7b043d6d733f646b329d84abbf | d195dcfcb725ff3a83339ef8274bde6f61757fca | /show_anchorage_detail.py | 9746022da3787df6a34198258c034e459dc774fb | [] | no_license | deknapp/ak_redistrict | 3214b8859a27fb74682720247a2f21a8157d4102 | 18503cc47ec514361b7006ed293e3cca89b77d42 | refs/heads/master | 2020-07-07T20:49:42.540662 | 2019-10-28T04:53:51 | 2019-10-28T04:53:51 | 203,474,165 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | import geopandas
import matplotlib.pyplot as plt
import pandas
import os
import osr
import legislators
import districts
leg_gdf_plot = legislators.get_leg_plot('rep')
district_gdf = districts.get_district_gdf()
final = district_gdf.plot(ax=leg_gdf_plot, color="none", edgecolor='black', facecolor="none")
final = districts.label_districts(final)
anchorage_max_long = -149.72
anchorage_min_long = -150
anchorage_min = 61.09
anchorage_max = 61.25
plt.ylim(anchorage_min, anchorage_max)
plt.xlim(anchorage_min_long, anchorage_max_long)
#plt.show()
final.get_figure().savefig('/Users/nknapp/Desktop/basic_legislator_plot.pdf')
| [
"nathaniel.knapp@gmail.com"
] | nathaniel.knapp@gmail.com |
d2ea9493af1117d1f5dfbb512c53ab69b79f579c | ae7299517299b41f0b27a604165fe5ca6059b7ef | /ASPDNet-pytorch/val.py | 47dc392621ae71b87fb2e62a9f71751b76b6a255 | [] | no_license | liuqingjie/ASPDNet | 4372d6b8fbe99c2778b23e23ae57f6317f1ea07c | 26e9ee197465f31081d5d3bf65a6ec8ce6a1e36b | refs/heads/master | 2021-04-07T20:18:07.681040 | 2020-04-02T09:29:18 | 2020-04-02T09:29:18 | 248,705,860 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,891 | py | import h5py
import scipy.io as io
import PIL.Image as Image
import numpy as np
import os
import glob
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import scipy
import json
import torchvision.transforms.functional as F
from matplotlib import cm as CM
from image import *
from model import ASPDNet
import torch
from torchvision import datasets, transforms
transform=transforms.Compose([
transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
root = '..'
#now generate the building's ground truth
FJ_train = os.path.join(root,'FJ/train_data','images')
FJ_test = os.path.join(root,'FJ/test_data','images')
# SC_train = os.path.join(root,'SC/train_data','images')
# SC_test = os.path.join(root,'SC/test_data','images')
path_sets = [FJ_test]
img_paths = []
for path in path_sets:
for img_path in glob.glob(os.path.join(path, '*.jpg')):
img_paths.append(img_path)
model = ASPDNet()
model = model.cuda()
checkpoint = torch.load('model_best.pth.tar')
model.load_state_dict(checkpoint['state_dict'])
mae = 0
mse = 0
for i in range(len(img_paths)):
file_path, filename = os.path.split(img_paths[i])
img = transform(Image.open(img_paths[i]).convert('RGB')).cuda()
gt_file = h5py.File(img_paths[i].replace('.png','.h5').replace('images','ground_truth'),'r')
groundtruth = np.asarray(gt_file['density'])
gt_count = np.sum(groundtruth)
print(gt_count)
with torch.no_grad():
output = model(img.unsqueeze(0))
pre_count = output.detach().cpu().sum().numpy()
mae += abs(pre_count-gt_count)
mse += (pre_count - gt_count) * (pre_count - gt_count)
mae = mae/len(img_paths)
mse = np.sqrt(mse/len(img_paths))
print(mae)
print(mse)
| [
"liuqj.irip@gmail.com"
] | liuqj.irip@gmail.com |
3779d30cfebeda301d57e818ebe55098983d539e | 5809b21a6f0ad038ec512ad597776000f0c0f5a1 | /week_four/task_two_src/apptwo/migrations/0003_auto_20191127_1753.py | b5eb05a48c94255481f19560418799d903611a99 | [] | no_license | Bisoyeolaiya/crud-django-api | 4f9db1514c0c97b5496b3a2b8d635bf58e24ca52 | 90e51f30a133adc97ec69fea1ba5bb180e45b922 | refs/heads/master | 2021-06-28T15:30:02.067316 | 2020-02-03T04:19:06 | 2020-02-03T04:19:06 | 237,881,616 | 0 | 0 | null | 2021-06-10T22:32:32 | 2020-02-03T04:02:56 | Python | UTF-8 | Python | false | false | 1,190 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.26 on 2019-11-27 16:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.11.26) tightening field options
    on the apptwo models: phone numbers become 15-char CharFields, the
    student age becomes a PositiveIntegerField, and the task date fields
    gain human-readable verbose names.

    NOTE(review): generated code -- normally left unedited so the recorded
    migration history stays consistent.
    """

    # must run after the previous auto-generated migration
    dependencies = [
        ('apptwo', '0002_auto_20191127_1722'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coach',
            name='phone_number',
            field=models.CharField(max_length=15, verbose_name='Phone Number'),
        ),
        migrations.AlterField(
            model_name='student',
            name='age',
            field=models.PositiveIntegerField(verbose_name='Age'),
        ),
        migrations.AlterField(
            model_name='student',
            name='phone_number',
            field=models.CharField(max_length=15, verbose_name='Phone Number'),
        ),
        migrations.AlterField(
            model_name='task',
            name='task_deadline',
            field=models.DateField(verbose_name='Task End Date'),
        ),
        migrations.AlterField(
            model_name='task',
            name='task_startdate',
            field=models.DateField(verbose_name='Task Start Date'),
        ),
    ]
| [
"olaiyabisoye@gmail.com"
] | olaiyabisoye@gmail.com |
30743d0660f99cca916c12814e164669ead70026 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa2/sample/expr_lists-45.py | 9aff4784319ac14303406fc9b8c82678ed9274ee | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | x:[int] = None
y:[object] = None
z:[bool] = None
o:object = None
x = [1, $Exp, 3]
x = []
y = [1, True]
z = [False, True]
x = None
o = x
o = x = [1]
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
1cba4d3d707cd89b529a4bc616bd90f92866b747 | 12b74d16e7d5590423f4aaa61d03ab1acc1f7dc3 | /ptr.py | d9c888117b0cf092f72d3109f4d1bb633116be3c | [] | no_license | bigger404/ptr | 33dc062cb2f39e76009639aed6988636997e1333 | 1c681703c7a46a42143542cd6d8c8089cf2a922f | refs/heads/master | 2021-05-06T05:16:42.500794 | 2017-12-22T02:35:45 | 2017-12-22T02:35:45 | 115,068,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | from random import randint
class ptr():
    """Toy page-table simulator.

    Pages span 16 addresses.  The 'disk' is a randomly shuffled list of
    page numbers (list position = sector number), and the page table is a
    list of [page#, frame#, sector#, valid] records capped at
    ``allocation`` entries.
    """

    def __init__(self, size, allocation):
        self.size = size              # process size, in pages
        self.limit = 100              # maximum supported process size (pages)
        self.allocation = allocation  # maximum number of frames we may use
        self.ptr = []                 # the page-table records
        # shuffle the pages onto 'disk' by inserting each one at a random
        # position; a page's resulting index is its sector number
        self.disk = []
        for page in range(self.size):
            self.disk.insert(randint(0, len(self.disk)), page)
        # page 0 is resident from the start
        self.loadPage(0)

    def loadPage(self, page_num):
        """Bring ``page_num`` into a frame and return that frame number."""
        print("\t\t** Page fault occured **")
        sector = self.disk.index(page_num)
        record = [page_num, None, sector, 0]
        if self.isfull():
            # no free frames: always evict whatever sits in frame 0
            record[1] = 0
            self.ptr[0] = record
        else:
            # free frame available: take the next unused one
            record[1] = len(self.ptr)
            self.ptr.append(record)
        return record[1]

    def getPhysical(self, logical):
        """Translate a logical address to a physical one, faulting the
        page in if it is not already resident."""
        offset = logical % 16
        page_num = int(logical / 16)
        frame_num = None
        for page, frame, _sector, _valid in self.ptr:
            if page == page_num:
                frame_num = frame
        if frame_num is None:
            frame_num = self.loadPage(page_num)
        return (frame_num * 16) + offset

    def printPtr(self):
        """Dump the page table, one record per line."""
        print("P#,F#,Sector#, Valid/Invalid")
        for record in self.ptr:
            print(record)

    def isfull(self):
        """True when every allocated frame already holds a page."""
        return len(self.ptr) == self.allocation
def get_address(size):
    """Return a random valid logical address for a ``size``-page process.

    Pages span 16 addresses, so valid logical addresses are
    0 .. size*16 - 1.  FIX: the original used randint(0, size*16), whose
    upper bound is inclusive, so it could produce the out-of-range address
    size*16 -- getPhysical() would then fault in page ``size``, and
    disk.index() would raise ValueError because that page does not exist.
    """
    return randint(0, size * 16 - 1)
if __name__ == '__main__':
    # Interactive demo: build a page table for a user-sized process, then
    # repeatedly translate random logical addresses until the user quits.
    size=int(input("Size of program in pages: "))
    # clamp out-of-range sizes to the 100-page maximum
    if size <1 or size > 100:
        size=100
    allocation=int(input("Maximum number of frames to allocate: "))
    # clamp the frame allocation to the 64-frame maximum
    if allocation <1 or allocation >64:
        allocation=64
    p=ptr(size,allocation)
    while (True):
        logical = get_address(size) #create a random logical address
        print("Logical Address: "+str(logical))
        print("Physical address: "+ str(p.getPhysical(logical)))
        p.printPtr()
        if input("Return for more. Q to quit \n").lower()=="q":
            break
| [
"noreply@github.com"
] | noreply@github.com |
67033abb78a0b3fefc67b9051476e1e6829ab474 | b0abd89a991603e0f9454012b8abfca6c85595bc | /soundrl.py | 230db80dcd1956f23306e4554d974f2bb7f19cf4 | [
"MIT"
] | permissive | AresCyber/Alexander | f05ae24a9ce1a7b0287ce6f128dfe53b6094d4c1 | a76a0f8779db5c84c2b0f8257bf1b1e6deb8e7e8 | refs/heads/main | 2023-01-21T08:05:59.211081 | 2020-12-07T11:37:10 | 2020-12-07T11:37:10 | 319,167,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,901 | py | import speech_recognition as sr
import pyttsx3
from time import sleep
def voicepass(password):
    """Listen on the microphone and compare the recognised phrase against
    ``password``.

    Returns 1 on a successful match (after speaking "Access Granted") and
    0 after six failed recognitions.

    NOTE(review): recognition errors (UnknownValueError/RequestError) do
    not increment the failure counter, so persistent errors retry forever
    -- confirm this is intended.
    NOTE(review): uses device_index=1 while backdoor() uses the default
    microphone -- verify the intended input device.
    """
    fcount = 0  # failed password attempts (not recognition errors)
    engine = pyttsx3.init()
    engine.setProperty('rate', 150)    # speech rate, words per minute
    engine.setProperty('volume', 0.9)  # 0.0 - 1.0
    r = sr.Recognizer()
    speech = sr.Microphone(device_index=1)
    while fcount != 6:
        try:
            # capture 4 seconds of audio, then send it to Google's online
            # speech-recognition service
            with speech as source:
                audio = r.record(source, duration=4)
            recog = r.recognize_google(audio, language = 'en-US')
            if recog == password:
                engine.say("Access Granted")
                access = 1
                #print(recog)
                engine.runAndWait()
                return access
            else:
                #get fcount
                fcount += 1
                engine.say("Voice not recognized, please try again, you have " + str(6 - fcount) + "Tries left")
                engine.runAndWait()
                if fcount == 6:
                    return 0
        except sr.UnknownValueError:
            engine.say("Google Speech Recognition could not understand audio")
            engine.runAndWait()
        except sr.RequestError as e:
            engine.say("Could not request results from Google Speech Recognition service; {0}".format(e))
            engine.runAndWait()
def playsound(soundbyte):
    """Speak ``soundbyte`` aloud via the local text-to-speech engine.

    Best-effort: any failure while speaking is swallowed so callers are
    never interrupted by audio problems.

    soundbyte -- any object; it is converted with str() before speaking.
    """
    engine = pyttsx3.init()
    engine.setProperty('rate', 150)    # speech rate, words per minute
    engine.setProperty('volume', 0.9)  # 0.0 - 1.0
    try:
        engine.say(str(soundbyte))
        engine.runAndWait()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort behaviour but
        # let those propagate.
        return
def backdoor(backdoorpass):
    """Continuously listen for ``backdoorpass`` on the default microphone.

    Returns 0 after six failed attempts.

    NOTE(review): on a successful match it only speaks "password accepted"
    and keeps listening (no return) -- confirm whether it should return a
    success value the way voicepass() does.
    NOTE(review): recognition errors do not increment fcount, so persistent
    errors retry indefinitely (with a 1s pause) -- confirm intended.
    """
    fcount = 0  # failed password attempts (not recognition errors)
    engine = pyttsx3.init()
    engine.setProperty('rate', 150)    # speech rate, words per minute
    engine.setProperty('volume', 0.9)  # 0.0 - 1.0
    r = sr.Recognizer()
    speech = sr.Microphone()
    while fcount != 6:
        try:
            print("starting record")
            # capture 4 seconds of audio from the default microphone
            with speech as source:
                #r.adjust_for_ambient_noise(source)
                audio = r.record(source, duration=4)
            print("Recorded audio")
            recog = r.recognize_google(audio, language = 'en-US')
            print(recog)
            if recog == backdoorpass:
                engine.say("password accepted")
                engine.runAndWait()
            else:
                fcount += 1
                engine.say("Voice not recognized, please try again, you have " + str(6 - fcount) + "Tries left")
                engine.runAndWait()
                if fcount == 6:
                    return 0
        except sr.UnknownValueError:
            engine.say("couldn't understand audio back try again")
            engine.runAndWait()
            sleep(1)
        except sr.RequestError as e:
            engine.say("Could not request results from Google Speech Recognition service; {0}".format(e))
            engine.runAndWait()
            sleep(1)
    #ideally a backup or offline tts module would be placed here
return 0 | [
"31662642+nickalholinna@users.noreply.github.com"
] | 31662642+nickalholinna@users.noreply.github.com |
782514ab58f61ab1d5123c7b936f964f11b8761c | 446275695cb2161ab2b8e79126236e41fa41d781 | /greatest of n numbers.py/greatest of 3 num using if-else.py | 0a18ab01bc78903a0bb4bc16803341598da13d60 | [] | no_license | arshleen18/python | b54591c2790942260fa7d2c0eabace6051d50423 | 9933e5422a171f189662577f361895c984e63b64 | refs/heads/main | 2023-07-01T14:01:47.102240 | 2021-08-04T11:54:09 | 2021-08-04T11:54:09 | 368,593,186 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | #wap to find the greatest of 3 numbers using if-else statement
# Read three integers and print the greatest using nested if-else.
# (On ties the strict > comparisons fall through to the else branch, which
# still reports the correct maximum value.)
n1=int(input('enter num1: '))
n2=int(input('enter num2: '))
n3=int(input('enter num3: '))
if n1>n2:
    if n1>n3:
        print('greatest number is:', n1)
    else:
        print('greatest number is:', n3)
else:
    if n2>n3:
        print('greatest number is:', n2)
    else:
        print('greatest number is:', n3)
| [
"noreply@github.com"
] | noreply@github.com |
77768229229ae4667e63aa8ba2bc58cbdb4d4797 | 865324b144ec2dc598c40cecab05856c36302eca | /passwordCheck.py | d82733f3c263433f4a8416126b4e98b7082c667d | [] | no_license | yashu0598/Code-Signal | 66264cd4c54258fc45f12824f9c1042623cf1dd8 | ac8fa26c4130dd520f1b8385f138f2cf920860a8 | refs/heads/master | 2021-06-03T01:12:24.714897 | 2020-12-12T06:07:14 | 2020-12-12T06:07:14 | 154,334,355 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | """
A password is complex enough, if it meets all of the following conditions:
its length is at least 5 characters;
it contains at least one capital letter;
it contains at least one small letter;
it contains at least one digit.
Determine whether a given password is complex enough.
Example
For inputString = "my.Password123", the output should be
passwordCheck(inputString) = true;
For inputString = "my.password123", the output should be
passwordCheck(inputString) = false.
"""
def passwordCheck(inputString):
    """Return True when the password is 'complex enough': at least 5
    characters and containing at least one lowercase letter, one
    uppercase letter, and one digit (ASCII ranges only)."""
    if len(inputString) < 5:
        return False
    has_lower = any('a' <= ch <= 'z' for ch in inputString)
    has_upper = any('A' <= ch <= 'Z' for ch in inputString)
    has_digit = any('0' <= ch <= '9' for ch in inputString)
    return has_lower and has_upper and has_digit
| [
"noreply@github.com"
] | noreply@github.com |
d88ff67bb52cc64f1e70570aa1c997e0def7e762 | 5dae58198d59db3c11bead79f28d47f45eeb9e5c | /state_manager.py | 03125b616f44c10034ebd771df8c224b3fe97f18 | [] | no_license | Brakahaugen/hex | 65eb66d70df157e89e2d60fafbc18b67b48372e5 | 2921f9208a911af7cbe456bc0a35bffc47ff1846 | refs/heads/master | 2021-04-17T01:09:41.567527 | 2020-04-29T06:02:05 | 2020-04-29T06:02:05 | 249,398,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,421 | py | import time
import copy
import termcolor
import numpy as np
class StateManager:
def __init__(self, size: int = 5):
self.size = size
self.hexNeighbours = [[-1,0],[-1,1],[0,1],[0,-1],[1,-1],[1,0]]
self.random_player = 1
# def get_best_action(distro: list = []):
# best_action = index(max(distro))
def create_initial_state(self, p: int = 0, padding = 0):
"""
Creates a flattened board-string representing an empty board.
If no player is specified, alternates between player 1 and 2
Padding applies n number of padding layers to the model, Making the effective board smaller in size.
"""
if p == 0:
self.random_player = self.random_player % 2 + 1
p = self.random_player
if padding > 0:
board = ["0"] * self.size**3
for i in range(padding):
for j in range(i, self.size):
board[i*self.size + j] = "1" #top
board[(self.size - i - 1)*self.size + j] = "1" #bottom
board[j*self.size + i] = "2" #left
board[(j + 1)*self.size - i - 1] = "2" #right
return str(p) + ''.join(board)
else:
return str(p).ljust(self.size**2 + 1, '0')
def create_padded_states(self, state: str):
"""
Given a winning, padded, board state, find all combinations that would lead to a win
"""
winner = self.is_terminal_state(state)
state_2d = self.state_to_2D(state)
# if string
# for i in range(self.size):
# state_2d[0,i]
# state_2d[self.size,i]
# state_2d[i,0]
# state_2d[i,self.size]
def flip_state(self, state):
"""
returns a mirrored repr. of the board
"""
return state[::-1]
def rotate_state(self, state, k = 1):
# print(type(state))
if isinstance(state, str):
print("haø")
print("heyo")
state = self.state_to_2D(state)
print(state)
return self.flatten_2d(np.rot90(state))
def state_to_2D(self, state: str):
"""
Takes a string state and returns a 2D numpy matrix representing that state
"""
state_list = list(state[1:])
two_dim = np.reshape(state_list, (self.size, self.size))
print(two_dim)
return two_dim
def flatten_2d(self, state_2d: np.array):
"""
takes a 2D numpy matrix and returns a flattened string
"""
state = state_2d.flatten()
return ''.join(state.tolist())
def simulate_move(self, state: str, action: int):
"""
applies the action on the state by placing the players pin on the specified location.
args:
state = board_state in flattened form: "10200020"
action = location to place pin
"""
p = state[0]
state_array = list(state[1:])
state_array[action] = p
#Change player and return string:
return (str(int(p)%2 + 1) + ''.join(state_array))
def get_legal_moves(self, state: str):
"""
returns a list of legal moves as indexes in the state
"""
state_array = list(state[1:])
legal_moves = []
for i in range(len(state_array)):
if state_array[i] == '0':
legal_moves.append(i)
return legal_moves
def is_terminal_state(self, state: str):
"""
Scans the given board to see if there is any link from one side to another.
returns 1 if p1 wins, 2 if p2wins, otherwise: 0
"""
state_array = list(state[1:])
for i in range(self.size):
win = self.check_wins([i,0], '1', state_array.copy())
if win != '0':
return int(win)
win = self.check_wins([0,i], '2', state_array.copy())
if win != '0':
return int(win)
return int(win)
#HELPER FUNCTION FOR IS_TERMINAL STATE
def check_wins(self, cell: list, p: str, state_array: list):
"""
Checks if there is a connected list from the starting cells to the ending cells
"""
if state_array[cell[0] + cell[1]*self.size] == p:
state_array[cell[0] + cell[1]*self.size] = '0'
else:
return '0'
for n in self.hexNeighbours:
if (0 <= cell[0] + n[0] < self.size) and (0 <= cell[1] + n[1] < self.size):
n_cell = [cell[0] + n[0], cell[1] + n[1]]
else:
continue
if state_array[n_cell[0] + n_cell[1]*self.size] == p:
if (p == '1') and (n_cell[1] == self.size - 1):
return '1'
elif (p == '2') and (n_cell[0] == self.size - 1):
return '2'
check_next = self.check_wins(n_cell, p, state_array)
if check_next != '0':
return check_next
return '0'
#HELPER FUNCTIONS FOR PRINTING AND DEBUGGING
#HELPER FUNCTIONS FOR PRINTING IN DEBUGGING
def print_state(self, state):
state_array = list(state[1:])
for row in range(self.size):
print(" ".join(str(state_array[row*self.size + i])+ ' ' +' - '[i < self.size-1] for i in range(0,self.size)))
if row < self.size - 1:
print(" ".join("| " + ' / '[i < self.size-1] for i in range(0,self.size)))
def print_state_array(self, state_array):
for i in range(self.size):
print(state_array[i*self.size:(i+1)*self.size])
print()
if __name__ == "__main__":
s = StateManager(3)
s.create_initial_state(padding=0)
#Make a visualization given a size and a state.
# s.print_state(s.create_initial_state(padding=2))
print()
# s.print_state(s.create_initial_state(padding=2))
print()
str = "1110022220"
s.print_state(s.rotate_state(str,1))
print()
s.print_state(s.rotate_state(str,3))
# print(s.is_terminal_state("12111222121221211"))
# print(s.check_wins([0,0],'2',list("2111222121221211")))
| [
"espenbragerhaug@gmail.com"
] | espenbragerhaug@gmail.com |
c82b4cc15838b566a9c92ee0f9e2ac5b48dae623 | 380dfac9b68ef8663db5a9d1b30fc75636dec3d3 | /billforward/apis/roles_api.py | de864151b5300fd22c12648fd85530ca1110525f | [
"Apache-2.0"
] | permissive | billforward/bf-python | d2f549e0c465d0dc78152b54413cac4216025a64 | d2b812329ca3ed1fd94364d7f46f69ad74665596 | refs/heads/master | 2021-12-13T22:10:16.658546 | 2018-06-19T14:44:45 | 2018-06-19T14:44:45 | 63,268,011 | 2 | 1 | Apache-2.0 | 2021-12-06T12:59:41 | 2016-07-13T17:57:37 | Python | UTF-8 | Python | false | false | 30,334 | py | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class RolesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_role(self, role_request, **kwargs):
"""
Create a new role.
{\"nickname\":\"Create a new role\",\"request\":\"createRoleRequest.html\",\"response\":\"createRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_role(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param BillingEntityBase role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_role_with_http_info(role_request, **kwargs)
else:
(data) = self.create_role_with_http_info(role_request, **kwargs)
return data
def create_role_with_http_info(self, role_request, **kwargs):
"""
Create a new role.
{\"nickname\":\"Create a new role\",\"request\":\"createRoleRequest.html\",\"response\":\"createRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_role_with_http_info(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param BillingEntityBase role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['role_request']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_role" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'role_request' is set
if ('role_request' not in params) or (params['role_request'] is None):
raise ValueError("Missing the required parameter `role_request` when calling `create_role`")
resource_path = '/roles'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'role_request' in params:
body_params = params['role_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/xml', 'application/xml', 'application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RolePagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_all_roles(self, **kwargs):
"""
Retrieves a collection of all roles. By default 10 values are returned. Records are returned in natural order.
{\"nickname\":\"Retrieve all roles\",\"response\":\"getRoleAll.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_all_roles(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param list[str] organizations:
:param int offset: The offset from the first subscription to return.
:param int records: The maximum number of subscriptions to return.
:param str order_by: Specify a field used to order the result set.
:param str order: Ihe direction of any ordering, either ASC or DESC.
:param bool include_retired: Whether retired subscriptions should be returned.
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_all_roles_with_http_info(**kwargs)
else:
(data) = self.get_all_roles_with_http_info(**kwargs)
return data
    def get_all_roles_with_http_info(self, **kwargs):
        """
        Retrieves a collection of all roles. By default 10 values are returned. Records are returned in natural order.
        {\"nickname\":\"Retrieve all roles\",\"response\":\"getRoleAll.html\"}
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_all_roles_with_http_info(callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param list[str] organizations:
        :param int offset: The offset from the first subscription to return.
        :param int records: The maximum number of subscriptions to return.
        :param str order_by: Specify a field used to order the result set.
        :param str order: The direction of any ordering, either ASC or DESC.
        :param bool include_retired: Whether retired subscriptions should be returned.
        :return: RolePagedMetadata
        If the method is called asynchronously,
        returns the request thread.
        """
        # Full set of keyword arguments this endpoint accepts.
        all_params = ['organizations', 'offset', 'records', 'order_by', 'order', 'include_retired']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Snapshot the local namespace; validated kwargs are merged in below.
        params = locals()
        # Reject any keyword argument the API does not declare.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_all_roles" % key
                )
            params[key] = val
        del params['kwargs']
        resource_path = '/roles'.replace('{format}', 'json')
        path_params = {}
        # Only forward query parameters the caller actually supplied.
        query_params = {}
        if 'organizations' in params:
            query_params['organizations'] = params['organizations']
        if 'offset' in params:
            query_params['offset'] = params['offset']
        if 'records' in params:
            query_params['records'] = params['records']
        if 'order_by' in params:
            query_params['order_by'] = params['order_by']
        if 'order' in params:
            query_params['order'] = params['order']
        if 'include_retired' in params:
            query_params['include_retired'] = params['include_retired']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type([])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and the optional async
        # callback handling to the shared API client.
        return self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='RolePagedMetadata',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def get_role_by_id(self, role, **kwargs):
"""
Retrieves a single role, specified by the ID parameter.
{\"nickname\":\"Retrieve a role\",\"response\":\"getRoleByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_role_by_id(role, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param list[str] organizations:
:param bool include_retired: Whether retired subscriptions should be returned.
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_role_by_id_with_http_info(role, **kwargs)
else:
(data) = self.get_role_by_id_with_http_info(role, **kwargs)
return data
    def get_role_by_id_with_http_info(self, role, **kwargs):
        """
        Retrieves a single role, specified by the ID parameter.
        {\"nickname\":\"Retrieve a role\",\"response\":\"getRoleByID.html\"}
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.get_role_by_id_with_http_info(role, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str role: ID or name of the role. (required)
        :param list[str] organizations:
        :param bool include_retired: Whether retired subscriptions should be returned.
        :return: RolePagedMetadata
        If the method is called asynchronously,
        returns the request thread.
        """
        # Full set of keyword arguments this endpoint accepts.
        all_params = ['role', 'organizations', 'include_retired']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Snapshot the local namespace; validated kwargs are merged in below.
        params = locals()
        # Reject any keyword argument the API does not declare.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_role_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'role' is set
        if ('role' not in params) or (params['role'] is None):
            raise ValueError("Missing the required parameter `role` when calling `get_role_by_id`")
        resource_path = '/roles/{role}'.replace('{format}', 'json')
        # `role` is substituted into the URL path by the API client.
        path_params = {}
        if 'role' in params:
            path_params['role'] = params['role']
        # Only forward query parameters the caller actually supplied.
        query_params = {}
        if 'organizations' in params:
            query_params['organizations'] = params['organizations']
        if 'include_retired' in params:
            query_params['include_retired'] = params['include_retired']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['text/plain'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and the optional async
        # callback handling to the shared API client.
        return self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='RolePagedMetadata',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def remove_permission_from_role(self, role, resource, action, **kwargs):
"""
Revokes a particular permission
{\"nickname\":\"Remove Permission from role\",\"response\":\"removePermissionFromGroup.html\",\"request\":\"removePermissionFromGroupRequest.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_permission_from_role(role, resource, action, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param str resource: (required)
:param str action: (required)
:param list[str] organizations:
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_permission_from_role_with_http_info(role, resource, action, **kwargs)
else:
(data) = self.remove_permission_from_role_with_http_info(role, resource, action, **kwargs)
return data
    def remove_permission_from_role_with_http_info(self, role, resource, action, **kwargs):
        """
        Revokes a particular permission
        {\"nickname\":\"Remove Permission from role\",\"response\":\"removePermissionFromGroup.html\",\"request\":\"removePermissionFromGroupRequest.html\"}
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.remove_permission_from_role_with_http_info(role, resource, action, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str role: ID or name of the role. (required)
        :param str resource: (required)
        :param str action: (required)
        :param list[str] organizations:
        :return: RolePagedMetadata
        If the method is called asynchronously,
        returns the request thread.
        """
        # Full set of keyword arguments this endpoint accepts.
        all_params = ['role', 'resource', 'action', 'organizations']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Snapshot the local namespace; validated kwargs are merged in below.
        params = locals()
        # Reject any keyword argument the API does not declare.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method remove_permission_from_role" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'role' is set
        if ('role' not in params) or (params['role'] is None):
            raise ValueError("Missing the required parameter `role` when calling `remove_permission_from_role`")
        # verify the required parameter 'resource' is set
        if ('resource' not in params) or (params['resource'] is None):
            raise ValueError("Missing the required parameter `resource` when calling `remove_permission_from_role`")
        # verify the required parameter 'action' is set
        if ('action' not in params) or (params['action'] is None):
            raise ValueError("Missing the required parameter `action` when calling `remove_permission_from_role`")
        resource_path = '/roles/{role}/permission/{resource}/{action}'.replace('{format}', 'json')
        # All three identifiers are substituted into the URL path.
        path_params = {}
        if 'role' in params:
            path_params['role'] = params['role']
        if 'resource' in params:
            path_params['resource'] = params['resource']
        if 'action' in params:
            path_params['action'] = params['action']
        query_params = {}
        if 'organizations' in params:
            query_params['organizations'] = params['organizations']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['text/plain'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and the optional async
        # callback handling to the shared API client.
        return self.api_client.call_api(resource_path, 'DELETE',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='RolePagedMetadata',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def revoke_role(self, role, **kwargs):
"""
Revokes a role
{\"nickname\":\"Revoke role\",\"response\":\"revokeRole.html\",\"request\":\"revokeRoleRequest.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.revoke_role(role, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str role: ID or name of the role. (required)
:param list[str] organizations:
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.revoke_role_with_http_info(role, **kwargs)
else:
(data) = self.revoke_role_with_http_info(role, **kwargs)
return data
    def revoke_role_with_http_info(self, role, **kwargs):
        """
        Revokes a role
        {\"nickname\":\"Revoke role\",\"response\":\"revokeRole.html\",\"request\":\"revokeRoleRequest.html\"}
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.revoke_role_with_http_info(role, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param str role: ID or name of the role. (required)
        :param list[str] organizations:
        :return: RolePagedMetadata
        If the method is called asynchronously,
        returns the request thread.
        """
        # Full set of keyword arguments this endpoint accepts.
        all_params = ['role', 'organizations']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Snapshot the local namespace; validated kwargs are merged in below.
        params = locals()
        # Reject any keyword argument the API does not declare.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method revoke_role" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'role' is set
        if ('role' not in params) or (params['role'] is None):
            raise ValueError("Missing the required parameter `role` when calling `revoke_role`")
        resource_path = '/roles/{role}'.replace('{format}', 'json')
        # `role` is substituted into the URL path by the API client.
        path_params = {}
        if 'role' in params:
            path_params['role'] = params['role']
        query_params = {}
        if 'organizations' in params:
            query_params['organizations'] = params['organizations']
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['text/plain'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and the optional async
        # callback handling to the shared API client.
        return self.api_client.call_api(resource_path, 'DELETE',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='RolePagedMetadata',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
def update_role(self, role_request, **kwargs):
"""
Update a role.
{\"nickname\":\"Update a role\",\"request\":\"updateRoleRequest.html\",\"response\":\"updateRoleResponse.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_role(role_request, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param UpdateRoleRequest role_request: (required)
:return: RolePagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_role_with_http_info(role_request, **kwargs)
else:
(data) = self.update_role_with_http_info(role_request, **kwargs)
return data
    def update_role_with_http_info(self, role_request, **kwargs):
        """
        Update a role.
        {\"nickname\":\"Update a role\",\"request\":\"updateRoleRequest.html\",\"response\":\"updateRoleResponse.html\"}
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>> pprint(response)
        >>>
        >>> thread = api.update_role_with_http_info(role_request, callback=callback_function)
        :param callback function: The callback function
        for asynchronous request. (optional)
        :param UpdateRoleRequest role_request: (required)
        :return: RolePagedMetadata
        If the method is called asynchronously,
        returns the request thread.
        """
        # Full set of keyword arguments this endpoint accepts.
        all_params = ['role_request']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        # Snapshot the local namespace; validated kwargs are merged in below.
        params = locals()
        # Reject any keyword argument the API does not declare.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_role" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'role_request' is set
        if ('role_request' not in params) or (params['role_request'] is None):
            raise ValueError("Missing the required parameter `role_request` when calling `update_role`")
        resource_path = '/roles'.replace('{format}', 'json')
        path_params = {}
        query_params = {}
        header_params = {}
        form_params = []
        local_var_files = {}
        # The role update payload is sent as the request body.
        body_params = None
        if 'role_request' in params:
            body_params = params['role_request']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['text/xml', 'application/xml', 'application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])
        # Authentication setting
        auth_settings = []
        # Delegate transport, (de)serialization and the optional async
        # callback handling to the shared API client.
        return self.api_client.call_api(resource_path, 'PUT',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='RolePagedMetadata',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'),
                                            _return_http_data_only=params.get('_return_http_data_only'))
| [
"alexander.birch@billforward.net"
] | alexander.birch@billforward.net |
1f807950196eab859ffa0f58de11ee271eca3ad7 | 2fcd0ffb60632dc831542f12eed6c334f1e888d3 | /api/tests/test_models.py | c18c91207d43d716bcdc3c092677d4596219d1bd | [] | no_license | ncrousset/mainDoctorNote | 49daaff0819de57ecf3ac5dceade302de96dc79c | bdaf98bd7babe0da97e1394c5b94fa10253eddb6 | refs/heads/main | 2023-08-25T02:05:02.373649 | 2021-10-22T16:49:30 | 2021-10-22T16:49:30 | 406,190,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,677 | py | from django.db import models
from django.test import TestCase
from django.contrib.auth import get_user_model
from api.models import MedicalStudy, MedicalTreatment, Patient, Background, MedicalHistory
from django.urls import reverse
def create_user():
    """Create and return a throwaway user account for the model tests."""
    credentials = {
        'username': 'testuser',
        'email': 'testuser@gmail.com',
        'password': 'test',
    }
    return get_user_model().objects.create_user(**credentials)
def create_patient(user):
    """Create and return a Patient record owned by *user*."""
    fields = {
        'first_name': 'Natanael',
        'last_name': 'Acosta',
        'sex': 'm',
        'user': user,
    }
    return Patient.objects.create(**fields)
class PatientTest(TestCase):
    """Tests for the Patient model's string representation and helper properties."""

    def setUp(self):
        self.patient = create_patient(create_user())

    def test_object_name_is_last_name_comma_first_name(self):
        full = f'{self.patient.first_name} {self.patient.last_name}'
        self.assertEquals(full, str(self.patient))

    def test_get_absolute_url(self):
        url = reverse('patient', kwargs={"pk": self.patient.id})
        self.assertEqual(self.patient.get_absolute_url(), url)

    def test_get_sex_title(self):
        # Mirror the model's expected sex-code-to-title mapping.
        titles = {'m': 'masculine', 'f': 'feminine', 'o': 'other'}
        sex = self.patient.sex
        expected = '' if sex == '' else titles[sex]
        self.assertEqual(expected, self.patient.sex_title)

    def test_get_full_name(self):
        expected = f'{self.patient.first_name} {self.patient.last_name}'
        self.assertEqual(expected, self.patient.full_name)
class BackgroundTest(TestCase):
    """Tests for the Background model's display name and URL."""

    def setUp(self):
        owner = create_patient(create_user())
        self.background = Background.objects.create(
            title='Background title', content='Hola ', patient=owner)

    def test_object_name_is_title(self):
        self.assertEquals('Background title', self.background.title)

    def test_get_absolute_url(self):
        expected = f'/api/patient/background/{self.background.id}'
        self.assertEqual(self.background.get_absolute_url(), expected)
class MedicalHistoryTest(TestCase):
    """Tests for the MedicalHistory model's display name and URL."""

    def setUp(self):
        owner = create_patient(create_user())
        self.medical_history = MedicalHistory.objects.create(
            title='Medical history', content='Hola ', patient=owner)

    def test_object_name_is_title(self):
        self.assertEquals('Medical history', self.medical_history.title)

    def test_get_absolute_url(self):
        expected = f'/api/patient/medical-history/{self.medical_history.id}'
        self.assertEqual(self.medical_history.get_absolute_url(), expected)
class MedicalStudyTest(TestCase):
    """Tests for the MedicalStudy model's display name and URL."""

    def setUp(self):
        owner = create_patient(create_user())
        self.medical_study = MedicalStudy.objects.create(
            title='Medical study', content='Hola ', patient=owner)

    def test_object_name_is_title(self):
        self.assertEquals('Medical study', self.medical_study.title)

    def test_get_absolute_url(self):
        expected = f'/api/patient/medical-study/{self.medical_study.id}'
        self.assertEqual(self.medical_study.get_absolute_url(), expected)
class MedicalTreatmentTest(TestCase):
    """Tests for the MedicalTreatment model's display name and URL."""

    def setUp(self):
        owner = create_patient(create_user())
        self.medical_treatment = MedicalTreatment.objects.create(
            title='Medical treatment', content='Hola ', patient=owner)

    def test_object_name_is_title(self):
        self.assertEquals('Medical treatment', self.medical_treatment.title)

    def test_get_absolute_url(self):
        expected = f'/api/patient/medical-treatment/{self.medical_treatment.id}'
        self.assertEqual(self.medical_treatment.get_absolute_url(), expected)
"natanael926@gmail.com"
] | natanael926@gmail.com |
4993e813cca3a6c0f29f72c606f7d8504040e365 | ac737323d19a04047cd157534553a7744acfabdb | /daily_coding_problem/sliding_max_window.py | 870277cc5407677558d3821d0f2a5bcbb8578e87 | [] | no_license | sahaia1/coursera_algos | 57a36b56a2fb1077a0806e0d07f8531f6ce35e48 | d1c4f5b374aea6fdd1ee225c3cebf25b39073cae | refs/heads/master | 2020-05-03T17:39:45.943959 | 2019-07-17T15:23:32 | 2019-07-17T15:23:32 | 178,748,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,277 | py | '''
Problem # 18
Good morning! Here's your coding interview problem for today.
This problem was asked by Google.
Given an array of integers and a number k, where 1 <= k <= length of the array, compute the maximum values of each subarray of length k.
For example, given array = [10, 5, 2, 1, 8, 7] and k = 3, we should get: [10, 7, 8, 8], since:
10 = max(10, 5, 2)
7 = max(5, 2, 7)
8 = max(2, 7, 8)
8 = max(7, 8, 7)
Do this in O(n) time and O(k) space. You can modify the input array in-place and you do not need to store the results. You can simply print them out as you compute them.
'''
from heapq import heappush, heappop
# My Solution using a heap. Passes Leetcode but slower than the next the solution
class Solution:
    """Sliding-window maximum via a max-heap with lazy deletion of stale entries.

    O(n log n): each element is pushed once; stale heap entries (indexes that
    have left the window) are discarded lazily when they surface.
    """

    def maxSlidingWindow(self, nums, k):
        result = []
        heap = []   # entries are (-value, index); min-heap over negated values
        left = 0
        for right, value in enumerate(nums):
            heappush(heap, (-value, right))
            if right - left + 1 == k:
                # Window is full: the heap top is the window maximum, because
                # entries older than `left` were purged on earlier iterations.
                result.append(-heap[0][0])
                # Drop entries whose index is about to fall out of the window.
                while heap and heap[0][1] <= left:
                    heappop(heap)
                left += 1
        return result
# Leetcode solution
from collections import deque
class Solution2:
def maxSlidingWindow(self, nums: 'List[int]', k: 'int') -> 'List[int]':
# base cases
n = len(nums)
if n * k == 0:
return []
if k == 1:
return nums
def clean_deque(i):
# remove indexes of elements not from sliding window
if deq and deq[0] == i - k:
deq.popleft()
# remove from deq indexes of all elements
# which are smaller than current element nums[i]
while deq and nums[i] > nums[deq[-1]]:
deq.pop()
# init deque and output
deq = deque()
max_idx = 0
for i in range(k):
clean_deque(i)
deq.append(i)
# compute max in nums[:k]
if nums[i] > nums[max_idx]:
max_idx = i
output = [nums[max_idx]]
# build output
for i in range(k, n):
clean_deque(i)
deq.append(i)
output.append(nums[deq[0]])
return output | [
"adityasahai@WHS-1334.local"
] | adityasahai@WHS-1334.local |
5913906913996061e38731e8c5765a849316493e | 4dbf730249e343fa424bb5202fd67e7570d44784 | /example/controller/tests/controller/methods/__init__.py | 05536fc710eb4fe6bfa598b4b2f7e8a761bea971 | [
"MIT"
] | permissive | why2pac/dp-tornado | 76da68df3c6082fce8d381da90eab526f064f38a | 4a73b9fd81448c195146ea2003ac5aa88ae792d9 | refs/heads/master | 2021-01-24T06:02:21.785754 | 2020-11-14T07:39:45 | 2020-11-14T07:39:45 | 25,281,793 | 19 | 11 | MIT | 2020-02-28T07:56:24 | 2014-10-16T01:50:49 | Python | UTF-8 | Python | false | false | 126 | py | # -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class MethodsController(Controller):
    """Empty Controller subclass; defines no behavior of its own."""
    pass
| [
"me@dp.farm"
] | me@dp.farm |
e3f7f37be4044ecc2be0cf56b77dcda369682d5b | f377d304987293a2c957699d7228587513982180 | /python/mig/run_engine.py | aa56ae0d475482a11026f952a941525102def2c6 | [] | no_license | georgeliu95/trt-practices | e0e048658cf15d8586b7021ca039a05d1b585573 | 9c1f0a0f70335b1c56afb397abc939dada2c737c | refs/heads/main | 2023-06-01T23:36:14.691387 | 2021-06-18T03:30:20 | 2021-06-18T03:30:20 | 361,679,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,921 | py | import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
# import nvtx
import os
import sys
sys.path.insert(0, '..')
from common import HostDeviceMem, BuilderCreationFlag
from common import allocate_buffer, printIOInfo, get_trt_type
# CUDA Context Init
cuda.init()
CURRENT_DEV = cuda.Device(0)  # device 0 is hard-coded for this script
ctx = CURRENT_DEV.make_context()
ctx.push()
# -------------------------------------------------------------------------------- #
# Global Variables
BENCH = False  # when True, trt_execute() re-runs inference 100 extra times
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
WORKSPACE_SIZE = 1<<30  # 1 GiB builder workspace
# Explicit-batch network flag (the `if True` keeps the toggle visible).
BATCH_MODE = 1<<int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) if True else 0
builder_flag = BuilderCreationFlag()
# Names of output tensors to print after inference; empty means print nothing.
output_tensor_names = []
engine_file_name = "../../engines/" + "build_engine." + "trt"
def trt_execute(context, input_data):
    """Run one inference pass on `context` and return the filled buffers.

    Copies `input_data` into the first input binding's host buffer, uploads
    all inputs, launches the engine asynchronously, and downloads the outputs.
    Returns (inputs, outputs, bindings, stream); the output host buffers are
    populated once the final synchronize completes.

    NOTE(review): only inputs[0] is populated from `input_data`, so this
    assumes a single-input engine — confirm before reusing elsewhere.
    """
    # Execution
    inputs, outputs, bindings, stream = allocate_buffer(context)
    # Copy data to Host Mem
    [np.copyto(inputs[0].host, input_data.ravel().astype(inputs[0].host.dtype))]
    # Copy data to Device Mem
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    if BENCH:
        # Benchmark mode: 100 extra launches queued on the same stream.
        for i in range(100):
            context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    stream.synchronize()
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    stream.synchronize()
    return inputs, outputs, bindings, stream
# -------------------------------------------------------------------------------- #
# Data Preparation
input_dtype = np.float32
input_shape = [3,2,5,5]
# Deterministic ramp input: 0..N-1 reshaped to the engine's input shape.
input_data = np.arange(start=0,stop=np.prod(input_shape),dtype=input_dtype).reshape(input_shape)
# NOTE(review): chained [:] indexing only re-slices axis 0, so this does not
# actually crop the trailing dims; likely intended input_data[:, :, :5, :5].
print("Input:\n", input_data if input_data.size < 50 else input_data[:][:][:min(5,input_shape[-2])][:min(5,input_shape[-1])])
if os.path.isfile(engine_file_name):
    # Deserialize a previously built engine and run one inference pass.
    with open(engine_file_name, "rb") as f:
        engine_bs = f.read()
    des_engine = trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(engine_bs)
    des_msg = "[Deserialize engine] " + ("Fail to load engine." if des_engine is None else "Succeed to load engine.")
    print(des_msg)
    des_context = des_engine.create_execution_context()
    des_context.set_binding_shape(0, input_data.shape) # Only one input tensor here.
    # rng = nvtx.start_range(message="execution_phase", color="blue")
    _, outputs, _, _ = trt_execute(des_context, input_data)
    # nvtx.end_range(rng)
    # Print each named output reshaped to its binding shape (no-op while
    # output_tensor_names is empty).
    [print("Output_"+str(id)+":\n", outputs[id].host.reshape(des_context.get_binding_shape(des_engine.get_binding_index(it)))) for id,it in enumerate(output_tensor_names)]
# Destroy Execution Context and CUDA Engine
#with context:
#    print("destroy context")
#with engine:
#    print("destroy engine")
#with des_context:
#    print("destroy des_context")
#with des_engine:
#    print("destroy des_engine")
print("All done.")
# Pop CUDA Context
ctx.pop()
ctx.detach()
"georgel@nvidia.com"
] | georgel@nvidia.com |
6b264e84c815a0a97ad9b274543b9297d9fc6fbe | 3bf1480a1a00209bc8ef8a66e1995549987ae70e | /utils/scripts/OOOlevelGen/src/sprites/Alert.py | 130ac296aa9ea69dea9f1d910487db975a93b603 | [
"MIT"
] | permissive | fullscreennl/bullettime | 284a8ea320fb4adabc07c3639731a80fc4db5634 | 8967449cdf926aaed6bb7ec217d92e0689fb0c3c | refs/heads/master | 2020-03-29T01:56:26.627283 | 2018-10-11T19:09:48 | 2018-10-11T19:09:48 | 149,414,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | import PhysicsMixin
import ID
BODIES = """
<dict>
<key>body</key>
<dict>
<key>x</key>
<integer>%(x)s</integer>
<key>y</key>
<integer>%(y)s</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>sheet_id</key>
<integer>5</integer>
<key>id</key>
<integer>%(__objID__)s</integer>
<key>name</key>
<string>%(name)s</string>
<key>classname</key>
<string>%(classname)s</string>
<key>static</key>
<true/>
<key>spritedata</key>
<string>%(msg)s</string>
</dict>
<key>shapes</key>
<array>
<dict>
<key>x</key>
<integer>0</integer>
<key>y</key>
<integer>0</integer>
<key>width</key>
<integer>%(width)s</integer>
<key>height</key>
<integer>%(height)s</integer>
<key>type</key>
<string>rect</string>
<key>friction</key>
<real>1</real>
<key>density</key>
<integer>1</integer>
<key>restitution</key>
<real>0</real>
<key>sensor</key>
<true/>
</dict>
</array>
</dict>
"""
JOINTS = """"""
CONTACTS = """
<dict>
<key>sprite1</key>
<string>:body</string>
<key>sprite2</key>
<string>%(name)s</string>
<key>eventName</key>
<string>onAlertHit</string>
</dict>
"""
class Alert(PhysicsMixin.PhysicsMixin):
    """Static sensor sprite whose contact handler (`onAlertHit`) shows a message.

    Renders its plist fragments from the module-level BODIES/JOINTS/CONTACTS
    templates using the sprite parameters collected in ``self.params``.
    """
    def __init__(self,**kwargs):
        self.params = kwargs
        #self.params['name'] = "Alert"
        self.process(kwargs)
        self.addDefault('classname','')
        # Bug fix: the original default contained a raw line break inside a
        # single-quoted string (a SyntaxError); restored as the escaped form.
        # NOTE(review): '\nf' is assumed to be the intended in-message escape
        # sequence — confirm against other sprites that set 'msg'.
        self.addDefault('msg','Hello\nfWorld!')
        self.addDefault('name','Alert')
        self.params['__objID__'] = ID.next()
    def render(self):
        """Return the (bodies, joints, contacts) plist fragments for this sprite."""
        return( BODIES%self.params, JOINTS%self.params,CONTACTS%self.params)
# Quick manual check (Python 2 script): dump the body plist for a sample alert.
if __name__ == "__main__":
    print Alert(x=100,y=160,width=50,height=320,zoom_fact=3.0,name="ImageAlert").render()[0]
| [
"github@fotoboer.nl"
] | github@fotoboer.nl |
1894e6547d9c63f56c0ebfdae27f4f7767c8f13f | ddbddc6c1c1689ebb9430d4a495466763a231749 | /Python_Learning/Python_fundementals/05_data_types_booleans.py | 394aeb2899319f69ef94c1d5410bdddac587d33d | [] | no_license | adamkoy/Learning_Python | bd1e4d69f0bc05a2a0f8ffc05c0067081f58a225 | 2216fa211bdf21564871893ad01fed0566bf5fa7 | refs/heads/master | 2020-06-20T15:10:53.124722 | 2019-07-26T11:00:03 | 2019-07-26T11:00:03 | 197,160,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | # Booleans
#Boolean is a data type that is either true and false
#Syntax is capital letter
var_true = True
var_false= False
print(type(var_true))
print(type(var_false))
# When we equate /evaluate something we get a boolean as a response
#Logical operators return boolean
#== / != / <> />= /<=
weather = 'Rainy'
print(weather =='Sunny')
print(weather =='Rainy')
#Logical **AND** & **OR**
#Evaluate two sides, and BOTH have to be true for it to return True
print(True and False)
print(weather == 'Rainy' and weather == 'Sunny')
print(weather == 'Rainy' and weather == 'Rainy')
print(True and True)
#Logical OR - One of the side of the equasion have to be True to return True
print('>Testing logical or:')
print(True or False)
print(False or False)
#Some methods or functions can return booleans
potential_number = '10'
print('hey')
print(potential_number.isnumeric())
print('Location in code 2!')
text = 'Hello world!'
print(text[0] == 'H')
print(text.startswith('H'))
print(text.startswith('h'))
print('Testing .endswith(arg)')
print(text[-1] == '!') # String are a list of characters. -1 represents the last index in the said list
print(text.endswith('!'))
print(text.endswith('?'))
#Booleans and numbers - anything that is above 1 is Treu
print("printing bool values of numbers")
print(bool(0))
print (bool(13))
print (bool(1))
print (bool(1+3j))
print(bool(3.14))
#Value of none
print(bool (None))
| [
"adam.koyuncu@live.com"
] | adam.koyuncu@live.com |
351f10db84028c7b90967a57fd7c5947cf1c2ff1 | 4a1b61cf551db7843050cc7080cec6fd60c4f8cc | /2020/백준문제/트리/00_트리.py | bc90198b1bcad51c6c1ca207c0bc74de3b890221 | [] | no_license | phoenix9373/Algorithm | 4551692027ca60e714437fd3b0c86462f635d8ff | c66fd70e14bb8357318e8b8f386d2e968f0c4d98 | refs/heads/master | 2023-08-24T10:01:20.798430 | 2021-10-15T07:57:36 | 2021-10-15T07:57:36 | 288,092,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | import sys
sys.stdin = open('input_00.txt', 'r')
def preorder(n):
    """Pre-order traversal (root, left, right) starting at node ``n``.

    Node index 0 marks "no node", so recursion stops there.
    Reads the module-level ``tree`` table (tree[n] = [left, right, parent]).
    """
    if not n:  # 0 means there is no node here
        return
    print(n, end=' ')
    preorder(tree[n][0])
    preorder(tree[n][1])
def inorder(n):
    """In-order traversal (left, root, right) starting at node ``n``.

    Bug fix: the original recursed via ``preorder()``, so both subtrees
    were printed in pre-order instead of in-order.
    Node index 0 marks "no node"; reads the module-level ``tree`` table.
    """
    if n:  # 0 marks a missing child
        inorder(tree[n][0])
        print(n, end=' ')
        inorder(tree[n][1])
def postorder(n):
    """Post-order traversal (left, right, root) starting at node ``n``.

    Bug fix: the original recursed via ``preorder()``, so both subtrees
    were printed in pre-order instead of post-order.
    Node index 0 marks "no node"; reads the module-level ``tree`` table.
    """
    if n:  # 0 marks a missing child
        postorder(tree[n][0])
        postorder(tree[n][1])
        print(n, end=' ')
# Read the tree from stdin.
N = int(input())  # number of nodes
E = 12  # number of edges (hard-coded for this input file)
# tree[n] = [left child, right child, parent]; index 0 means "no node".
tree = [[0, 0, 0] for _ in range(N + 1)]
arr = list(map(int, input().split()))
for i in range(E):
    # arr holds (parent, child) pairs; fill the left slot first, then the right.
    if tree[arr[2 * i]][0] == 0:
        tree[arr[2 * i]][0] = arr[2 * i + 1]
    else:
        tree[arr[2 * i]][1] = arr[2 * i + 1]
    tree[arr[2 * i + 1]][2] = arr[2 * i]
print(arr)
preorder(1)
print()
inorder(1)
print()
postorder(1)
| [
"phoenix9373@naver.com"
] | phoenix9373@naver.com |
3b7e228cc7b10cf7e42e8765eabbe4009f2a6e69 | 847a0217508a39502509ebab74dfcd626180137a | /NTU/CZ4015/Evaluation.py | b9b38f07ddeca82e3f1cb5d7cc54ea482af966d4 | [
"MIT"
] | permissive | JahodaPaul/FIT_CTU | 757c5ef4b47f9ccee77ce020230b7309f325ee57 | 2d96f18c7787ddfe340a15a36da6eea910225461 | refs/heads/master | 2021-06-04T20:16:44.816831 | 2021-03-13T10:25:06 | 2021-03-13T10:25:06 | 133,488,802 | 29 | 20 | null | null | null | null | UTF-8 | Python | false | false | 2,780 | py | import pickle
import matplotlib.pyplot as plt
import numpy as np
#This class is used for evaluating and processing the results of the simulations
class Evaluation:
def __init__(self):
self.results = []
def ProcessResults(self,n_of_blocked_calls, n_of_dropped_calls, n_of_calls, n_of_channels_reverved):
self.results.append([n_of_blocked_calls,n_of_dropped_calls,n_of_calls,n_of_channels_reverved])
pickle.dump(self.results, open("results.p", "wb"))
def Evaluate(self):
titleSize = 20
labelSize = 16
self.results = pickle.load(open("results.p", "rb"))
y_d_0 = []
x_d_0 = []
y_d_1 = []
x_d_1 = []
y_b_0 = []
x_b_0 = []
y_b_1 = []
x_b_1 = []
for item in self.results:
if item[3] == 1:
y_d_1.append(item[1])
y_b_1.append(item[0])
x_d_1.append(1)
x_b_1.append(1)
else:
y_d_0.append(item[1])
y_b_0.append(item[0])
x_d_0.append(0)
x_b_0.append(0)
plt.scatter(x_d_1, y_d_1, c='blue')
plt.scatter(x_b_1, y_b_1, c='red')
plt.scatter(x_d_0, y_d_0, c='blue',label='Dropped calls')
plt.scatter(x_b_0, y_b_0, c='red',label='Blocked calls')
plt.title('Dropped/Blocked calls based on reserved channels', size=titleSize)
plt.ylabel('Number of calls out of 10 000', size=labelSize)
plt.xlabel('Number of reserved channels for handover', size=labelSize)
plt.legend()
plt.show()
ax1 = plt.subplot(211)
plt.hist(y_d_1, color='blue',label='Dropped calls')
plt.hist(y_b_1, color='red',label='Blocked calls')
plt.title('Dropped/Blocked calls with reserved channel for handover', size=titleSize)
plt.xlabel('Number of calls out of 10 000', size=labelSize)
plt.ylabel('Number of occurrences', size=labelSize)
plt.legend()
plt.subplot(212,sharex=ax1)
plt.hist(y_d_0, color='blue',label='Dropped calls')
plt.hist(y_b_0, color='red',label='Blocked calls')
plt.title('Dropped/Blocked calls with no channel reservation', size=titleSize)
plt.xlabel('Number of calls out of 10 000', size=labelSize)
plt.ylabel('Number of occurrences', size=labelSize)
plt.legend()
plt.show()
print('Average number of dropped calls (1 reserved channel):', np.mean(y_d_1))
print('Average number of blocked calls (1 reserved channel):', np.mean(y_b_1))
print('Average number of dropped calls (0 reserved channel):', np.mean(y_d_0))
print('Average number of blocked calls (0 reserved channel):', np.mean(y_b_0)) | [
"pavel.jahoda3@seznam.cz"
] | pavel.jahoda3@seznam.cz |
a304eeea8ec9fb9ef6778dda33ad0fb333e0f628 | 7b05844959a00ac74066c5707438ef8809d25e85 | /scripts/python_module_to_matlab_import_str | f048a397677f7f6a829b77141a3f6fc6f1385bac | [] | no_license | jkwasnio/ML-X-assist | d7d47daef05392d154db13de27389fb9d25b86df | 6501674ce24b9f7250c33c44ec97bd9b8700a8fd | refs/heads/master | 2020-09-01T12:01:52.863691 | 2020-02-26T19:30:00 | 2020-02-26T19:30:00 | 218,954,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | #!/usr/bin/env python
# SYNTAX: python python_module_to_matlab_import_str.py PYTHON_MODULE_NAME
# Converts a python module (.py) into a string ready to be imported into a
# matlab environment. (Useful to 'convert' a .py into a .mat)
# NOTE: Supports int, float and str only!
import sys
import numpy as np
module_name = sys.argv[1]
# define conversion from object to string by type
converter = {}
# shortcut for conversion
def convert(x):
    """Render *x* as a MATLAB literal via the type-dispatch ``converter`` table.

    Falls back to the literal string 'FAILED IMPORT' when the registered
    converter declines (returns None).
    """
    rendered = converter[type(x)](x)
    return "'FAILED IMPORT'" if rendered is None else rendered
# Register converters: numbers render via str(); strings are single-quoted
# (MATLAB char literals); lists become space-separated row vectors.
converter[int] = lambda i: str(i)
converter[np.int8] = lambda i: str(i)
converter[np.int16] = lambda i: str(i)
converter[np.int32] = lambda i: str(i)
converter[np.int64] = lambda i: str(i)
converter[float] = lambda f: str(f)
converter[np.float16] = lambda f: str(f)
converter[np.float32] = lambda f: str(f)
converter[np.float64] = lambda f: str(f)
# NOTE(review): np.complex is a deprecated alias removed in NumPy 1.24 -
# confirm the pinned NumPy version still exposes it.
converter[np.complex] = lambda z: str(z)
converter[np.complex64] = lambda z: str(z)
converter[np.complex128] = lambda z: str(z)
converter[str] = lambda s: "'" + str(s) + "'"
converter[list] = lambda l: "[" + " ".join([convert(e) for e in l]) + "]"
def convert_np_nd_array(a):
    """Render a 1-D or 2-D numpy array as a MATLAB matrix literal.

    Rows of a 2-D array are joined with ';', elements of a 1-D array with
    spaces. Arrays with more than two dimensions are unsupported and yield
    None (reported as a failed import by convert()).
    """
    rank = len(a.shape)
    if rank > 2:
        return None  # unsupported rank
    joiner = " " if rank == 1 else ";"
    return "[" + joiner.join(convert(e) for e in a) + "]"
converter[np.ndarray] = convert_np_nd_array
# TODO: extend
supported_types = converter.keys()
# Import the module named on the command line.
module = __import__(module_name)
# List attributes to be forwarded.
attribute_names = dir(module)
# Filter by name (exclude private and built-in variables).
attribute_names = [n for n in attribute_names if n[0] != "_"]
# Filter by type (supported types only).
attribute_names = [
    n for n in attribute_names if type(getattr(module, n)) in supported_types
]
# Generate one "name = value;" assignment per attribute.
matlab_import_str = ""
for n in attribute_names:
    value = getattr(module, n)
    value_str = convert(value)
    matlab_import_str += n + " = " + value_str + ";"
# Output the MATLAB import string on stdout.
sys.stdout.write(matlab_import_str)
| [
"57213062+jkwasnio@users.noreply.github.com"
] | 57213062+jkwasnio@users.noreply.github.com | |
f32468540c2660589bb48972b342c3055c197c3b | 374c3997dcad3dbc64f42b57c518b8f10db84b68 | /plotting/config_fast.py | eb2deb21c7e56017db8927b8213f6338a253d6f1 | [] | no_license | nichrome-project/nichrome | 69a5887c49fd55f67b1cc381127d907ae459a12e | cc4eafa3c58e1175134392ffe7fe2e2ffb6b233f | refs/heads/master | 2021-01-12T00:17:04.924279 | 2019-02-18T17:09:34 | 2019-02-18T17:09:34 | 78,697,844 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,913 | py | benchmarks = ['raytracer','montecarlo', 'sor', 'elevator', 'jspider', 'ftp', 'hedc', 'weblech']
raceConfigs = ['staticAggr', 'staticCons', 'dynamic', 'bagged', 'oracle', 'uniform']
ptsConfigs = ['dynamic','oracle']
runFolder = '../main/ursa_bench'
dataFolder = '../data'
plotFolder = '../plots'
backupFolder = '../backup'
classifiers = ['oracle', 'bagged', 'dynamic', 'staticCons', 'staticAggr']
raceExprMachineMap = {'oracle':'fir04', 'dynamic':'fir05', 'staticAggr':'fir07', 'staticCons':'fir06', 'bagged':'fir08'}
raceBaselineMachineMap = {'raytracer':'fir08','montecarlo':'fir07','sor':'fir05','elevator':'fir05', 'jspider':'fir08', 'ftp':'fir07', 'hedc':'fir06', 'weblech':'fir06'}
config_label_map = {'oracle':'ideal', 'bagged':'aggregated', 'dynamic':'dynamic', 'staticCons':'static_optimistic', 'staticAggr':'static_pessimistic', 'uniform':'baseline'}
ptsExprMachineMap = {
('raytracer','oracle'):'fir03',
('raytracer','dynamic'):'fir03',
('elevator','oracle'): 'fir03',
('elevator','dynamic'):'fir03',
('jspider','oracle'):'fir11',
('jspider','dynamic'):'fir11',
('ftp','oracle'):'fir13',
('ftp','dynamic'):'fir08',
('hedc','oracle'):'fir06',
('hedc','dynamic'):'fir03',
('weblech','oracle'):'fir12',
('weblech','dynamic'):'fir04',
('montecarlo','oracle'):'fir03',
('montecarlo','dynamic'):'fir03',
('sor','oracle'):'fir03',
('sor','dynamic'):'fir03',
}
def getRaceExprLocs():
    """Return a (benchmark, config) -> machine map for the race experiments.

    Every (benchmark, config) pair shares the per-config machine from
    raceExprMachineMap; the 'uniform' baseline uses the per-benchmark
    machine from raceBaselineMachineMap.

    Fix: ``dict.iteritems()`` is Python-2-only; ``items()`` behaves the
    same and works on both Python 2 and 3.
    """
    ret = {}
    for config, machine in raceExprMachineMap.items():
        for bench in benchmarks:
            ret[(bench, config)] = machine
    for bench in benchmarks:
        ret[(bench, 'uniform')] = raceBaselineMachineMap[bench]
    return ret
def getPtsExprLocs():
    """Return the (benchmark, config) -> machine map for the points-to experiments."""
    locations = ptsExprMachineMap
    return locations
def configToLabel(config):
    """Translate an internal config name into its human-readable plot label."""
    label = config_label_map[config]
    return label
| [
"xin@Xins-MacBook-Pro.local"
] | xin@Xins-MacBook-Pro.local |
183fc2b5c228e64de1cc3a850c8daf99131d5c99 | 9a3c820c630cd11a804dbe95be97d2c778659175 | /test3.py | 650e9fb5b9e668300e2782267bac03851a41033b | [] | no_license | harshramandas/terrabluext | 297d1aaf5e187c268e51ed63165e4f18fdb30a88 | 0ca1e2e0349b33ec3a35962bac8a5e2e203d52a9 | refs/heads/master | 2020-12-03T07:29:58.703393 | 2020-01-02T13:32:08 | 2020-01-02T13:32:08 | 231,242,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | import operator
# Read n comma-separated records from stdin and sort them
# lexicographically by their first three fields.
n = int(input("Enter no. of rows/records: "))
A = []
for i in range(n):
    j = tuple(input().split(','))
    A.append(j)
A.sort(key = operator.itemgetter(0, 1, 2))
print(A)
"harshramandas@gmail.com"
] | harshramandas@gmail.com |
71d9e46ae01ebb5722197a65552fcc1e69c6a89e | 5f1f5b85be3e70fb199a28ff8673b634e6e3cb52 | /app.py | ce2f6d118c920447ddf35003b541a2eee152a16a | [] | no_license | BishnoiJitender/Fake-News-Detection | 9dc33de88a54783d743b240ce5d507525005d78b | 6727741b762b4e077a221beed41224925e58b98d | refs/heads/main | 2023-06-26T16:27:34.359321 | 2021-07-15T05:33:56 | 2021-07-15T05:33:56 | 385,855,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 20:38:00 2021
@author: Jitender Bishnoi Rahar
"""
import sys
sys.path.append(r'c:\users\dell\appdata\local\programs\python\python39\lib\site-packages')
import numpy as np
import sklearn
from flask import Flask, request, render_template
from flask_cors import CORS
import os
import joblib
import pickle
import flask
import os
import newspaper
from newspaper import Article
import urllib
import nltk
nltk.download('punkt')
# Loading Flask and assigning the model variable.
app=Flask(__name__)
CORS(app)
# NOTE(review): this rebinds `app`, discarding the instance CORS was applied
# to - the served app therefore has no CORS wrapper; confirm this is intended.
app=flask.Flask(__name__,template_folder='templates')
# Load the pre-trained classification pipeline from disk.
with open('model.pkl','rb') as handle:
    model=pickle.load(handle)
@app.route('/')
def main():
    # Landing page with the URL input form.
    return render_template('index.html')
# Receiving the input URL from the user and using web scraping to extract the news content.
@app.route('/predict',methods=['GET','POST'])
def predict():
    # [5:] strips a 5-character prefix from the raw form body - presumably a
    # "news=" style field name; TODO confirm against the template's form.
    url=request.get_data(as_text=True)[5:]
    url=urllib.parse.unquote(url)
    # Download and summarise the article with newspaper3k.
    article=Article(str(url))
    article.download()
    article.parse()
    article.nlp()
    news=article.summary
    # Passing the news article to the model and returning the prediction label.
    pred=model.predict([news])
    return render_template('index.html', prediction_text='The news is "{}"'.format(pred[0]))
if __name__=="__main__":
    # Honour a PORT environment variable (e.g. for PaaS deployment).
    port=int(os.environ.get('PORT',5000))
    app.run(port=port,debug=True,use_reloader=False)
| [
"jitenderbishnoi37@gmail.com"
] | jitenderbishnoi37@gmail.com |
5b9672e6cef01ce06379b6507bf9ca3a29c00902 | cb68603da2af46ac4841be9a5479c08186efafb2 | /hw1/kaplanmeiertest.py | a20770b53c443bc96b389b4bb696543e1f24c6dc | [] | no_license | kingledion/stat778 | 00d4125e3ebf5b7b862a0f01fc8737d659cb1749 | 62ae92dd151f96ca4658d81e5d99a744a20730bd | refs/heads/master | 2021-05-05T02:52:47.341901 | 2018-05-09T22:27:35 | 2018-05-09T22:27:35 | 119,771,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 17:02:54 2018
@author: dhartig
"""
from lifelines import KaplanMeierFitter
import csv
times = []
events = []
# HW1.dat holds space-separated rows: (survival time, event flag).
with open("/opt/school/stat778/hw1/HW1.dat") as csvin:
    rdr = csv.reader(csvin, delimiter=' ')
    for time, flag in rdr:
        times.append(float(time))
        events.append(int(flag))

# Fit a Kaplan-Meier estimator and print the survival curve.
kmf = KaplanMeierFitter()
kmf.fit(times, event_observed = events)
print(kmf.survival_function_)
"kingledion@gmail.com"
] | kingledion@gmail.com |
f3f99dc39371b1d6677cd7be07c263496a984d3a | 1cc726e5c483db05d8f9f31674d424b60824f597 | /tests/__init__.py | 8872496718b43efe4f6612ca1936404574038c2a | [] | no_license | patry-jh/empresa-0.3 | ac3415744fe172591b2d8f867bf8eb99ba789024 | c8ecbcece14cbb83082bed862909f97ae1ecfcb4 | refs/heads/master | 2021-01-21T00:16:24.035031 | 2014-12-05T16:52:48 | 2014-12-05T16:52:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21 | py | __author__ = 'Patry'
| [
"patricia_j_h@hotmail.com"
] | patricia_j_h@hotmail.com |
0cc5253cebc09fd4951eaa0a8e6114bad0c1e75b | 8b05073c8bd4c61df78acb10dcd00962cad5d794 | /amlink/socket_server.py | 432345ab4847f6f3597c6e5a7902ec2d082858a0 | [] | no_license | gundamMC/animius-link | bc97610883810c0d64ff744243474342b323c318 | 2ba71d6485282deeed6beb3329e5fd047485fd05 | refs/heads/master | 2022-12-13T22:37:04.994630 | 2019-10-23T08:16:42 | 2019-10-23T08:16:42 | 176,075,906 | 0 | 0 | null | 2022-12-08T05:06:37 | 2019-03-17T08:38:46 | Python | UTF-8 | Python | false | false | 2,425 | py | import asyncio
from amlink import utils, connect_engine, network_controller
class SocketServer:
def __init__(self, link_port, local, pwd, max_clients, engine_ip, engine_port):
self.host = '127.0.0.1' if local else '0.0.0.0'
self.link_port = link_port
self.engine_ip = engine_ip
self.engine_port = engine_port
self.pwd = pwd
self.max_clients = max_clients
self.engine = self.server = None
self.pending_requests = {}
async def main(self):
self.engine = connect_engine.Connect(self.engine_ip, self.engine_port, self.pwd, self, False)
self.server = await asyncio.start_server(self.handle_connection, self.host, self.link_port)
await self.engine.connect()
def stop_server(self):
self.server._shutdown_request = True
async def await_auth(self, reader):
auth_info = await network_controller.NetworkProtocol.await_receive(reader)
request_id, command, arguments = network_controller.NetworkProtocol.parse_request(auth_info)
if command == 'login' and arguments['pwd'] == self.pwd:
return True, "success", arguments['username']
else:
return False, "invalid password", None
async def handle_connection(self, reader, writer):
valid_session, auth_message, username = await self.await_auth(reader)
while valid_session:
raw_request = await network_controller.NetworkProtocol.await_receive(reader)
request_id, command, arguments = network_controller.NetworkProtocol.parse_request(raw_request)
local_commands = {'amlinkExit': utils.amlink_exit,
'amlinkCheckUpdate': utils.amlink_check_update,
'amlinkCheckInternet': utils.amlink_check_internet}
if command not in local_commands:
await self.engine.send_request(request_id, command, arguments)
link_process = True if command == 'waifuPredict' else False
self.pending_requests[request_id] = [writer, link_process, username]
else:
status, message, data = local_commands[command].__call__()
response = network_controller.NetworkProtocol.create_response(request_id, status, message, data)
await network_controller.NetworkProtocol.await_write(writer, response)
writer.close()
| [
"admin@siujoeng-lau.com"
] | admin@siujoeng-lau.com |
de93f365f175befa5f976bbd2d90d98324e9afdd | 3e29cc4e932bc23cab001a30102ad00701ebe1fa | /venv/bin/ipdb3 | fe6ce6dca3aac4d9381bbcf17a86b3105a502ea3 | [] | no_license | youarefree/wedding_project | 0094201a251328db1687cd416f3830d532148412 | d662c83c29ce316d7339a24df785a8c0cf07a1a1 | refs/heads/master | 2020-05-27T14:51:41.040334 | 2017-02-24T15:32:04 | 2017-02-24T15:32:04 | 82,559,369 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 319 | #!/home/dimitar/101/hackaton/venv/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'ipdb==0.10.2','console_scripts','ipdb3'
__requires__ = 'ipdb==0.10.2'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('ipdb==0.10.2', 'console_scripts', 'ipdb3')()
)
| [
"dbyordanow@gmail.com"
] | dbyordanow@gmail.com | |
4630ece4573f31077a86cd7a0e42b372f2fd486b | 8b373323e8ab2792e650c3d779770cf5cf89f22e | /filemanager/urls.py | 8cbbff786ac468dadc3234d77d29347bc5b5fd2a | [] | no_license | Bohloko/filemanager | ec0d5b650a70b9ce44bd4b4e2f34c6fb33116a6c | 44762e4a94306d159c12ef249de4d24e613c4e96 | refs/heads/master | 2023-06-17T16:55:05.109552 | 2021-07-15T09:37:24 | 2021-07-15T09:37:24 | 385,739,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from django.urls import path, include
from rest_framework.routers import SimpleRouter
from .views import ApplicationFileViewSet, FileUploadViewSet, UserViewSet, FileSearchView
# DRF router: one CRUD route set per viewset.
router = SimpleRouter()
router.register('users', UserViewSet, basename='users')
router.register('files', ApplicationFileViewSet, basename='files')
router.register('upload', FileUploadViewSet, basename='uploads')

# Router-generated routes first, then the hand-written endpoints,
# in the same order the original append() calls produced.
urlpatterns = router.urls
urlpatterns += [
    path('search/', FileSearchView.as_view(), name='search'),
    path('api-auth', include('rest_framework.urls')),
    path('dj-rest-auth/', include('dj_rest_auth.urls')),
    path('dj-rest-auth/registration/', include('dj_rest_auth.registration.urls')),
]
| [
"taubohloko@gmail.com"
] | taubohloko@gmail.com |
25e20a95bf29f57e63196d06f51219f3ec9e2f12 | 5a262566fcbba36e7f41283b886c4a492762a0a9 | /wdapp/management/UserFactory.py | d15c8b7353169ac22711bba7b8955828c428ba35 | [
"MIT"
] | permissive | Engineermar/WisdomDriven | a03705f660c407b2aac1f80ee9c0131cd9f9daa2 | eeda8a759098a16e67ad7b13ed120b30dbc8b618 | refs/heads/master | 2020-04-03T03:30:35.167084 | 2019-05-22T05:08:50 | 2019-05-22T05:08:50 | 154,987,878 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,778 | py | from django.contrib.auth.models import User
from wdapp.models import Company,Business,Driver
from faker import Factory
import factory.fuzzy
# Builds Django auth Users with a sequential username and an attached company.
class UserFactory(factory.django.DjangoModelFactory):
    # NOTE(review): FACTORY_FOR is the pre-2.4 factory_boy spelling; newer
    # versions expect `class Meta: model = User` - confirm the pinned version.
    FACTORY_FOR = User

    username = factory.Sequence(lambda n : "bobCaptain {}".format(n))
    # NOTE(review): CompanyFactory is defined *below* this class, so this
    # reference raises NameError at import time - verify the module loads.
    account = factory.RelatedFactory(CompanyFactory)
# Builds Company rows with fuzzed names, specialization and founding date.
class CompanyFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Company
    company_id = factory.Sequence(lambda n: 'company%d' % n)
    user = factory.SubFactory(UserFactory)
    company = factory.fuzzy.FuzzyChoice(['Internationals Inc.', 'Hercules Inc.', 'Willamette Industries Inc.','Hibernia Corp.',
                                 'National Corporation', 'Gentek Inc.', 'ARA Corporation', '3Base inc', 'Genesis Ventures Inc.',
                                 'Pitney Bowes Inc.', 'Teradyne Inc', 'BAmerica Corporation', 'Tower Auto Inc.', 'Timken Company',
                                 'The Company', 'Rock-Tenn Co', 'Ent Corporation', 'Phar Corp', 'International Corp.', 'Mobil Corporation'])
    # NOTE(review): neither INDUSTRY nor datetime is imported/defined in this
    # file's visible imports - both would raise NameError; confirm.
    trucking_specilization = factory.fuzzy.FuzzyChoice(INDUSTRY)
    date_established = factory.fuzzy.FuzzyDate( datetime.date(2008, 1, 1), datetime.date(2018, 12, 31) )
    logo = "www.gravatar.com/avatar/55ea9a364c96f4fea387d393f02b8812"
# Builds Business rows with faked contact details.
class BusinessFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Business
    name = factory.Faker('name')
    address = factory.Faker('address')
    phone_number = factory.Faker('phone_number')
# Builds Driver rows with faked contact details.
class DriverFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Driver
    name = factory.Faker('name')
    address = factory.Faker('address')
    phone_number = factory.Faker('phone_number')
| [
"noreply@github.com"
] | noreply@github.com |
253157c4d42b5b0854fff32b8673309796a5ba90 | be29b355371816a74f023df3a5ecbe812f2b499b | /hummingbot/market/radar_relay/radar_relay_order_book_tracker_entry.py | 56fe64cc63b7bafb619e88fa1c14bca2280cb2d0 | [
"Apache-2.0"
] | permissive | OceanEx/hummingbot | 324e6b59d111bc611d83baf1a87352044cef84d5 | f3dc27b9482093ea2c8d1fa603d0116de1e2fa05 | refs/heads/master | 2021-07-12T18:42:32.496606 | 2021-03-04T03:46:17 | 2021-03-04T03:46:17 | 236,079,080 | 2 | 2 | Apache-2.0 | 2021-03-04T03:44:35 | 2020-01-24T20:35:41 | Python | UTF-8 | Python | false | false | 1,021 | py | from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_tracker_entry import OrderBookTrackerEntry
from hummingbot.market.radar_relay.radar_relay_active_order_tracker import RadarRelayActiveOrderTracker
class RadarRelayOrderBookTrackerEntry(OrderBookTrackerEntry):
    """Order-book tracker entry that also carries Radar Relay's active-order tracker."""

    def __init__(self,
                 trading_pair: str,
                 timestamp: float,
                 order_book: OrderBook,
                 active_order_tracker: RadarRelayActiveOrderTracker):
        # Stash the exchange-specific tracker before delegating to the base entry.
        self._active_order_tracker = active_order_tracker
        super().__init__(trading_pair, timestamp, order_book)

    def __repr__(self) -> str:
        return (
            f"RadarRelayOrderBookTrackerEntry(trading_pair='{self._trading_pair}', timestamp='{self._timestamp}', "
            f"order_book='{self._order_book}')"
        )

    @property
    def active_order_tracker(self) -> RadarRelayActiveOrderTracker:
        """The RadarRelayActiveOrderTracker attached to this entry."""
        return self._active_order_tracker
| [
"yzhang1994@Gmail.com"
] | yzhang1994@Gmail.com |
04bcdaacff968603d6ef72fc8894e662f8836a1d | 8a0db63a5f267583214a6bde2ce40b471f7e92c1 | /loja/urls.py | e1542294e417109e26f3a34e3aa9e7a02600599d | [] | no_license | brendonlucas/Shopping-API | fea28195f08ab72f802b5f64992cb06376b35caa | ef671400197e3378f5c12883af3dd038ec1ffe03 | refs/heads/master | 2020-09-13T17:09:50.155084 | 2019-12-05T01:03:21 | 2019-12-05T01:03:21 | 222,849,306 | 0 | 0 | null | 2019-12-05T01:10:14 | 2019-11-20T04:17:35 | Python | UTF-8 | Python | false | false | 1,095 | py | from django.urls import path
from loja import views
from loja.views import *
# One URL per store/product operation; all endpoints use class-based views.
urlpatterns = [
    path('lojas/', LojasList.as_view(), name='list_lojas'),
    path('lojas/<int:id_loja>/', LojaDetalhes.as_view(), name='Detalhes_lojas'),
    path('lojas/<int:id_loja>/produtos/', ProdutosList.as_view(), name='list_produtos'),
    path('lojas/<int:id_loja>/produtos/<int:id_produto>/', ProdutoDetalhes.as_view(), name='detalhes_produto'),
    path('lojas/<int:id_loja>/produtos/<int:id_produto>/compra/', ProdutoCompra.as_view(), name='compra_produto'),
    path('lojas/<int:id_loja>/vendas/', VendaLoja.as_view(), name='vendas_loja'),
]
| [
"brendonplay007@gmail.com"
] | brendonplay007@gmail.com |
042558fd36b61b423dc4cefd58f98f19f15d08c8 | f6bb211338114cc9cc66fbec0e0e17ad5b7eaf85 | /School.py | 9d76b59ce61af2a05b3fb300f795c019f9223193 | [] | no_license | sajjad0927/MySchoolProject | 4cc6a05741c532866d47b0677349a6a97a881e4c | 43ced6b5ba8adff6700d372dadb6047e2fc89f52 | refs/heads/master | 2020-03-30T07:13:30.227057 | 2018-09-30T02:57:36 | 2018-09-30T02:57:36 | 150,916,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 992 | py |
class School:
    """Stores and prints basic information about a school."""
    # Class-level defaults (name-mangled to _School__*).
    __Name='';__Id=0;__Address='';__State='';__City='';__Pin=0

    def __init__(self, name, id, address, state, city, pin):
        self.__Name = name
        self.__Address = address
        self.__Id = id
        self.__City = city
        self.__State = state
        self.__Pin = pin

    def setSchoolAttributs(self, name, id, address, state, city, pin):
        """Replace all stored attributes at once.

        Bug fix: the original assigned to *local* variables (``__Name = name``),
        so calling this method silently had no effect on the instance.
        """
        self.__Name = name
        self.__Address = address
        self.__Id = id
        self.__City = city
        self.__State = state
        self.__Pin = pin

    def getSchoolDetails(self):
        """Print every stored field to stdout."""
        print("Printing")
        print ("Name=",self.__Name)
        print ("Address=",self.__Address)
        print ("ID=",self.__Id)
        print ("State=",self.__State)
        print ("City=",self.__City)
        print ("Pin=",self.__Pin)
#def main():
#School.setSchoolAttributs("School1",1,"Address1","State1","City1",55)
#School.getSchoolDetails()
#if __name__=='__main__':
#main()
| [
"sajjad0927@gmail.com"
] | sajjad0927@gmail.com |
ad3fc34e434f57f06f36860bfa40e98831eb7372 | 7ece94f0313b37ab44a295c06d04860a1f308a30 | /MovieFolderRenamer.py | 8db40d97e8e22531b551510885e2f0012662ff81 | [] | no_license | elgar-groot/movie-renamer | 227eef7716d9f53228fd36b58e50377d6bb4ffbd | 9a3675917ebcf71bdd3459300f1a3c521a2f86c3 | refs/heads/master | 2016-08-03T18:11:45.737632 | 2014-02-28T16:53:14 | 2014-02-28T16:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | #!/usr/bin/python
import os
from glob import *
from shutil import *
import sys
from re import *
from httplib import *
import json
class MovieFolderRenamer:
    '''A class that can update all movie folder names in a specified folder
    to include the year, so Couchpotato will recognize them.
    Makes use of omdbapi.com

    NOTE(review): Python 2 code (print statements, httplib) - run under
    Python 2 or port before reuse.'''
    def __init__(self, movie_folder):
        # Reuse one HTTP connection to the OMDb API for all lookups.
        self.connection = HTTPConnection("www.omdbapi.com")
        # Normalise to a trailing slash so paths can be concatenated directly.
        self.movie_folder = movie_folder if movie_folder[-1]=='/' else movie_folder+'/'
        # Matches folder names that already end in "(YYYY)".
        self.re = compile('.*\([0-9]{4}\)')
    def rename_folder(self, folder):
        # OMDb search by title; spaces must be URL-encoded.
        name = os.path.basename(folder).replace(' ', '%20')
        self.connection.request("GET", "/?s=" + name)
        answer = self.connection.getresponse().read()
        try:
            # Take the year of the first search hit and append "(YYYY)".
            js = json.loads(answer)
            year = js['Search'][0]['Year']
            new_name = name + ' ('+year+')'
            os.rename(folder, self.movie_folder+new_name)
            print 'Renaming', name, ' << ', new_name
        except:
            # NOTE(review): bare except hides network/JSON/rename errors alike.
            print 'Unable to rename: ', name
    def rename(self):
        # Rename every sub-folder that does not already carry a year suffix.
        folders = glob(self.movie_folder+'*')
        for folder in folders:
            if( self.re.match(folder) == None):
                self.rename_folder(folder)
def main():
    # Usage: script path_to_moviefolder (Python 2 entry point).
    try:
        movie_folder = sys.argv[1]
        renamer = MovieFolderRenamer(movie_folder)
        renamer.rename()
    except IndexError as err:
        # Missing command-line argument.
        print >>sys.stderr, "Usage: {0} path_to_moviefolder".format(sys.argv[0])
        return 1
# main method call
if __name__ == "__main__":
main()
| [
"elgar.groot@gmail.com"
] | elgar.groot@gmail.com |
ea699af4008afcb9c81e4e18774c8e0a4bb71e09 | 49c4597a93a3208c70b33464ac926a0594d4965f | /src/python/setup.py | 40f41b1d10bdb8f5b289328b725784db2d53fabb | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | victor8733/phyre | 690b6e7c13365f54d580e6ca9a0440f781b26b43 | 1787b546b37ab838934165fec42e9567fbe1e301 | refs/heads/master | 2020-07-05T16:19:02.317709 | 2019-08-15T15:41:10 | 2019-08-15T15:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,783 | py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import setuptools
import setuptools.command.build_ext
BUILD_COMMANDS = [['make', 'react_deps'], ['make', 'develop']]
class build_ext(setuptools.command.build_ext.build_ext):
    # Custom build step: run the project's make targets before the normal
    # extension build so the generated artifacts exist for packaging.
    def run(self):
        for command in BUILD_COMMANDS:
            # Commands run from the repository root, two levels up.
            subprocess.check_call(command, cwd='../..')
        setuptools.command.build_ext.build_ext.run(self)
        self.run_command('egg_info')
# Package metadata; package_data globs pick up the generated interface,
# simulator bindings, data files and the viz server's static assets.
setuptools.setup(name='phyre',
                 version='0.0.1',
                 author='Facebook AI Research',
                 package_data={
                     'phyre': [
                         os.path.join('interface', '*', '*.py'),
                         os.path.join('simulator_bindings*.so'),
                         os.path.join('data', '*'),
                         os.path.join('data', '*', '*'),
                         os.path.join('data', '*', '*', '*'),
                         os.path.join('viz_static_file', '*'),
                         os.path.join('viz_static_file', '*', '*'),
                     ]
                 },
                 packages=['phyre', 'phyre.creator', 'phyre.viz_server'],
                 install_requires=[
                     'nose', 'numpy', 'tornado', 'thrift', 'imageio', 'scipy',
                     'joblib'
                 ],
                 cmdclass={'build_ext': build_ext})
| [
"7470747+akhti@users.noreply.github.com"
] | 7470747+akhti@users.noreply.github.com |
129c7d6dade54a4eac44f6e25e6a1cf9bb3f35cd | 3d915fe47b258a6833cb332c5b9d807ae9b55375 | /test_appium/po/page/app.py | 906f9e1c283b1d2697a9578316e6ddf202204c45 | [] | no_license | liuzhiyang123/hogwarts | 73080468c75072501d66d81b75565d80d231c776 | 2761306060bf5898991e282d15d4b24dadb6bcf7 | refs/heads/master | 2023-07-05T13:39:37.572188 | 2021-08-14T02:07:47 | 2021-08-14T02:07:47 | 319,951,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | from appium import webdriver
from test_appium.po.page.base_page import BasePage
from test_appium.po.page.main_page import MainPage
class App(BasePage):
    """Entry page object: boots the WeChat Work app through Appium."""

    def start(self):
        """Create the Appium session (with one retry) and store the driver."""
        caps = {
            'platformName': "Android",
            'platformVersion': "6",
            'deviceName': "vivo x5",
            'appPackage': "com.tencent.wework",
            'appActivity': ".launch.LaunchSplashActivity",
            'automationName': "UiAutomator2",
            'systemPort': '8200',
            'noReset': 'true',
            # Bug fix: the capability was misspelled 'skipServerInstalltion',
            # so Appium silently ignored it and reinstalled the server each run.
            'skipServerInstallation': 'true',
            'udid': '8f7fb4bc',
            'adbExecTimeout': 20000,
            # How long pages may wait for the UI to become idle.
            'settings[waitForIdleTimeout]': 0,
            'newCommandTimeout': 120
        }
        # One silent retry for flaky first connections; re-raise on the
        # second failure.
        for i in range(2):
            try:
                self.driver = webdriver.Remote('http://127.0.0.1:4700/wd/hub', caps)
                # Bug fix: return on success - the original loop ran a second
                # iteration and opened a duplicate session.
                return
            except Exception as e:
                if i == 1:
                    raise e

    def goto_main_page(self):
        """The app lands on the main page after launch."""
        return MainPage(self.driver)
| [
"liuzy@liuzydeMacBook-Pro.local"
] | liuzy@liuzydeMacBook-Pro.local |
ebc524685706f7301bb2ed5171e45241996284a5 | c9d9f17f4e3021a19e5450934a8a59764cb74e85 | /swig/python/test/main.py | 3017dbcf2fb4418a7d39e21727f57e225e9664d1 | [
"MIT"
] | permissive | longlongwaytogo/Learning.test | bfdb5c66ea127b867c73fd2f33d0fe7e962fdb81 | b2a2d95c0e1195679a9950c1a24cca071005ef6e | refs/heads/master | 2023-08-09T06:47:42.572746 | 2023-07-29T10:19:02 | 2023-07-29T10:19:02 | 92,352,631 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,594 | py | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.33
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_main', [dirname(__file__)])
except ImportError:
import _main
return _main
if fp is not None:
try:
_mod = imp.load_module('_main', fp, pathname, description)
finally:
fp.close()
return _mod
_main = swig_import_helper()
del swig_import_helper
else:
import _main
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
    # Repr for SWIG proxies; tolerates proxies with no 'this' pointer yet.
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:
        strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim for pre-2.2 Pythons without new-style classes; on any
# modern interpreter the try branch succeeds and _newclass is 1.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object:
        pass
    _newclass = 0
def compute(a, b):
    # Thin Python wrapper around the C-extension entry point.
    return _main.compute(a, b)
compute = _main.compute  # rebind to the raw C function (skips the wrapper)
# This file is compatible with both classic and new-style classes.
| [
"longlongway2012@hotmail.com"
] | longlongway2012@hotmail.com |
cde1e0b1edac266be25950e13a23ca56ae2abe8e | 5d65d7bc282ae86ad9ed305117273601a0b7e0e4 | /reminder.py | 68896b32767ce5dec7f2a4b2152d5fb69a095c23 | [] | no_license | JanLetovanec/jane_bot | d37b09f238409374e7d326242137268fab70243e | cff42c4e4e737ddb9f2e9fd5e92ba9a7e00bb795 | refs/heads/master | 2020-09-13T09:10:10.136939 | 2020-03-11T14:46:56 | 2020-03-11T14:46:56 | 222,719,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,126 | py | """ Helper library for remind function """
from pprint import pprint
import time
# Reply templates; filled in with .format(target=..., author=..., text=...).
possible_replies = [
    "Hi {target}, sorry to interupt you, but {author} wanted me to remind you: {text}",
    "{target}! {author} wanted me to tell you this: {text}",
    "{target}, hope I am not interupting you, but : {text} . Just thought you might like to know."
]
def format_text(text):
    """Normalize a raw remind request string.

    Returns either ``[message, 'in', amount, unit_seconds]`` for relative
    requests ("... in 5 minutes") or ``[message, 'on', clock]`` for absolute
    requests ("... at 17:00").  Raises ``Exception("Bad remind request")``
    when the request is too short to contain a time specification.
    """
    # Canonicalize filler words and convert time-unit words to seconds.
    text = text.replace('to do ', '') \
        .replace('to ', '') \
        .replace(' at ', ' on ') \
        .replace(' a ', ' 1 ') \
        .replace(' an ', ' 1 ') \
        .replace('minutes', '60') \
        .replace('minute', '60') \
        .replace('seconds', '1') \
        .replace('second', '1') \
        .replace('hours', '3600') \
        .replace('hour', '3600')
    words = text.split(' ')
    # BUG FIX: validate *before* indexing words[-3]; the old code indexed
    # first and so raised an opaque IndexError for short requests instead of
    # the intended "Bad remind request" error.
    if len(words) < 3:
        pprint(words)
        raise Exception("Bad remind request")
    if words[-3] == 'in':
        # Relative request: trailing tokens are "in <amount> <unit_seconds>".
        message = "".join(" " + w for w in words[:-3])
        final_text = [message, words[-3], words[-2], words[-1]]
    else:
        # Absolute request: trailing tokens are "on <clock>".
        # BUG FIX: join the message words with spaces (the old loop
        # concatenated them with no separator, fusing the words together).
        message = "".join(" " + w for w in words[:-2])
        final_text = [message, words[-2], words[-1]]
    return final_text
def parse_time(text):
    """Parse the reminder time out of an already-formatted request list."""
    # Relative form: [..., 'in', amount, unit_seconds] -> now + offset.
    if text[-3] == 'in':
        offset_seconds = int(text[-2]) * int(text[-1])
        return time.gmtime(offset_seconds + time.time())
    # Absolute form: the last element is an "HH:MM" clock time for today.
    clock = text[-1].replace(':', ' ')
    today = time.strftime("%m/%d/%y", time.gmtime(time.time()))
    return time.strptime(clock + " " + today, "%H %M %m/%d/%y")
def generate_data(entry):
    """Split a stored reminder entry into display-ready fields.

    ``entry`` is indexable as (target, text, _, author, channel); returns
    ``(target_mention, text, author_name, channel)`` where the author name
    becomes the literal 'you' for self-reminders.
    """
    target = entry[0]
    text = entry[1]
    author = entry[3]
    channel = entry[4]
    author_name = 'you' if target == author else author.name
    return target.mention, text, author_name, channel
| [
"janko.let@gmail.com"
] | janko.let@gmail.com |
444c29a2b44228ef93f4c336d14eb3ba9a77dba4 | 358f7f248cee5069ccbca9ae6dc463bbd9de4ac6 | /window.py | 2a9f6acbb9b9332d5cff82f6a7aa64cf07e5840e | [] | no_license | zaharko/zebra | 1a255456034c3840b6720f144b27c5a8d41b1297 | bf846b38ff6948234a0dffc2ae14963c758f3e4f | refs/heads/master | 2020-07-13T00:59:49.972420 | 2019-08-28T14:50:22 | 2019-08-28T14:50:22 | 204,953,603 | 0 | 0 | null | 2019-08-28T16:38:06 | 2019-08-28T14:38:05 | null | UTF-8 | Python | false | false | 5,572 | py | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMainWindow, QGridLayout, QLabel, QSpacerItem, QSizePolicy, QComboBox, QFileDialog
from plot_canvas import PlotCanvas
class Window(QMainWindow):
    '''
    How it works:
    First a window is created.
    Then all the widgets (buttons, sliders, plots) are added to the window and hidden.
    Finally depending on what view is requested widgets are shown or hidden.
    This way we work around the need for tabs and maintain a quick and simple UI.
    '''

    def __init__(self):
        """Build the widget tree, wire up actions and show the main view."""
        super().__init__()
        self.container = QWidget(self)
        self.grid = QGridLayout()
        self.setCentralWidget(self.container)
        self.container.setLayout(self.grid)
        self.define_widgets()
        self.set_widget_actions()
        self.main_view()
        self.setGeometry(0, 0, 500, 300)
        self.setWindowTitle("Zebra")
        self.show()
        # BUG FIX: the constructor previously called sys.exit(app.exec_()),
        # starting the Qt event loop inside __init__ so the constructor never
        # returned (and the module-level app.exec() was dead code).  The event
        # loop is now started exclusively by the ``__main__`` entry point.

    def define_widgets(self):
        ''' Define widgets such as buttons. This must be done before setting widget actions.
        Otherwise a widget action might try to modify a widget that is not defined yet.'''
        # add empty label as spacer after buttons
        self.grid.addWidget(QLabel(), 10, 10)
        self.button_zebra = self.make_button("1 Zebra", self.grid, 0, 0)
        # 'Back' deliberately shares cell (0,0) with 'Zebra': only one of them
        # is visible at a time, depending on the current view.
        self.button_back = self.make_button("Back", self.grid, 0, 0)
        self.button_dmc = self.make_button("2 DMC", self.grid, 0, 1)
        self.button_transfer = self.make_button("3 Transfer\nZebra-DMC-Laue", self.grid, 0, 2)
        self.m_range_label = QLabel("Measurement Range:")
        self.grid.addWidget(self.m_range_label, 0, 2)
        self.plot_canvas = PlotCanvas(width=5, height=5)
        self.plot_canvas.setFixedWidth(400)
        self.plot_canvas.setFixedHeight(300)
        self.grid.addWidget(self.plot_canvas, 0, 1, 3, 1)
        self.m_range_combo = QComboBox()
        self.m_range_combo.addItems(["hkl", "-hkl", "hex", "cub"])
        self.grid.addWidget(self.m_range_combo, 0, 3)
        self.add_data_label = QLabel("Add Data:")
        self.grid.addWidget(self.add_data_label, 1, 2)
        self.add_data_button = self.make_button("Open File", self.grid, 1, 3)
        self.button_zebra13 = self.make_button("1.3", self.grid, 3, 3)
        self.button_dmc21 = self.make_button("2.1", self.grid, 0, 1)
        self.button_dmc22 = self.make_button("2.2", self.grid, 1, 1)

    def set_widget_actions(self):
        """Connect every widget signal to its handler."""
        self.button_zebra.clicked.connect(self.zebra_view)
        self.button_back.clicked.connect(self.main_view)
        self.button_dmc.clicked.connect(self.dmc_view)
        self.button_transfer.clicked.connect(self.transfer_view)
        self.m_range_combo.activated[str].connect(self.set_m_range)
        self.add_data_button.clicked.connect(self.open_file)
        self.button_zebra13.clicked.connect(self.zebra13_action)
        self.button_dmc21.clicked.connect(self.dmc21_action)
        self.button_dmc22.clicked.connect(self.dmc22_action)

    def hide_all(self):
        """Hide every widget; each *_view() then re-shows its own subset."""
        self.button_zebra.setHidden(True)
        self.button_back.setHidden(True)
        self.button_dmc.setHidden(True)
        self.button_transfer.setHidden(True)
        self.m_range_label.setHidden(True)
        self.m_range_combo.setHidden(True)
        self.plot_canvas.setHidden(True)
        self.add_data_label.setHidden(True)
        self.add_data_button.setHidden(True)
        self.button_zebra13.setHidden(True)
        self.button_dmc21.setHidden(True)
        self.button_dmc22.setHidden(True)

    def main_view(self):
        """Top-level menu: the three instrument/workflow buttons."""
        self.hide_all()
        self.button_zebra.setHidden(False)
        self.button_dmc.setHidden(False)
        self.button_transfer.setHidden(False)

    def zebra_view(self):
        """Zebra workflow: plot canvas, measurement range and data loading."""
        self.hide_all()
        self.button_back.setHidden(False)
        self.plot_canvas.setHidden(False)
        self.m_range_label.setHidden(False)
        self.m_range_combo.setHidden(False)
        self.add_data_label.setHidden(False)
        self.add_data_button.setHidden(False)
        self.button_zebra13.setHidden(False)

    def dmc_view(self):
        """DMC workflow: sub-step buttons 2.1 and 2.2."""
        self.hide_all()
        self.button_back.setHidden(False)
        self.button_dmc21.setHidden(False)
        self.button_dmc22.setHidden(False)

    def transfer_view(self):
        """Transfer workflow (not implemented yet beyond the back button)."""
        self.hide_all()
        self.button_back.setHidden(False)

    def open_file(self):
        ''' Corresponds to 1.2
        TODO Right now this is just copy&pasted. Clean this up.'''
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self, "QFileDialog.getOpenFileName()", "", "All Files (*);;Python Files (*.py)", options=options)
        if fileName:
            print(fileName)

    def zebra13_action(self):
        """Placeholder for step 1.3."""
        pass

    def dmc21_action(self):
        """Placeholder for step 2.1."""
        pass

    def dmc22_action(self):
        """Placeholder for step 2.2."""
        pass

    def set_m_range(self, selection):
        ''' Corresponds to 1.1'''
        print(selection)

    def make_button(self, text, grid, row, col):
        """Create a fixed-size button in the grid and hide it."""
        button = QPushButton(self)
        button.setText(text)
        grid.addWidget(button, row, col)
        button.setHidden(True)
        button.setFixedHeight(50)
        button.setFixedWidth(150)
        return button
if __name__ == '__main__':
    # Create the Qt application, show the main window and run the event loop.
    app = QApplication(sys.argv)
    hello = Window()
    sys.exit(app.exec())
| [
"ivan@olexyn.com"
] | ivan@olexyn.com |
e2616b15545ff9e24f76b4fa5ffbd27b156aac2b | 8e0567b913653c6d978b8f69a297449fa475cd74 | /opt_classifier/opt_classifier.py | dd9dd34e020b9f44d24b7f9259814bdbab1b9fc7 | [] | no_license | ercarpio/SG_CNN | 0521cadc13180f6886fe945a0a4763d1c19655c2 | 77f519f3b0f4cd3d2bdfad611a833c061767ee68 | refs/heads/master | 2021-09-19T10:29:30.593422 | 2018-07-27T00:08:38 | 2018-07-27T00:08:38 | 113,371,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,667 | py | import tensorflow as tf
# contains information relating to input data size
from common.constants import *
# network layer information for P_CNN
layer_elements = [-1, 16, 32, 128, OPT_CLASSES]  # channel count per layer (-1 marks the input slot)
output_sizes = [32, 16, 4]   # expected spatial size after each conv layer (informational)
filter_sizes = [4, 4, 8]     # square kernel size for conv layers 1-3
stride_sizes = [2, 2, 4]     # stride for conv layers 1-3
padding_size = [1, 1, 2]     # manual zero-padding applied before each conv
'''
ClassifierModel generates q-values for a given input observation
'''
class ClassifierModel:
    # Constructor
    """
    TensorFlow 1.x graph for classifying optical-flow sequences (P_CNN + LSTM).

    batch_size - int (1 by default)
    filename - string, location of file with saved model parameters (no model listed by default)
    learning_rate - float, speed at which the model trains (1e-5 by default)
    """
    def __init__(self, batch_size=1, filename="", learning_rate=1e-5):
        # Build the whole computation graph, open a session and either
        # initialize fresh variables or restore them from `filename`.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.__batch_size = batch_size
            self.__alpha = learning_rate
            # Model variables
            def weight_variable(name, shape):
                # Truncated-normal init (stddev 0.1) for conv/FC weights.
                initial = tf.truncated_normal(shape, stddev=0.1)
                return tf.Variable(initial, name=name)
            def bias_variable(name, shape):
                # Small positive constant init for biases.
                initial = tf.constant(0.1, shape=shape)
                return tf.Variable(initial, name=name)
            # Q variables
            self.variables_pnt = {
                "W1": weight_variable("W_conv1_pnt", [filter_sizes[0], filter_sizes[0],
                                                      pnt_dtype["num_c"], layer_elements[1]]),
                "b1": bias_variable("b_conv1_pnt", [layer_elements[1]]),
                "W2": weight_variable("W_conv2_pnt", [filter_sizes[1], filter_sizes[1],
                                                      layer_elements[1], layer_elements[2]]),
                "b2": bias_variable("b_conv2_pnt", [layer_elements[2]]),
                "W3": weight_variable("W_conv3_pnt", [filter_sizes[2], filter_sizes[2],
                                                      layer_elements[2], layer_elements[-2]]),
                "b3": bias_variable("b_conv3_pnt", [layer_elements[-2]]),
                "W_lstm": weight_variable("W_lstm", [layer_elements[-2], layer_elements[-1]]),
                "b_lstm": bias_variable("b_lstm", [layer_elements[-1]]),
                "W_fc": weight_variable("W_fc", [layer_elements[-1] + 1, layer_elements[-1]]),
                "b_fc": bias_variable("b_fc", [layer_elements[-1]])
            }
            # Placeholder variables
            # placeholder for the Optical Flow data
            self.pnt_ph = tf.placeholder("float",
                                         [self.__batch_size, None,
                                          pnt_dtype["cmp_h"] * pnt_dtype["cmp_w"] * pnt_dtype["num_c"]],
                                         name="pnt_placeholder")
            # placeholder for the sequence length
            self.seq_length_ph = tf.placeholder("int32", [self.__batch_size],
                                                name="seq_len_placeholder")
            # placeholder for the reward values to classify with
            self.pnt_y_ph = tf.placeholder("float", [None, OPT_CLASSES], name="pnt_y_placeholder")
            # Build Model Structure
            # initialize all variables in the network
            self.pred_wave_set = self.execute_wave_var_set()  # used to initialize variables
            # Q-value Generation Functions
            # return the action with the highest q-value
            self.wave_observed = tf.argmax(self.execute_wave(), 1)
            # Optimization Functions
            # get the difference between the q-values and the true output
            self.cross_entropy_wave = tf.nn.softmax_cross_entropy_with_logits(
                labels=self.pnt_y_ph, logits=self.execute_wave())
            # optimize the network
            self.optimizer_wave = tf.train.AdamOptimizer(learning_rate=self.__alpha).minimize(
                self.cross_entropy_wave)
            # Evaluation Functions
            # return a boolean indicating whether the system correctly predicted the output
            # NOTE(review): self.wave_observed is already an argmax over axis 1,
            # so taking tf.argmax of it again here looks suspect — confirm the
            # accuracy path was ever exercised.
            self.correct_pred_wave = tf.equal(tf.argmax(self.wave_observed, 1),
                                              tf.argmax(self.pnt_y_ph, 1))
            # the accuracy of the current batch
            self.accuracy_wave = tf.reduce_mean(tf.cast(self.correct_pred_wave, tf.float32))
            # Initialization
            # Generate Session
            self.sess = tf.InteractiveSession(graph=self.graph)
            # Variable for generating a save checkpoint
            self.saver = tf.train.Saver()
            if len(filename) == 0:
                # initialize all model variables
                init_op = tf.global_variables_initializer()
                self.sess.run(init_op)
                print("VARIABLE VALUES INITIALIZED")
            else:
                # restore variables from a checkpoint
                self.saver.restore(self.sess, filename)
                print("VARIABLE VALUES RESTORED FROM: " + filename)
    # Helper Functions
    def save_model(self, name="model.ckpt", save_dir=""):
        """
        save the model to a checkpoint file
        -name: (string) name of the checkpoint file
        -save_dir: (string) directory to save the file into
        """
        self.saver.save(self.sess, save_dir + '/' + name)
    # Executor Functions
    def execute_wave_var_set(self):
        # Initialize the model's structure (first pass creates the variables).
        return self.wave_model(
            self.seq_length_ph,
            self.pnt_ph,
            tf.variable_scope("wave"),
            tf.variable_scope("wave"),
            self.variables_pnt
        )
    def execute_wave(self):
        # Generate the q-values of Q for the given input (reuses "wave" scope).
        return self.wave_model(
            self.seq_length_ph,
            self.pnt_ph,
            tf.variable_scope("wave"),
            tf.variable_scope("wave", reuse=True),
            self.variables_pnt
        )
    # The Model
    def process_vars(self, seq, data_type):
        # cast inputs to float32 and restore the (batch, time, H, W, C) shape
        seq_inp = tf.cast(seq, tf.float32)
        return tf.reshape(seq_inp, (self.__batch_size, -1, data_type["cmp_h"],
                                    data_type["cmp_w"], data_type["num_c"]))
    def check_legal_inputs(self, tensor, name):
        # ensure that the current tensor is finite (doesn't have any NaN values)
        return tf.verify_tensor_all_finite(tensor, "ERR: Tensor not finite - " + name, name=name)
    def wave_model(self, seq_length, pnt_ph, variable_scope, variable_scope2, var_pnt):
        """
        Assemble the P_CNN -> LSTM -> FC pipeline and return the FC logits.

        -seq_length: (placeholder) the number of frames in the video
        -pnt_ph: (placeholder) an array that contains the optical flow data
        -variable_scope: (variable_scope) scope for the CNN stacks
        -variable_scope2: (variable_scope) scope for the temporal data
        -var_pnt: (dict) the variables for the optical flow input
        """
        # Convolution Functions
        def convolve_data_3layer_pnt(input_data, variables, n, dtype):
            # pass data through the three manually-padded conv layers of P_CNN
            def pad_tf(x, p):
                # zero-pad height and width by p on each side
                return tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "CONSTANT")
            def gen_convolved_output(sequence, W, b, stride, num_hidden, new_size, padding='SAME'):
                # conv + bias + ReLU (num_hidden/new_size are informational)
                conv = tf.nn.conv2d(sequence, W, strides=[1, stride, stride, 1],
                                    padding=padding) + b
                return tf.nn.relu(conv)
            input_data = tf.reshape(input_data,
                                    [-1, dtype["cmp_h"], dtype["cmp_w"], dtype["num_c"]],
                                    name=n + "_inp_reshape")
            for i in range(3):
                si = str(i + 1)
                input_data = pad_tf(input_data, padding_size[i])
                padding = "VALID"
                input_data = gen_convolved_output(input_data, variables["W" + si],
                                                  variables["b" + si], stride_sizes[i],
                                                  layer_elements[i + 1], output_sizes[i], padding)
                input_data = self.check_legal_inputs(input_data, "conv" + si + "_" + n)
            return input_data
        # =======================================
        # Model Execution Begins Here
        # =======================================
        # CNN Stacks
        # Inception Network (INRV2)
        with variable_scope as scope:
            # P_CNN: convolve each frame, then flatten per time step
            inp_data = self.process_vars(pnt_ph, pnt_dtype)
            conv_inp = convolve_data_3layer_pnt(inp_data, var_pnt, "pnt", pnt_dtype)
            conv_inp = tf.reshape(conv_inp, [self.__batch_size, -1,
                                             output_sizes[-1] * output_sizes[-1] *
                                             layer_elements[-2]], name="combine_reshape")
            # capture variables before changing scope
            W_lstm = var_pnt["W_lstm"]
            b_lstm = var_pnt["b_lstm"]
        with variable_scope2 as scope:
            # Internal Temporal Information (LSTM)
            lstm_cell = tf.contrib.rnn.LSTMCell(layer_elements[-2],
                                                use_peepholes=False,
                                                cell_clip=None,
                                                initializer=None,
                                                num_proj=None,
                                                proj_clip=None,
                                                forget_bias=1.0,
                                                state_is_tuple=True,
                                                activation=None,
                                                reuse=None
                                                )
            lstm_mat, _ = tf.nn.dynamic_rnn(
                cell=lstm_cell,
                inputs=conv_inp,
                dtype=tf.float32,
                sequence_length=seq_length,
                time_major=False
            )
            # if lstm_out is NaN replace with 0 to prevent model breakage
            lstm_mat = tf.where(tf.is_nan(lstm_mat), tf.zeros_like(lstm_mat), lstm_mat)
            lstm_mat = self.check_legal_inputs(lstm_mat, "lstm_mat")
            # extract relevant information from LSTM output using partitions
            # (takes the last time step of the first batch element)
            lstm_out = tf.expand_dims(lstm_mat[0, -1], 0)
            # FC1: project the LSTM state to class logits
            fc1_out = tf.matmul(lstm_out, W_lstm) + b_lstm
            fc1_out = self.check_legal_inputs(fc1_out, "fc1")
            return fc1_out
if __name__ == '__main__':
    # Smoke test: build the graph with default hyper-parameters.
    dqn = ClassifierModel()
| [
"eccarpio@hotmail.com"
] | eccarpio@hotmail.com |
3b89389daeeefbd5bfb316297767be67e33037ad | aef5c3a8fc1a0849e8ed7dcdf4ea0446f64c342c | /zapd/admin.py | 11cffccef493d84b52ed6a47db8f4850407810cd | [] | no_license | eoliveros/zapd | c21e05dde1b318870483a2a34799fffdd1fcbd69 | b17afbc5b05fcbd27370d9ea9e6c2e6fc6bed7d6 | refs/heads/master | 2022-10-16T02:01:49.969941 | 2020-06-16T00:36:15 | 2020-06-16T00:36:15 | 171,779,747 | 0 | 0 | null | 2019-02-21T01:43:15 | 2019-02-21T01:43:14 | null | UTF-8 | Python | false | false | 1,402 | py | from flask import url_for
import flask_admin
from flask_admin import helpers as admin_helpers
from app_core import app, db
from models import security, RestrictedModelView, ProposalModelView, UserModelView, TransactionRestrictedModelView, AMWalletRestrictedModelView, \
Role, User, Category, Proposal, Transaction, CreatedTransaction, AMWallet, AMDevice
# Create admin
# Create the Flask-Admin instance for the ZAPD back office.
admin = flask_admin.Admin(
    app,
    'ZAPD Admin',
    base_template='my_master.html',
    template_mode='bootstrap3',
)
# Add model views, grouped by category in the admin navigation bar.
admin.add_view(UserModelView(User, db.session, category='Admin'))
admin.add_view(RestrictedModelView(Role, db.session, category='Admin'))
admin.add_view(RestrictedModelView(Category, db.session, category='Admin'))
admin.add_view(AMWalletRestrictedModelView(AMWallet, db.session, name='App Metrics - Wallet', category='Admin'))
admin.add_view(ProposalModelView(Proposal, db.session))
admin.add_view(TransactionRestrictedModelView(Transaction, db.session, category='ZAPD'))
admin.add_view(RestrictedModelView(CreatedTransaction, db.session, category='ZAPD'))
# define a context processor for merging flask-admin's template context into the
# flask-security views.
@security.context_processor
def security_context_processor():
    # Exposes the admin base template and helpers to Flask-Security templates
    # so login/registration pages render inside the admin chrome.
    return dict(
        admin_base_template=admin.base_template,
        admin_view=admin.index_view,
        h=admin_helpers,
        get_url=url_for
    )
| [
"djpnewton@gmail.com"
] | djpnewton@gmail.com |
a18a50416d57b4791dde890213ed60f4153bf41c | 2e917d5d889baca195865d8d7330a558aef05870 | /simulationWithBurnoutPrediction/simulationWithBurnoutPrediction/PopulationCaregiver.py | e5de0583d9dd98fc13d950512cfe8fb8abe6f552 | [] | no_license | Obatata/SimulationAndPrediction | 4984b2af1dcffa87ba83e35fc944660d9134e590 | 74e8e81f8b182fb15c88e01685557f4856d73f61 | refs/heads/master | 2020-04-29T11:16:09.863197 | 2019-03-17T11:25:27 | 2019-03-17T11:25:27 | 176,091,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,695 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 15:35:49 2018
@author: Oussama BATATA
"""
# import library .........
import numpy as np
import InputData
import Caregiver
import ServiceOfRespite
class PopulationCaregiver:
    """Population of caregivers, characterized by:
    - nbCluster = number of caregiver classes (clusters)
    - nbrStatePerCluster = number of Markov-chain states, possibly specific to each cluster
    - nbCaregiversPerCluster = number of caregivers in each cluster
    - timeHorizon = number of time steps of the simulation
    - listeCaregivers = flat list holding every caregiver and its attributes
    - markovMatrix = Markov matrices giving the transition probabilities between burnout states
    """
    def __init__(self, inputData, serviceRepit): # constructor
        # -------------------------------------
        ## handles to the InputData source and the respite service
        self.__inpuData = inputData
        self.__serviceRepit = serviceRepit
        #self.__respiteHome = respiteHome
        ## attributes describing the whole population
        self.__nbCluster = 0
        self.__nbrStatePerCluster = 0
        self.__nbCaregiversPerCluster = 0
        self.__timeHorizon = 0;
        self.__listeCaregivers = [] # every caregiver, all clusters mixed together
        self.__matrixOfCaregivers = [] # one sub-list per cluster, each holding that cluster's caregivers
        self.__markovMatrix = []
        # -----------------------------------
    """" Fcontion pour importer les données """
    def importData (self):
        # Pull all population parameters from the InputData object.
        # NOTE(review): must be called before generatePopulation(), which
        # reads self.__burnoutCaregivers set here — confirm caller order.
        self.__nbCluster = self.__inpuData.getNuberCluster()
        self.__nbrStatePerCluster = self.__inpuData.getNberStatePerCluster()
        self.__nbCaregiversPerCluster = self.__inpuData.getNbCaregiversPerCluster()
        self.__timeHorizon = self.__inpuData.getTimeHorizion()
        self.__listeCaregivers = self.__inpuData.getListeCaregivers()
        self.__markovMatrix = self.__inpuData.getMarkovMatrix()
        self.__burnoutCaregivers = self.__inpuData.getBurnoutCaregivers()
    '''---------------------Getter functions exposed outside the class-----------------------------'''
    #self.__nbCluster
    def getNbCluster(self):
        return self.__nbCluster
    #self.__nbrStatePerCluster
    def getNbStateOerCluster(self):
        return self.__nbrStatePerCluster
    #self.__nbCaregiversPerCluster
    def getNbCaregiversPerCluster(self):
        return self.__nbCaregiversPerCluster
    #self.__timeHorizon
    def getTimeHorizon(self):
        return self.__timeHorizon
    #self.__listeCaregivers
    def getListeCaregiver(self):
        return self.__listeCaregivers
    #self.__burnoutCaregivers
    def getBurnoutCaregivers(self):
        return self.__burnoutCaregivers
    #self.__matrixOfCaregivers
    def getMatrixOfCaregivers(self):
        return self.__matrixOfCaregivers
    #self.__markovMatrix
    def getMarkovMatrix(self):
        return self.__markovMatrix
    ''' --------------------------------------------End of getter functions ------------------------------'''
    def generatePopulation(self):
        # Instantiate every caregiver, filling both the flat list and the
        # per-cluster matrix; ids are globally unique across clusters.
        id = 0
        for i in range(self.__nbCluster):
            tableTemp = []
            for j in range(self.__nbCaregiversPerCluster):
                caregiver = Caregiver.Caregiver(id, i, j, self.__burnoutCaregivers[i][j][0], self.__serviceRepit)
                id+=1
                self.__listeCaregivers.append(caregiver)
                tableTemp.append(caregiver)
            self.__matrixOfCaregivers.append(tableTemp)
    def getCaregiverByTwoId(self, idCluster, idIncluster):
        # Linear search by (cluster id, in-cluster id); returns None if absent.
        for i in self.__listeCaregivers:
            if i.idCluster == idCluster and i.idInCluster == idIncluster :
                return i
    def updateStateCaregiver(self, idCluster, idInCluster, timePeriod, caregiver):
        # Draw the caregiver's next burnout state from the cluster's Markov
        # transition matrix for this time period (inverse-CDF sampling).
        '''print("je rentre dans l'update state de l'aidant suivant :")
        print(self.__matrixOfCaregivers[idCluster][idInCluster])'''
        # NOTE(review): randint(0, 10)/10 only samples the ten values
        # 0.0..0.9 instead of a continuous uniform [0, 1) — any transition
        # mass above cumulative 0.9 is over-selected; confirm intent.
        valRand = np.random.randint(0,10)/10
        sommeCumul = 0
        #caregiver = self.__matrixOfCaregivers[idCluster][idInCluster]
        # burnout states are 1-based; convert to a 0-based matrix row
        line = int(caregiver.getBurnout() -1)
        it = 0
        while it < len(self.__markovMatrix[idCluster][timePeriod][line]):
            sommeCumul+=self.__markovMatrix[idCluster][timePeriod][line][it]
            if valRand <= sommeCumul:
                caregiver.setBurnout(float(it+1))
                break
            it+=1
    def printTableCaregivers(self):
        # Debug helper: print the first five caregivers.
        for i in range(5):
            print('*'*33)
            print(self.__listeCaregivers[i])
    def __repr__(self):
        return " La classe Population of caregiver comporte: nbCluster({}), nbCaregiverPerCluster({}), nbStatesPerCluster({}), timeHorizon({})".format(self.__nbCluster, self.__nbCaregiversPerCluster, self.__nbrStatePerCluster,
               self.__timeHorizon)
| [
"batata.oussama@gmail.com"
] | batata.oussama@gmail.com |
02ce6a6dc210e59843e1b6067deefc5a2b0e3d32 | ac9ae2035fee25ebe989d1a7482fbfc416493870 | /impCheck.py | 0b62776da2c1b63eb8924f568bfd74fe95ff0b98 | [] | no_license | namanitta/PE-JsonParser | b827f86a11253b366d0af8257c68c9ef28e59a06 | 367a0ea19d0e5bc17ef5489a805118563d4beb70 | refs/heads/master | 2022-01-16T15:47:23.977719 | 2019-07-21T22:27:45 | 2019-07-21T22:27:45 | 198,111,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | import os
# Directory of per-sample text reports produced by the PE JSON parser.
fp = "/home/cuckoo/Desktop/scripts/PE-Attributes/PE_Jsonparser/results/maldb/txt_files/"
for r, d, f in os.walk(fp):
    for files in f:
        # BUG FIX: build the path from the directory actually being walked;
        # the old "fp + files" was wrong for files inside sub-directories.
        mal = os.path.join(r, files)
        # BUG FIX: use a context manager so every report file is closed
        # (the old code leaked one open file handle per report).
        with open(mal) as fi:
            for line in fi:
                if "Imphash" in line:
                    # The value follows the last ':' on the line.
                    en = line.rfind(":")
                    v = line[en+1:].strip()
                    # Skip samples whose Imphash could not be computed.
                    if v != "None":
                        print(v)
| [
"noreply@github.com"
] | noreply@github.com |
aea3f3887f24cd7bd8e675548e6a6e5ed1ffd007 | ee795e3a8841fe69866d1fd7cbe808e3e9889a5e | /config/models.py | a326e0485ed425df64bd8890e2965b61ba27f99e | [] | no_license | godlike1215/mysite_3 | 94115ab8b08f2afa37e7f7fd4516ecf5522dcb0a | 7a4c3522a60a49769ff5885fce012acdc3f81dee | refs/heads/master | 2022-11-30T14:15:55.459105 | 2019-09-23T09:35:46 | 2019-09-23T09:35:46 | 202,091,447 | 0 | 0 | null | 2022-11-22T02:10:11 | 2019-08-13T07:43:10 | Python | UTF-8 | Python | false | false | 2,823 | py | from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from django.template.loader import render_to_string
class Link(models.Model):
    """Friend-link entry (title + URL) shown on the blog, owned by a user."""
    STATUS_NORMAL = 1
    STATUS_DELETE = 0
    # (value, human-readable label) pairs for the `status` choices field
    STATUS_ITEMS = (
        (STATUS_NORMAL, '正常'),
        (STATUS_DELETE, '删除'),
    )
    title = models.CharField(max_length=50, verbose_name='标题')
    href = models.URLField(verbose_name='链接')
    status = models.PositiveIntegerField(default=STATUS_NORMAL, choices=STATUS_ITEMS, verbose_name='状态')
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    owner = models.ForeignKey(User, verbose_name='作者')
    # weight 1-5; higher weight links are displayed first
    weight = models.PositiveIntegerField(default=1, choices=zip(range(1, 6),
                                                                range(1, 6)), verbose_name='权重',
                                         help_text='权重高展示顺序靠前')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name_plural = verbose_name = '友链'
class SideBar(models.Model):
    """Configurable sidebar widget: raw HTML, latest/hot posts, or recent comments."""
    STATUS_SHOW = 1
    STATUS_HIDE = 0
    STATUS_ITEMS = (
        (STATUS_SHOW, '展示'),
        (STATUS_HIDE, '隐藏'),
    )
    # Widget rendering modes (see content_html below).
    DISPLAY_HTML = 1
    DISPLAY_LATEST = 2
    DISPLAY_HOT = 3
    DISPLAY_COMMENT = 4
    SIDE_TYPE = (
        (DISPLAY_HTML, 'HTML'),
        (DISPLAY_LATEST, '最新文章'),
        (DISPLAY_HOT, '最热文章'),
        (DISPLAY_COMMENT, '最近评论'),
    )
    title = models.CharField(max_length=50, verbose_name='标题')
    status = models.PositiveIntegerField(default=STATUS_HIDE, choices=STATUS_ITEMS, verbose_name='状态')
    created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
    owner = models.ForeignKey(User, verbose_name='作者')
    content = models.CharField(max_length=500, blank=True, verbose_name='内容',
                               help_text='如果设置不是HTML,可为空')
    display_type = models.PositiveIntegerField(default=DISPLAY_HTML, choices=SIDE_TYPE, verbose_name='展示类型')
    def __str__(self):
        return self.title
    class Meta:
        verbose_name_plural = verbose_name = '侧边栏'
    @classmethod
    def get_sidebars(cls):
        # Only sidebars explicitly marked as visible.
        return cls.objects.filter(status=cls.STATUS_SHOW)
    @property
    def content_html(self):
        # Render this widget to an HTML fragment according to display_type;
        # returns '' for unknown types.  Imports are local to avoid a
        # circular dependency with blog/comment at module load time.
        result = ''
        from blog.models import Post
        from comment.models import Comment
        if self.display_type == self.DISPLAY_HTML:
            # Raw HTML stored directly in `content`.
            result = self.content
        elif self.display_type == self.DISPLAY_LATEST:
            context = {
                'posts': Post.latest_posts()
            }
            result = render_to_string('config/blocks/sidebar_posts.html', context)
        elif self.display_type == self.DISPLAY_HOT:
            context = {
                'posts': Post.hot_posts()
            }
            result = render_to_string('config/blocks/sidebar_posts.html', context)
        elif self.display_type == self.DISPLAY_COMMENT:
            context = {
                'comments': Comment.objects.filter(status=Comment.STATUS_NORMAL)
            }
            result = render_to_string('config/blocks/sidebar_comments.html', context)
        return result
| [
"568726669@qq.com"
] | 568726669@qq.com |
e58160be043c25f1567117706578c6627e844ccb | bf72636241a871d9a7519a577395f9d1fd7b38c2 | /tools_box/_selling/doctype/daily_route_activity/daily_route_activity.py | cfad18b8a1834abd2997ab43008e8996ba9faa94 | [
"MIT"
] | permissive | Athenolabs/Tools-Box | fc6400d9d88cc8ba0a3d48e38a0918f0022ce914 | c4e4e368a0bec115f84bc33ae011d7e0fd02932f | refs/heads/master | 2021-01-23T10:58:36.243182 | 2017-05-30T13:44:04 | 2017-05-30T13:44:04 | 93,116,515 | 2 | 1 | null | 2017-06-02T01:58:32 | 2017-06-02T01:58:31 | null | UTF-8 | Python | false | false | 284 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DailyRouteActivity(Document):
    """Frappe DocType controller for Daily Route Activity; no custom server-side behavior."""
    pass
| [
"masonarmani38@gmail.com"
] | masonarmani38@gmail.com |
8d9fd56181d25fbff2f7f82a19a948a12105bc70 | 849e8fb216a130ef1c438f46f3a63592537368c5 | /zhaopin/zhaopin.py | ea8067c3ad9188815f33f97a0ab144b647305142 | [] | no_license | xyls2011/python | 978db70a735fe2e2168ca7cf50a4e6aa07e04888 | 63ef2b3f4cb00ef23b16aa72785bcda1a2b7b756 | refs/heads/master | 2020-06-01T07:59:54.796530 | 2019-06-21T12:35:50 | 2019-06-21T12:35:50 | 190,709,781 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | from requests_html import HTMLSession
import re
from matplotlib import pyplot as plt

# Regex for "<min>K-<max>K" salary ranges in the listing markup
# (raw strings fix the previously-invalid \d escapes).
salary_element = r'<p.*>(\d+)K-(\d+)K</p>'
salary = []
# The "next page" button is disabled on the last results page.
disabled_button_element = r'<button.* disabled="disabled">下一页</button>'
disabled_button = None
p = 1
while not disabled_button:
    print('crawling page ' + str(p))
    link = 'https://sou.zhaopin.com/?p=' + str(p) + '&jl=530&kw=爬虫工程师&kt=3'
    session = HTMLSession()
    try:
        page = session.get(link)
        # Render JavaScript so the listings are present in the HTML.
        page.html.render(sleep=3)
        salary += re.findall(salary_element, page.html.html)
        disabled_button = re.findall(disabled_button_element, page.html.html)
    finally:
        # BUG FIX: close each session inside the loop; the old code created a
        # new session per iteration but only closed the last one.
        session.close()
    p += 1
    if p >= 2:
        # Debug cap: only crawl a single page for now.
        break
print(salary)
# Average salary per posting, e.g. the midpoint of [12, 15] is 13.5.
salary = [(int(s[0]) + int(s[1])) / 2 for s in salary]
# Bucket the averages into three bands for display.
low_salary, middle_salary, high_salary = 0, 0, 0
for s in salary:
    if s <= 15:
        low_salary += 1
    elif s > 15 and s <= 30:
        middle_salary += 1
    else:
        high_salary += 1
# Figure size: width, height in inches.
plt.figure(figsize=(6, 9))
# Pie-chart labels, one per salary band.
labels = ['Python salary < 15K', 'Python salary = 15K~30K', 'Python salary > 30K']
data = [low_salary, middle_salary, high_salary]
plt.pie(data, labels=labels)
# Equal axis scaling so the pie renders as a circle.
plt.axis('equal')
plt.legend()
plt.show()
"1104869984@qq.com"
] | 1104869984@qq.com |
15f753d76464d7abfd4fcf2a4b8dd8743d72fd97 | 462a30862d0303d1d1beeebb2d33bb2a625d5336 | /catchpy/settings/local.py | 995d6763bc4efc46baa39e79fbf3ac479732de8e | [] | no_license | nmaekawa/catchpy | 5eca9715c23e71ce4f6ef489607da0b0e46a14a3 | 50783648804e5b6ce57dcb7d00ba1038fd23ffdc | refs/heads/master | 2023-08-03T09:25:44.838480 | 2023-04-18T19:05:20 | 2023-04-18T19:05:20 | 98,905,832 | 10 | 3 | null | 2023-08-14T18:47:50 | 2017-07-31T15:50:19 | Python | UTF-8 | Python | false | false | 506 | py | from .dev import *
DEBUG = True
# Django Extensions
# http://django-extensions.readthedocs.org/en/latest/
try:
import django_extensions
INSTALLED_APPS += ['django_extensions']
except ImportError:
pass
# Django Debug Toolbar
# http://django-debug-toolbar.readthedocs.org/en/latest/
try:
import debug_toolbar
INSTALLED_APPS += ['debug_toolbar']
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware']
DEBUG_TOOLBAR_PATCH_SETTINGS = True
except ImportError:
pass
| [
"nmaekawa@g.harvard.edu"
] | nmaekawa@g.harvard.edu |
b8405ccbf1b037622cfb344604a81fcef9306518 | 1f5f8f95530003c6c66419519d78cb52d21f65c0 | /projects/golem_gui/tests/users/create_user/add_project_permission.py | 5ac16e37d8543470a90751eb0751b5bc624ee3b4 | [] | no_license | golemhq/golem-tests | c5d3ab04b1ea3755d8b812229feb60f513d039ac | dff8fd3a606c3d1ef8667aece6fddef8ac441230 | refs/heads/master | 2023-08-17T23:05:26.286718 | 2021-10-04T20:34:17 | 2021-10-04T20:34:17 | 105,579,436 | 4 | 1 | null | 2018-11-19T00:14:24 | 2017-10-02T20:05:55 | Python | UTF-8 | Python | false | false | 513 | py | from golem import actions
from projects.golem_gui.pages import common
from projects.golem_gui.pages.users import create_user
def setup(data):
    """Log into Golem as the admin user and open the Create User page."""
    common.access_golem(data.env.url, data.env.admin)
    create_user.navigate_to_page()
def test(data):
    """Add an admin permission on project1 and verify it appears in the table."""
    project = 'project1'
    permission = 'admin'
    create_user.select_project(project)
    create_user.select_permission(permission)
    actions.click(create_user.add_permission_button)
    create_user.assert_project_permission_in_table(project, permission)
| [
"luciano@lucianorenzi.com"
] | luciano@lucianorenzi.com |
f36d77833c5ad2f75fe439282ecc35502d0153ca | 2ff5cf209663c660b38092e2c02f82b8114062fa | /api/utils/htmlreport.py | a160de4e2fb0f41a418318c879133edd4ab4770c | [] | no_license | wuchf/shuangshi-api-test | c48791c1e0a5e18c0fb4a252178514a19e2678ee | 473e933a751ee6485bfb7dc3455c91eaf8471ff5 | refs/heads/master | 2020-05-04T15:34:25.973665 | 2019-04-03T08:54:55 | 2019-04-03T08:54:55 | 179,246,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,320 | py | import sys
HTML_TMP='''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link href="../../static/css/bootstrap.min.css" rel="stylesheet">
<script src="../../static/js/jquery-3.2.1.min.js"></script>
<script src="../../static/js/bootstrap.min.js"></script>
<script src="http://cdn.hcharts.cn/highcharts/highcharts.js"></script>
<style type="text/css">
#chart{
width: 550px;
height: 500px;
margin: 0;
}
.fail{
color: red;
font-weight:bold
}
td{
{#max-width: 80px;#}
word-wrap: break-word;
table-layout:fixed;
word-break:break-all;
}
</style>
<title>%(title)s</title>
</head>
<body>
%(script)s
%(body)s
</body>
'''
HTML_SCRIPT='''
<script language="JavaScript">
$(document).ready(function() {
var chart = {
plotBackgroundColor: null,
plotBorderWidth: null,
plotShadow: false
};
var title = {
text: '测试结果比例'
};
var tooltip = {
pointFormat: '{series.name}:{point.percentage:.2f}%%'
};
var plotOptions = {
pie: {
allowPointSelect: true,
cursor: 'pointer',
dataLabels: {
enabled: true,
format: '<b>{point.name}</b>: {point.percentage:.2f}%%',
style: {
color: (Highcharts.theme && Highcharts.theme.contrastTextColor) || 'black'
}
}
}
};
var series= [{
type: 'pie',
name: '比例',
data: [
['error', %(fail)d],
{
name: 'pass',
y: %(success)d,
sliced: true,
selected: true
},
]
}];
var json = {};
json.chart = chart;
json.title = title;
json.tooltip = tooltip;
json.series = series;
json.plotOptions = plotOptions;
$('#chart').highcharts(json);
});
function clickall() {
$("#all").toggle();
$("#fail").hide();
}
function clickfail() {
$("#fail").toggle();
$("#all").hide();
}
</script>
'''
HTML_BODY='''
<div class="container" >
<h2>%(title)s</h2>
<div class="col-md-12">
<div class="col-md-3">
<h3>测试概要信息</h3>
<p> 总计运行时间:%(time)s</p>
<p> 运行用例总数:<a href='javascript:clickall()'>%(total)s</a>,
<p> 执行成功用例数:%(success)s</p>
<p> 执行失败用例数:%(fail)s</p>
<p><a href="#" onclick="clickall();return false;">查看结果详情</a></p>
<p><a href="#" onclick="clickfail();return false;">查看失败信息</a></p>
</div>
<div class="col-md-9">
<div id="chart" style="width: 650px; height: 350px; margin: 0 auto"></div>
</div>
</div>
<div id='all' style="display: none">
<h3>全部详细信息</h3>
<table class="table table-striped table-bordered table-hover" width="100">
<caption>详细结果</caption>
<th >用例名</th>
<th >用例序号</th>
<th >pass/fail</th>
<th >预期结果</th>
<th >实际结果</th>
%(tr)s
</table>
</div>
<div id='fail' style="display: none">
<h3>失败详细信息</h3>
<table class="table table-striped table-bordered table-hover" width="100">
<caption>详细结果</caption>
<th >用例名</th>
<th >用例序号</th>
<th >fail</th>
<th >预期结果</th>
<th >实际结果</th>
%(err)s
</table>
</div>
</div>
'''
TABLE_INFO='''
<tr>
<td width="15%%" rowspan="%(n)s">%(name)s</td>
<td width="7%%" rowspan="%(n)s">%(seqid)s</td>
<td width="8%%"class="%(result)s" rowspan="%(n)s">%(result)s</td>
</tr>
%(res)s
'''
TR = '''
<tr><td width="35%%">%s</td><td width="35%%">%s</td></tr>
'''
class result():
    """Render an HTML test report from a list of per-case result dicts.

    Each entry of *results* is a dict with keys:
        name     -- test case name
        seqid    -- case number within the test
        result   -- 'pass' or 'fail'
        expect   -- expected value(s); a multi-line string produces one
                    table row per line
        response -- actual value; a list pairs element-wise with the expect
                    lines, a plain string is shown next to every expect line

    The report is written as UTF-8 *bytes*, so *stream* should be opened in
    binary mode (the sys.stdout default only works where stdout accepts
    bytes).
    """

    DEFAULT_TITLE = '测试报告'

    def __init__(self, results, t_begin, t_end, stream=sys.stdout, title=None):
        self.stream = stream
        # Fall back to the default title when none was supplied.
        self.title = title if title else self.DEFAULT_TITLE
        self.results = results
        self.fail = [x for x in self.results if x['result'] == 'fail']
        self.success = [x for x in self.results if x['result'] == 'pass']
        self.time = t_end - t_begin

    def _render_case(self, case):
        """Format one case as a TABLE_INFO row group.

        Mutates *case* in place: adds 'n' (the rowspan, i.e. number of
        expect lines + 1) and 'res' (the rendered expect/response rows).
        """
        exp = case.get('expect').split('\n')
        n = len(exp)
        case['n'] = n + 1
        tr = ''
        if n > 1:
            resp = case.get('response')
            for i in range(n):
                # A list response pairs element-wise with the expect lines;
                # anything else is repeated next to every expect line.
                tr += TR % (exp[i], resp[i] if isinstance(resp, list) else resp)
        else:
            tr += TR % (case.get('expect'), case.get('response'))
        case['res'] = tr
        return TABLE_INFO % case

    def _generate_data(self):
        """Build the HTML_BODY section: the full table plus the failure table.

        Bug fix: the failure loop previously type-checked the stale `info`
        variable left over from the all-results loop instead of the failing
        case itself, so list responses of failed cases could be rendered
        against the wrong case's type.
        """
        rows = [self._render_case(info) for info in self.results]
        fails_li = [self._render_case(err) for err in self.fail]
        return HTML_BODY % dict(
            err=''.join(fails_li),
            tr=''.join(rows),
            title=self.title,
            time=self.time,
            total=len(self.results),
            success=len(self.success),
            fail=len(self.fail),
        )

    def chart(self):
        """Return the HTML_SCRIPT chunk with pass/fail percentages filled in."""
        if len(self.results) == 0:
            # Avoid division by zero when no case was run.
            return HTML_SCRIPT % dict(fail=0, success=0)
        total = len(self.results)
        return HTML_SCRIPT % dict(
            fail=len(self.fail) / total * 100,
            success=len(self.success) / total * 100,
        )

    def generatereport(self):
        """Assemble the complete page and write it as UTF-8 bytes to stream."""
        output = HTML_TMP % dict(
            title=self.title,
            body=self._generate_data(),
            script=self.chart(),
        )
        self.stream.write(output.encode('utf-8'))
if __name__ == '__main__':
    # Demo run. Bug fix: the constructor signature is
    # result(results, t_begin, t_end, stream, title); the old call
    # `result(results, time, fp)` passed the file object as t_end, which
    # raised a TypeError when __init__ computed `t_end - t_begin`.
    t_begin = 0.0
    t_end = 0.12
    results=[{'name': 'ws通信', 'seqid': 1, 'expect': 'reply.*?"code":0.*?"msg":"ok"', 'response': ['{"type":"reply","cmd":"join","code":0,"msg":"ok","reqid":123}'], 'result': 'pass'}, {'name': 'ws通信', 'seqid': 2, 'expect': 'reply.*?"code":0.*?"msg":"ok"', 'response': ['{"type":"reply","cmd":"heart","code":0,"msg":"ok","reqid":123}'], 'result': 'pass'}, {'name': 'ws通信', 'seqid': 3, 'expect': 'reply.*?"code":0.*?"msg":"ok"', 'response': ['{"type":"reply","cmd":"get_room_users","code":0,"msg":"ok","reqid":123,"data":[{"room_id":"r_65","user_id":"t_91","role":"t","guid":"6c983908fb4635f6777457d9647d5604","user_info":{"audio_status":0,"stream_id":"peiyou-o2o_beijing2_beijingerxiao2&__03"},"login_time":1539155211}]}'], 'result': 'pass'}, {'name': 'ws通信', 'seqid': 4, 'expect': 'reply.*?"code":-15', 'response': ['{"type":"reply","cmd":"send","code":-15,"msg":"all user_id does not exist","reqid":123}'], 'result': 'pass'}, {'name': 'ws通信', 'seqid': 5, 'expect': 'reply.*?"code":0.*?"msg":"ok"', 'response': ['{"type":"reply","cmd":"update_user_info","code":0,"msg":"ok","reqid":null}'], 'result': 'pass'}, {'name': 'ws通信', 'seqid': 6, 'expect': 'reply.*?"code":0.*?"msg":"ok"', 'response': ['{"type":"reply","cmd":"quit","code":0,"msg":"ok","reqid":123}'], 'result': 'pass'}]
    # Open in binary mode: generatereport() writes UTF-8 encoded bytes.
    with open("result.html", 'wb') as fp:
        res = result(results, t_begin, t_end, fp)
        res.generatereport()
| [
"noreply@github.com"
] | noreply@github.com |
a976c47dc9b2b9ea9159315a66f28ffebac319cd | 05fd81538aa32c0c3fa3e8762a5146342478421b | /NN_skin_cancer/cfgs/2020/ham_effb1_meta_nl.py | c913d659d2cd0126b43f716e7e0d327c2ecd9c0f | [
"MIT"
] | permissive | GavinHU66/DebugEntity | b0a23b51b339a4f70dcb98f6cc419e151cc94420 | 21f38f01bdfbbc363a73f640331c6f04a121cf82 | refs/heads/master | 2023-04-11T10:25:20.543185 | 2021-04-14T05:20:25 | 2021-04-14T05:20:25 | 300,828,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,021 | py | import numpy as np
def init(mdlParams_):
    """Return the training configuration for HAM10000 classification with
    EfficientNet-B1 plus patient metadata (age/sex) fusion.

    The *mdlParams_* argument is accepted for call-site compatibility but is
    not read; a freshly built configuration dict is returned.
    """
    num_classes = 7
    # Factor for scaling up the FC layer when moving to larger EfficientNets.
    scale_up_with_larger_b = 1.0

    mdlParams = {
        # --- Paths ---
        'saveDir': './models/model_ham_effb1_meta_nl',  # summaries and model go here
        'model_load_path': './models/model_ham_effb1',  # pretrained CNN to load
        'dataDir': './Data',                            # data is loaded from here
        'with_meta': True,
        'load_previous': True,
        'meta_path': './ham_meta.pkl',

        # --- Model selection ---
        'model_type': 'efficientnet-b1',
        'numClasses': num_classes,
        'balance_classes': 7,
        'numOut': num_classes,          # number of network outputs == classes
        # Scale up crop/input sizes for b1-b7 variants.
        'crop_size': [280, 280],
        'input_size': [240, 240, 3],
        'focal_loss': True,

        # --- Training parameters ---
        'batchSize': 20,
        'learning_rate': 0.000015,      # initial learning rate
        'lowerLRAfter': 25,             # lower LR after this many epochs w/o improvement
        'lowerLRat': 50,                # with no validation set, start lowering LR here
        'LRstep': 5,                    # divide learning rate by this value
        'training_steps': 60,           # maximum number of training iterations
        'display_step': 2,              # display error every N steps
        'scale_targets': False,
        'peak_at_testerr': False,       # peeking at test error while training: don't
        'print_trainerr': False,
        'subtract_set_mean': False,
        'setMean': np.array([0.0, 0.0, 0.0]),
        'setStd': np.array([1.0, 1.0, 1.0]),
        'fold': 5,                      # cross-validation fold

        # --- Data augmentation ---
        'autoaugment': False,
        'flip_lr_ud': True,
        'full_rot': 180,
        'scale': (0.8, 1.2),
        'shear': 10,
        'cutout': 16,
        'only_downsmaple': False,       # key spelling kept for compatibility

        # --- Metadata branch settings ---
        'meta_features': ['age_0.0', 'age_5.0',
                          'age_10.0', 'age_15.0', 'age_20.0', 'age_25.0', 'age_30.0', 'age_35.0',
                          'age_40.0', 'age_45.0', 'age_50.0', 'age_55.0', 'age_60.0', 'age_65.0',
                          'age_70.0', 'age_75.0', 'age_80.0', 'age_85.0', 'sex_female',
                          'sex_male', 'sex_unknown'],
        'fc_layers_before': [256, 256],
        'fc_layers_after': [int(1024 * scale_up_with_larger_b)],
        'freeze_cnn': True,
        'learning_rate_meta': 0.00001,
        'dropout_meta': 0.4,            # normal dropout in the FC layers
    }
    return mdlParams
| [
"ec2-user@ip-172-31-16-196.us-east-2.compute.internal"
] | ec2-user@ip-172-31-16-196.us-east-2.compute.internal |
59dd09fa952c05fb2664214cd30c0473025458e0 | 43e53df2f2bc1779c2896541940a235e66a02b02 | /day18/qq发送消息.py | ab63f8dfee03cb49856868ecbdb35ef1e150b795 | [] | no_license | songdanlee/python_code_basic | ddb3276b0473a261423c43d5d8e7a1ff038d5c51 | f32cd4dc9670e55ffa6abe04c9184bfa5d8bbc41 | refs/heads/master | 2020-07-14T21:05:30.471486 | 2019-08-30T14:55:51 | 2019-08-30T14:55:51 | 205,402,606 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | import os
# Mapping from contact name to QQ number.
clientDict = {'lz':'513278236',
              '穆梓':'318750798'
              } # 人名 和对应的 qq号

# Bug fix: the prompted name was previously discarded by an unconditional
# hard-coded override (a debugging leftover), so the user's input was ignored.
Name = input('Name of the Receiver: ')
if Name not in clientDict:
    # Fail with a clear message instead of a raw KeyError.
    raise SystemExit('Unknown receiver: ' + Name)

# Open a QQ chat window with the selected contact (Windows-only URI scheme).
os.system('start tencent://message/?uin=' + clientDict[Name])
"2533636371@qq.com"
] | 2533636371@qq.com |
41363247c358198e8cecea4460b8076fd9c34398 | 01301e5f486883865e3696f38ef913a232958343 | /antlir/compiler/test_images/print_ok.py | a38dabfa12fe5a4e77e3b8b8fd720897c11764b0 | [
"MIT"
] | permissive | SaurabhAgarwala/antlir | 85fb09c87dafde56622b4107224b41f873f66442 | d9513d35d3eaa9d28717a40057a14d099c6ec775 | refs/heads/main | 2023-06-25T09:05:30.619684 | 2021-07-01T23:04:57 | 2021-07-01T23:06:11 | 382,355,446 | 0 | 0 | MIT | 2021-07-02T13:30:39 | 2021-07-02T13:30:39 | null | UTF-8 | Python | false | false | 294 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Print the unicode string "ok" via the `print` function to stdout, on 1 line."""
print("ok")
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.