blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 246 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
230e3ed27028d62b79c582aac2591230e2b0549c | 2630b4c15aecc7df964f35862a20d16f513f33bf | /read.py | 1af5f8c9884c11a82ee1f9601d2087ed82efbba2 | [] | no_license | tejas01101001/Intro-to-Webd | 7d7f7dc67b88ef1cfd812ce87efc66aa31852786 | 8ce41a33b4403d1cad605d8003427381e552b3af | refs/heads/master | 2022-02-07T13:43:53.726967 | 2022-02-05T12:55:14 | 2022-02-05T12:55:14 | 161,655,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,316 | py | # Read a json file and print it
import json
import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from torch import nn
from torch import optim
import torchvision
import torch.nn.functional as F
from torchvision import datasets, transforms, models
import torchvision.transforms as transforms
import torch.utils.data as data
from PIL import Image
import os
import os.path
from torchvision.models import resnet50
# # Dataloader for COCO styled dataset
# class DUO(data.Dataset):
# """`MS Coco Detection <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
# Args:
# root (string): Root directory where images are downloaded to.
# annFile (string): Path to json annotation file.
# transform (callable, optional): A function/transform that takes in an PIL image
# and returns a transformed version. E.g, ``transforms.ToTensor``
# target_transform (callable, optional): A function/transform that takes in the
# target and transforms it.
# """
# def __init__(self, root, annFile, transform=None, target_transform=None):
# from pycocotools.coco import COCO
# self.root = root
# self.coco = COCO(annFile)
# self.ids = list(self.coco.imgs.keys())
# self.transform = transform
# self.target_transform = target_transform
# def __getitem__(self, index):
# """
# Args:
# index (int): Index
# Returns:
# tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
# """
# coco = self.coco
# img_id = self.ids[index]
# ann_ids = coco.getAnnIds(imgIds=img_id)
# target = coco.loadAnns(ann_ids)
# path = coco.loadImgs(img_id)[0]['file_name']
# img = Image.open(os.path.join(self.root, path)).convert('RGB')
# if self.transform is not None:
# img = self.transform(img)
# if self.target_transform is not None:
# target = self.target_transform(target)
# return img, target
# def __len__(self):
# return len(self.ids)
# def __repr__(self):
# fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
# fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
# fmt_str += ' Root Location: {}\n'.format(self.root)
# tmp = ' Transforms (if any): '
# fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
# tmp = ' Target Transforms (if any): '
# fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
# return fmt_str
# transform = torchvision.transforms.Compose([
# torchvision.transforms.ToTensor(),
# torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
# std=[0.229, 0.224, 0.225]),
# ])
# data = DUO('/home/tejas/DUO/images/test/', '/home/tejas/DUO/annotations/instances_test.json',transform=transform)
# train_loader = DataLoader(data, batch_size=1, shuffle=True, num_workers=0)
# class DETR(nn.Module):
# """
# Demo DETR implementation.
# Demo implementation of DETR in minimal number of lines, with the
# following differences wrt DETR in the paper:
# * learned positional encoding (instead of sine)
# * positional encoding is passed at input (instead of attention)
# * fc bbox predictor (instead of MLP)
# The model achieves ~40 AP on COCO val5k and runs at ~28 FPS on Tesla V100.
# Only batch size 1 supported.
# """
# def __init__(self, num_classes, hidden_dim=256, nheads=8,
# num_encoder_layers=6, num_decoder_layers=6):
# super().__init__()
# # create ResNet-50 backbone
# self.backbone = resnet50()
# del self.backbone.fc
# # create conversion layer
# self.conv = nn.Conv2d(2048, hidden_dim, 1)
# # create a default PyTorch transformer
# self.transformer = nn.Transformer(
# hidden_dim, nheads, num_encoder_layers, num_decoder_layers)
# # prediction heads, one extra class for predicting non-empty slots
# # note that in baseline DETR linear_bbox layer is 3-layer MLP
# self.linear_class = nn.Linear(hidden_dim, num_classes + 1)
# self.linear_bbox = nn.Linear(hidden_dim, 4)
# # output positional encodings (object queries)
# self.query_pos = nn.Parameter(torch.rand(100, hidden_dim))
# # spatial positional encodings
# # note that in baseline DETR we use sine positional encodings
# self.row_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
# self.col_embed = nn.Parameter(torch.rand(50, hidden_dim // 2))
# def forward(self, inputs):
# # propagate inputs through ResNet-50 up to avg-pool layer
# x = self.backbone.conv1(inputs)
# x = self.backbone.bn1(x)
# x = self.backbone.relu(x)
# x = self.backbone.maxpool(x)
# x = self.backbone.layer1(x)
# x = self.backbone.layer2(x)
# x = self.backbone.layer3(x)
# x = self.backbone.layer4(x)
# # convert from 2048 to 256 feature planes for the transformer
# h = self.conv(x)
# # construct positional encodings
# H, W = h.shape[-2:]
# pos = torch.cat([
# self.col_embed[:W].unsqueeze(0).repeat(H, 1, 1),
# self.row_embed[:H].unsqueeze(1).repeat(1, W, 1),
# ], dim=-1).flatten(0, 1).unsqueeze(1)
# # propagate through the transformer
# h = self.transformer(pos + 0.1 * h.flatten(2).permute(2, 0, 1),
# self.query_pos.unsqueeze(1)).transpose(0, 1)
# # finally project transformer outputs to class labels and bounding boxes
# return {'pred_logits': self.linear_class(h),
# 'pred_boxes': self.linear_bbox(h).sigmoid()}
# model = DETR(num_classes=4)
# model.train()
# # Train the model
# optimizer = optim.Adam(model.parameters(), lr=0.001)
# # Define Device
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# # Move model to device
# model = model.to(device)
# for i, sample in enumerate(train_loader):
# print("HI")
# image = sample['image']
# annotation = sample['annotation']
# category = sample['category']
# print(category['name'])
# print(annotation['bbox'])
# print(annotation['segmentation'])
# #
# image = image.numpy()
# image = np.transpose(image, (1, 2, 0))
# plt.imshow(image)
# plt.show()
# if i == 3:
# break
# print(image)
import csv

# Output CSV path and the columns it will contain.
filename = "test.csv"

# The annotation file is COCO-style JSON with (at least) these keys:
#   images:      file_name, height, width, id
#   annotations: segmentation, area, iscrowd, image_id, bbox, category_id, id, ignore
#   categories:  name, id
# Fix: use a context manager so the handle is closed, and json.load
# instead of json.loads(file.read()).
with open('/home/tejas/DUO/annotations/instances_test.json', 'r') as ann_file:
    data = json.load(ann_file)

fields = ['image_id', 'width', 'height', 'bbox', 'source']
rows = []
# Fix: iterate data['annotations'] directly instead of scanning every
# top-level key, and hoist the repeated image/category lookups.
# NOTE(review): assumes image and category ids are 1-based and stored in
# order (id N at index N-1) -- confirm against the annotation file.
for ann in data['annotations']:
    image = data['images'][ann['image_id'] - 1]
    category = data['categories'][ann['category_id'] - 1]
    rows.append([image['id'], image['width'], image['height'],
                 ann['bbox'], category['name']])

with open(filename, 'w') as csvfile:
    # creating a csv writer object
    csvwriter = csv.writer(csvfile)
    # writing the header row, then the data rows
    csvwriter.writerow(fields)
    csvwriter.writerows(rows)
# rootdir ='/home/tejas/DUO/images/train'
# for i in range(9):
# img = mpimg.imread(rootdir + data['images'][i]['file_name'])
# plt.subplot(331 + i)
# plt.imshow(img)
# plt.axis('off')
# plt.title(data['images'][i]['file_name'])
# plt.show()
# for i in range(9):
# img = mpimg.imread(rootdir + data['images'][i]['file_name'])
# plt.subplot(331 + i)
# plt.imshow(img)
# plt.axis('off')
# plt.title(data['images'][i]['file_name'])
# for j in range(len(data['annotations'])):
# #print(data['annotations'][j]['image_id'])
# if data['annotations'][j]['image_id'] == (i+1):
# # Plot a rectangle for a bounding box
# anchor = data['annotations'][j]['bbox'][0:2]
# width = data['annotations'][j]['bbox'][2]
# height = data['annotations'][j]['bbox'][3]
# plt.gca().add_patch(plt.Rectangle(anchor, width, height, fill=False, edgecolor='red', linewidth=2))
# plt.gca().text(data['annotations'][j]['bbox'][0], data['annotations'][j]['bbox'][1], data['categories'][data['annotations'][j]['category_id']-1]['name'], bbox={'facecolor':'white', 'alpha':0.5, 'pad':1})
# plt.show()
| [
"noreply@github.com"
] | tejas01101001.noreply@github.com |
0e61c375d1ae5de076e85565992c7111607dea39 | 6c5daca39b09b99f901a9c880eeccec8990ddb57 | /graph-1.py | 7fcf928a57b26e3e87d4d6230738310ac39475bd | [] | no_license | wshBak/DataStructuresandAlgorithms | 586b600203ce77fbd3641aa5d797e0d188261a61 | eb9568d4981f233d7ee232558d1d179e261c1bc6 | refs/heads/master | 2020-04-19T15:55:54.449873 | 2019-01-30T06:13:40 | 2019-01-30T06:13:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | from copy import deepcopy
# computing 2-hop neighbors, using adjacency matrix
def two_hop(G):
    """Return the adjacency matrix of paths of length <= 2 in G.

    G is a square 0/1 adjacency matrix (list of lists).  The result keeps
    every existing edge and additionally marks (i, j) whenever some k has
    edges i->k and k->j.
    """
    G2 = deepcopy(G)
    n = len(G)
    for i in range(n):
        for j in range(n):
            if G2[i][j]: continue
            for k in range(n):
                if G[i][k]==G[k][j]==1:
                    # Bug fix: was ``G2[i][j]==1``, a no-op comparison,
                    # so the function never recorded any 2-hop edge.
                    G2[i][j] = 1
                    break
    return G2
# computing closure graph of G
def closure(G):
    """Transitive closure of G, computed by iterating two-hop expansion
    until a fixed point is reached."""
    previous = G
    current = two_hop(G)
    while current != previous:
        previous, current = current, two_hop(current)
    return current
# Demo: the 2x2 identity matrix gains no new 2-hop edges.
G = [[1,0],[0,1]]
print(two_hop(G))
| [
"noreply@github.com"
] | wshBak.noreply@github.com |
e2e2a92911571cea2a9d98533be6aee4f53dafc4 | 5e726f41a95e1fc79ed98b777ec85a386f7c7a13 | /Model/SqlAlchemy/Weibo/UserItemModel.py | f58664278378db417b03b6103d85c8edd0e81ac1 | [] | permissive | 825477418/XX | a3b43ff2061f2ec7e148671db26722e1e6c27195 | bf46e34749394002eec0fdc65e34c339ce022cab | refs/heads/master | 2022-08-02T23:51:31.009837 | 2020-06-03T13:54:09 | 2020-06-03T13:54:09 | 262,987,137 | 0 | 0 | MIT | 2020-06-03T13:54:10 | 2020-05-11T08:43:30 | null | UTF-8 | Python | false | false | 1,223 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/25 16:53
# @File : UserItemModel
# @Des :
# @Email : billsteve@126.com
import XX.Model.SqlAlchemy.BaseModel as BM
from sqlalchemy import Column, String
from sqlalchemy.dialects.mysql import INTEGER, TINYINT
from sqlalchemy.ext.declarative import declarative_base
# Module-level declarative base (and its metadata) shared by this model.
Base = declarative_base()
metadata = Base.metadata
class UserItem(Base, BM.BaseModel):
    """ORM mapping for one row of the ``user_items`` table."""
    __tablename__ = 'user_items'
    id = Column(INTEGER(11), primary_key=True)  # surrogate primary key
    uid = Column(INTEGER(11))  # id of the owning user (by naming)
    item_name = Column(String(60))
    item_content = Column(String(255))
    is_del = Column(TINYINT(1))  # soft-delete flag (0/1)
    create_ts = Column(INTEGER(11))  # timestamps stored as plain ints (by naming)
    update_ts = Column(INTEGER(11))
    def __init__(self, *arg, **kw):
        # Populate columns from keyword arguments only; positional *arg is
        # accepted but ignored.
        self.create_ts = kw.get("create_ts", None)
        self.id = kw.get("id", None)
        self.is_del = kw.get("is_del", None)
        self.item_content = kw.get("item_content", None)
        self.item_name = kw.get("item_name", None)
        # NOTE(review): this shadows the declarative ``metadata`` attribute
        # on the instance -- confirm it is intentional.
        self.metadata = kw.get("metadata", None)
        self.uid = kw.get("uid", None)
        self.update_ts = kw.get("update_ts", None)
if __name__ == '__main__':
    # Dev helper: presumably generates/prints an __init__ skeleton for this
    # model -- confirm against BM.createInitFunction.
    BM.createInitFunction(UserItem)
| [
"billsteve@126.com"
] | billsteve@126.com |
00b4a41f2ad39b55e50c19204fe9a4c3b544db35 | f854012bf8c055210bafd76dad0228dc504e2909 | /TwitterScraper.py | 9e16d9a4bb51d050490c3c1a13cc4d9851581eb1 | [] | no_license | mburakeker/Twitter-Sentiment-Analysis | 3c33c198f5dba320984a558103db4075ab73d302 | ea8a002318e9b6b7300c185fa03b16e6aedf1191 | refs/heads/master | 2020-04-01T17:26:36.763623 | 2018-10-17T09:22:52 | 2018-10-17T09:22:52 | 153,429,965 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | try:
import json
except ImportError:
import simplejson as json
from twitter import Twitter, OAuth, TwitterHTTPError, TwitterStream
import re
from collections import Counter
import pandas as pd
from __init__ import *
def Scrape():
    """Collect up to MAX_TWEET_COUNT tweets for TWEET_HASHTAG and dump to CSV.

    Pages backwards through the Twitter search API with ``max_id``, then
    sorts the collected tweets by favourite count, drops duplicate rows and
    writes the result to ``sampledataset.csv``.  Relies on the module-level
    credential/search constants imported from ``__init__``.
    """
    oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter = Twitter(auth=oauth)
    # First page of (non-retweet) results.
    iterator = twitter.search.tweets(q=TWEET_HASHTAG,count=100,tweet_mode='extended',lang=TWEET_LANGUAGE,exclude="retweets")
    df = pd.DataFrame(columns=['Tweet_ID','User_ID','Text','Favourite_Count'])
    for i in iterator['statuses']:
        df.loc[len(df)] = [i['id'],i['user']['id'],i['full_text'],i['favorite_count']]
    # Bug fix: a trailing comma made this a 1-tuple, so ``max_id`` below was
    # passed a tuple instead of the lowest tweet id.
    lowid = df["Tweet_ID"].min()
    while(df["Tweet_ID"].count()<MAX_TWEET_COUNT):
        # NOTE(review): if the API stops returning older tweets this loops
        # forever; behaviour kept as-is.
        iterator = twitter.search.tweets(q=TWEET_HASHTAG,count=100,tweet_mode='extended',lang=TWEET_LANGUAGE,max_id=lowid,exclude="retweets")
        for i in iterator['statuses']:
            df.loc[len(df)] = [i['id'],i['user']['id'],i['full_text'],i['favorite_count']]
        print(str(df["Tweet_ID"].count()),end=" - ")  # progress indicator
        lowid = df["Tweet_ID"].min()
    df = df.sort_values(by='Favourite_Count',ascending=False)
    print(df["Tweet_ID"].count())
    df = df.drop_duplicates(keep="first")
    df.to_csv("sampledataset.csv", encoding='utf-8', index=False)
| [
"Mehmet Burak Eker"
] | Mehmet Burak Eker |
7114e9711d1fbc991de8b742ae0bf07752a0f14a | 8d2b1765701f94f57d668200f042ef4a124dcc50 | /pythonOpenCV/src/opencv.py | 1d9d2813753a6ee4375bad90df69d31e5e76835b | [] | no_license | matthew-d-stringer/RandomProgramming | d221e5e355880a8375bd1d298e10bac4bdb7a82a | c70f5aff8e5a0b0c9e5939cd427d82882e7fce97 | refs/heads/master | 2022-10-16T04:40:32.674982 | 2019-03-18T06:34:06 | 2019-03-18T06:34:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | import cv2
import numpy as numpy
from grip import GripPipeline
def processImg(dir, gripCode):
    """Load one image, run it through the GRIP pipeline and display the
    2x-scaled result in a window titled with the image path."""
    source = cv2.imread(dir, cv2.IMREAD_COLOR)
    source = cv2.resize(source, (320, 240))
    processed = gripCode.process(source)
    scale = 2
    processed = cv2.resize(processed, (320 * scale, 240 * scale))
    cv2.imshow(dir, processed)
# Run every sample image through one shared GRIP pipeline instance; each
# call opens a window titled with the image path, and the single waitKey
# below keeps all windows open until a key is pressed.
dirBegin = '../resources/'
gripCode = GripPipeline()
processImg(dirBegin+'cargoship.png',gripCode)
processImg(dirBegin+'cargoship2.png',gripCode)
processImg(dirBegin+'cargoshipNoise.png',gripCode)
processImg(dirBegin+'confusingTarget.png',gripCode)
processImg(dirBegin+'missingPart.png',gripCode)
processImg(dirBegin+'multipleTargets.png',gripCode)
processImg(dirBegin+'target.png',gripCode)
cv2.waitKey(0)
cv2.destroyAllWindows() | [
"mdsflyboy@gmail.com"
] | mdsflyboy@gmail.com |
7aa20754e73500584b16fa11fcb4e735dcf67642 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_203/708.py | 44fcd75045e3a2a33513ec3f016b44d1ae028a58 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | import sys
# Code Jam style grid-filling: '?' cells inherit from the nearest labelled
# cell in their row (closest label wins, ties broken leftwards by the
# midpoint split below); rows with no labels copy the row above.
testnum = int(input().strip())
for k in range(testnum):
    R, C = map(int, input().strip().split(" "))
    lines = []           # finished output rows
    string_set = []      # raw input rows
    first_line_flag = 0  # number of leading rows that are entirely '?'
    for _ in range(R):
        string_set.append(input().strip())
    for i in range(R):
        nonq_p = []  # column positions of labelled (non-'?') cells
        nonq_l = []  # the labels at those positions
        string = string_set[i]
        line = ''
        if i == 0 and string == '?'*C:
            # Fully-blank leading rows get a placeholder; they are replaced
            # by the first non-blank row's result at output time.
            lines.append('0')
            first_line_flag += 1
        elif i == first_line_flag and string == '?'*C:
            lines.append('0')
            first_line_flag += 1
        else:
            for j in range(C):
                if string[j] != '?':
                    nonq_p.append(j)
                    nonq_l.append(string[j])
            len_non = len(nonq_p)
            if len_non == 1:
                # A single label fills the whole row.
                line += nonq_l[0]*C
            elif len_non == 0:
                # No labels at all: copy the row above.
                line += lines[i-1]
            else:
                # Several labels: each label fills from its column to the
                # next label's column; the first also covers the left edge,
                # the last runs to the right edge.
                for l in range(len_non):
                    if l == 0:
                        line += nonq_l[0] * nonq_p[1]
                    elif l == len_non-1:
                        line += nonq_l[l] * (C-nonq_p[l])
                    else:
                        line += nonq_l[l] * (nonq_p[l+1]-nonq_p[l])
            lines.append(line)
    print("Case #"+str(k+1)+":")
    if first_line_flag == 0:
        print('\n'.join(lines))
    else:
        # Back-fill the placeholder rows with the first real row.
        for a in range(first_line_flag):
            lines[a] = lines[first_line_flag]
        print('\n'.join(lines))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
66a207690dc50d54e0b9e6421f8fe985d9860e0b | 97eaaa88e0094cfbb40d38937e49b013468b6dce | /spotify/Lib/site-packages/mockups/helpers.py | 03babc1324160f28de12958a5a243d4120daa6dc | [] | no_license | didiacosta/Backend | cd652bdd1a7ec971157a539cb066efe50b024670 | c3bc7053cb579244ad7c9b0c9690d983381f9b15 | refs/heads/master | 2022-11-09T21:23:43.053107 | 2016-04-06T15:44:48 | 2016-04-06T15:44:48 | 55,620,174 | 0 | 1 | null | 2022-10-19T14:56:09 | 2016-04-06T15:58:32 | Python | UTF-8 | Python | false | false | 4,939 | py | # -*- coding: utf-8 -*-
import copy
import warnings
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('register', 'unregister', 'create', 'create_one', 'autodiscover')
_registry = {}
def get_mockup(model, *args, **kwargs):
    """Build a mockup instance for *model*.

    Uses the class registered for the model; unregistered models fall back
    to the plain ``Mockup`` base class after emitting a warning.
    """
    try:
        mockup_cls = _registry[model]
    except KeyError:
        from mockups.base import Mockup
        warnings.warn('Model `%s` not in registry' % model.__name__)
        mockup_cls = Mockup
    return mockup_cls(model, *args, **kwargs)
def register(model, mockup_cls, overwrite=False, fail_silently=False):
    '''
    Add *model* to the registry, mapping it to *mockup_cls*.

    *model* is either a model class or an ``"app_label.ModelName"`` string.
    Registering an already-registered model raises :exc:`ValueError` unless
    *overwrite* is true (the entry is replaced) or *fail_silently* is true
    (the call returns without changing anything).
    '''
    from django.db import models
    if isinstance(model, basestring):
        app_label, model_name = model.split('.', 1)
        model = models.get_model(app_label, model_name)
    if model in _registry and not overwrite:
        if fail_silently:
            return
        raise ValueError(
            u'%s.%s is already registered. You can overwrite the registered '
            u'mockup class by providing the `overwrite` argument.' % (
                model._meta.app_label,
                model._meta.object_name,
            ))
    _registry[model] = mockup_cls
def unregister(model_or_iterable, fail_silently=False):
    '''
    Remove one or more models from the mockups registry.

    *model_or_iterable* may be a single model class / ``"app.Model"`` string
    or a list/tuple/set of them.  Unregistered models raise
    :exc:`ValueError` unless *fail_silently* is true.
    '''
    from django.db import models
    if not isinstance(model_or_iterable, (list, tuple, set)):
        model_or_iterable = [model_or_iterable]
    # Bug fix: the loop iterated ``models`` (the django.db.models module)
    # instead of the models that were actually passed in.
    for model in model_or_iterable:
        if isinstance(model, basestring):
            model = models.get_model(*model.split('.', 1))
        try:
            del _registry[model]
        except KeyError:
            if fail_silently:
                continue
            raise ValueError(
                u'The model %s.%s is not registered.' % (
                    model._meta.app_label,
                    model._meta.object_name,
                ))
def create(model, count, *args, **kwargs):
    '''
    Build *count* instances of *model*.

    *model* may be a model class or an ``"app_label.ModelName"`` string.
    The mockup class registered for the model (or the default ``Mockup``)
    is instantiated with any extra positional/keyword arguments and asked
    to create the objects; the created instances are returned as a list.
    '''
    from django.db import models
    if isinstance(model, basestring):
        app_label, model_name = model.split('.', 1)
        model = models.get_model(app_label, model_name)
    factory = get_mockup(model, *args, **kwargs)
    return factory.create(count)
def create_one(model, *args, **kwargs):
    '''
    Shortcut for :func:`create` that builds exactly one instance and
    returns it directly instead of in a list.
    '''
    created = create(model, 1, *args, **kwargs)
    return created[0]
def autodiscover():
    """
    Auto-discover INSTALLED_APPS mockup.py modules and fail silently when
    not present. This forces an import on them to register any mockup bits they
    may want.
    """
    global _registry
    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        # Attempt to import the app's mockup module.
        try:
            # Snapshot the registry so a failed import can be rolled back.
            before_import_registry = copy.copy(_registry)
            import_module('%s.mockup' % app)
        except:
            # Reset the model registry to the state before the last import as
            # this import will have to reoccur on the next request and this
            # could raise NotRegistered and AlreadyRegistered exceptions
            # (see #8245).
            # (The bare ``except`` is deliberate here: any failure in the
            # submodule import must trigger the rollback above.)
            _registry = before_import_registry
            # Decide whether to bubble up this error. If the app just
            # doesn't have an mockup module, we can ignore the error
            # attempting to import it, otherwise we want it to bubble up.
            if module_has_submodule(mod, 'mockup'):
                raise
| [
"didi_acosta@hotmail.com"
] | didi_acosta@hotmail.com |
31b7b99cf7c1c28c6697a47542c7ce2e3e96cd5f | d0f0462a3782ce794d59e1d31c85a413956fcd16 | /yolo/bin/tensorboard | dcb5a8b9800112139c6ca99b6d41e23c35e74294 | [
"MIT"
] | permissive | gelloguiam/spot-directional-changes | 844c9ade5beab39892711980125000f2b5805b64 | 7183344904f485ae22f16026f98ca524ef48e77c | refs/heads/master | 2023-05-30T11:45:56.536744 | 2021-06-17T16:30:58 | 2021-06-17T16:30:58 | 372,836,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | #!/home/gelloguiam/Desktop/object-tracking/yolov4-deepsort/yolo/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tensorboard.main import run_main
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix so argv[0] shows a clean
    # program name, then delegate to TensorBoard's real entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run_main())
| [
"acguiam@gmail.com"
] | acguiam@gmail.com | |
12bb01b08a32796837ebff59d0f75c72d5feaeae | 67e5a245210762b4af618c3ad6f511b39f1b3910 | /Predict.py | f56cca5275081987304793cd9740121a9e2a04e6 | [] | no_license | beibeisongs/Clothe_Locating_DeepFashion | e33325567f7a418248ae6ce6dd0e5a00580a6fc4 | 4ae1f33ec762b15c65ed18d15d770f5e48f47ae6 | refs/heads/master | 2020-04-02T17:01:42.391051 | 2018-11-01T15:24:03 | 2018-11-01T15:24:03 | 154,640,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # encoding=utf-8
# Date: 2018-10-27
# Author: MJUZY
import cv2
from keras.models import load_model
import numpy as np
def show_points(x, y, im):
    """Draw a thick red dot on *im* at pixel (x, y), modifying it in place."""
    red_bgr = (0, 0, 255)
    cv2.circle(im, (x, y), 1, red_bgr, 10)
def showPrediction(img_path):
    """Predict 12 landmark coordinates for one image and display them.

    Reads *img_path*, normalises it to the 224x224 input the network
    expects, scales the predicted coordinates back to pixel space, draws
    them on the image and shows it until a key is pressed.  Uses the
    module-level ``model``.
    """
    img_ori = cv2.imread(img_path)
    img = img_ori / 255
    images = cv2.resize(img, (224, 224))
    image = np.expand_dims(images, axis=0)  # Note: image.shape = <class 'tuple'>: (1, 224, 224, 3)
    result = model.predict(image)
    content = result[0]
    content = content * 224  # outputs are normalised to [0, 1]; rescale to pixels
    # The 12 values are six (x, y) pairs packed flat: [x0, y0, x1, y1, ...].
    # Replaces six copy-pasted show_points calls with one loop.
    for idx in range(0, 12, 2):
        show_points(int(content[idx]), int(content[idx + 1]), images)
    cv2.imshow("224^2_Images", images)
    cv2.waitKey(0)
# Load the trained landmark network once at module level.
model = load_model('Clothe_Locating_12points.h5')
model.summary()
# Demo: locate and display the landmarks on a single sample image.
img_path = "D:/Dataset_DeepFashion/img_small/" + "Sheer_Woven_Blouse/img_00000101.jpg"
showPrediction(img_path)
| [
"noreply@github.com"
] | beibeisongs.noreply@github.com |
97c88eed0e4437b0909ba2be2df7cd85e9dc66b9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/2532790568a74d1bb3203027d01c8104.py | 1b8003a012f3b0829c56318a98f2eb2523e3b4a8 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 949 | py | ''' Simple module for talking to Bob
'''
import conversation
class LackadaisicalTeenager(object):
    # stub class
    # Intentionally empty marker base class; Bob subclasses it below.
    pass
class Bob(LackadaisicalTeenager):
    ''' A lackadaisical teenager with four canned replies.

    ``hey`` routes a statement through a ``conversation.Conversation``
    that picks the question / yell / silence response, falling back to
    the default for anything else.
    '''
    _questionResponse = "Sure."
    _yellResponse = "Woah, chill out!"
    _silenceResponse = "Fine. Be that way!"
    _defaultResponse = "Whatever."

    def __init__(self):
        super(Bob, self).__init__()
        self._conversation = conversation.Conversation(
            Bob._defaultResponse,
            conversation.Question(Bob._questionResponse),
            conversation.Yell(Bob._yellResponse),
            conversation.AwkwardSilence(Bob._silenceResponse),
        )

    def hey(self, statementStr):
        ''' Return Bob's reply to the given statement. '''
        return self._conversation.respond(statementStr)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
5f5c887abdeee61280dd8c2b3618df34f170e210 | 676cafa1cf3c4cf82aba4482801a97d5663dc691 | /plastex/plasTeX/Renderers/DocBook/__init__.py | 0a25e399b9831c0ed7ab541c6826b86962016850 | [
"MIT"
] | permissive | dvbrydon/plastex-oreilly | 92ef9bbc2d65d9e6285987876785c73c002494fc | 96f10732596c61b8189f4c10730bc7aee70785a8 | refs/heads/master | 2021-01-14T13:48:50.954902 | 2012-07-31T18:41:48 | 2012-07-31T18:41:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,738 | py | #!/usr/bin/env python
import re
from plasTeX.Renderers.PageTemplate import Renderer as _Renderer
class DocBook(_Renderer):
    """ Renderer for DocBook documents """
    fileExtension = '.xml'
    imageTypes = ['.png','.jpg','.jpeg','.gif']
    vectorImageTypes = ['.svg']
    def cleanup(self, document, files, postProcess=None):
        """Delegate to the base renderer's cleanup pass unchanged."""
        res = _Renderer.cleanup(self, document, files, postProcess=postProcess)
        return res
    def processFileContent(self, document, s):
        """Rewrite rendered output into DocBook-flavoured XML.

        The substitutions run in order: XHTML-style empty tags, attribute
        fixes, chapter->preface promotion, whitespace normalisation around
        indexterms/programlistings, then removal of anchors and empty paras.
        """
        s = _Renderer.processFileContent(self, document, s)
        # Force XHTML syntax on empty tags
        # Bug fix: ``re.I|re.S`` was passed as re.sub's fourth positional
        # argument, which is ``count`` (capping replacements at 18), not
        # ``flags``; pass the flags by keyword instead.
        s = re.sub(r'(<(?:hr|br|img|link|meta|col)\b.*?)\s*/?\s*(>)',
                   r'\1 /\2',
                   s,
                   flags=re.I|re.S)
        s = re.sub(r'xml:id', r'id',s)
        # replace the first chapter with a preface
        s = re.sub(r'<chapter', r'<preface', s, count=1)
        s = re.sub(r'</chapter>', r'</preface>', s, count=1)
        # no space before an indexterm
        s = re.sub(r' <indexterm', r'<indexterm', s)
        s = re.sub(r'indexterm> ', r'indexterm>', s)
        # remove newlines in programlistings
        s = re.sub(r'\s*(<programlisting>)\n', r'\1', s)
        s = re.sub(r'\n(</programlisting>)\s*', r'\1', s)
        # remove para around bookinfo
        s = re.sub(r'<para>\s*(<bookinfo>)', r'\1', s)
        s = re.sub(r'(</bookinfo>)\s*</para>', r'\1', s)
        # remove pointless anchors
        s = re.sub(r'\s*(<anchor[^>]*>)\s*', r'',s)
        # get rid of empty paragraphs (repeated to collapse nested cases)
        s = re.sub(r'\s*<para>\s*</para>\s*', r'', s)
        s = re.sub(r'\s*<para>\s*</para>\s*', r'', s)
        s = re.sub(r'\s*<para>\s*</para>\s*', r'', s)
        return s

# Alias expected by plasTeX's renderer-loading machinery.
Renderer = DocBook
| [
"downey@allendowney.com"
] | downey@allendowney.com |
ef5376de98514d66993141393bdc69264b97d558 | ed709ae8127fe36b7d239cf9c054bb6aa3ef868d | /accounts/templatetags/accounts_extras.py | 177835f5ccb2c63c669c212c612201d1c5a7df27 | [] | no_license | BoughezalaMohamedAimen/smartonline | 8ae2722ddb8d827b5d8081bdda0c2fadba15a404 | f8f5c31337c7c38f679529a3fd1b08d0c76be543 | refs/heads/master | 2022-04-14T22:25:46.225539 | 2020-04-02T18:58:59 | 2020-04-02T18:58:59 | 252,542,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from django import template
# Registry instance Django uses to pick up this module's template filters.
register = template.Library()
@register.filter
def add_class(field, class_name):
    """Render a bound form field with *class_name* appended to its CSS classes."""
    css = " ".join((field.css_classes(), class_name))
    return field.as_widget(attrs={"class": css})
def htmlattributes(value, arg):
    """Set HTML attributes on *value*'s widget, then return the rendered field.

    *arg* is a comma-separated list of ``name:value`` pairs, e.g.
    ``"placeholder:Email,autocomplete:off"``.  The widget's ``attrs`` dict
    is mutated in place, so the subsequent ``str(value)`` render picks the
    new attributes up.
    """
    attrs = value.field.widget.attrs
    for pair in arg.split(','):
        # Split on the first ':' only so attribute values (URLs etc.) may
        # themselves contain colons; the old split(':') silently dropped
        # everything after a second colon.  A no-op replace(' ', ' ') was
        # also removed.
        name, attr_value = pair.split(':', 1)
        attrs[name] = attr_value
    rendered = str(value)
    return rendered
# Manual registration (this filter was defined without the decorator form).
register.filter('htmlattributes', htmlattributes)
| [
"mamoumou121@gmail.com"
] | mamoumou121@gmail.com |
b8944c85a951a2418304345822782b4ced5c17d2 | bb483b4c71bc14e01c11ace181f7a4da180a7dd8 | /deploy_server.py | 9daa7fc7177e6e1ff427e4df7618b7ccf8ab0189 | [] | no_license | ddccffvv/ddc | 4e7e871c3529cddcb622b2aaaf8a57216a17f79a | 5ed62633a0046646201cb04eac67afeb5e9b96a3 | refs/heads/master | 2021-01-19T22:33:22.494654 | 2013-11-07T15:55:09 | 2013-11-07T15:55:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | import requests, sys
from optparse import OptionParser
from bs4 import BeautifulSoup
# Expected positional arguments, in order.
usage = "usage: %prog username password orgid netid imageid name password"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
if len(args) < 7:
    print "Not enough arguments: "
    print usage
    sys.exit()
# Unpack the CLI arguments.
username = args[0]
password = args[1]
orgid = args[2]
netid = args[3]
imageid = args[4]
name = args[5]
admin_password = args[6]
# XML body describing the server to deploy (OpSource/Dimension Data API).
payload = """<Server xmlns='http://oec.api.opsource.net/schemas/server'>
<name>""" + name + """</name>
<description>test description</description>
<vlanResourcePath>/oec/""" + orgid + """/network/""" + netid + """</vlanResourcePath>
<imageResourcePath>/oec/base/image/""" + imageid + """</imageResourcePath>
<administratorPassword>""" + admin_password + """</administratorPassword>
<isStarted>true</isStarted>
</Server>"""
# Deploy the server with HTTP basic auth; anything but 200 aborts.
r = requests.post("https://api-eu.dimensiondata.com/oec/0.9/" + orgid + "/server", data=payload, auth=(username,password))
if r.status_code != 200:
    print "error: " + str(r.status_code)
    print r.text
    sys.exit()
# NOTE(review): ``soup`` is parsed but never used afterwards.
soup = BeautifulSoup(r.text).html.body
print r.text
| [
"stijnmuylle@gmail.com"
] | stijnmuylle@gmail.com |
8c10569cbd15464924113c5cd68a9fce1751682f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03998/s634973860.py | 10610faf3c898bc97f92a22e7c712172dd056575 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | a=input()
b=input()
c=input()
a+="A"
b+="B"
c+="C"
n=a
while True:
if len(n)>1:
k=n[0]
if n==a:
a=a[1:]
elif n==b:
b=b[1:]
else:
c=c[1:]
if k=="a":
n=a
elif k=="b":
n=b
else:
n=c
else:
print(n[-1])
break | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6f6855eab32f085c70b8c72993e1a08b3d780818 | e2618c25960a983ec82142f43fd64926012445f1 | /Single_Tello_Test/tello.py | dfeff9299931fbb8b1da0436be14cb61b758dcc7 | [] | no_license | alefaggravo/backend-drone | 979ad3b0f4c27d44297ac0680516ad829aed79e7 | 25df4a2a33afdc2fbff5b13344283739850106bc | refs/heads/master | 2023-03-15T18:33:39.145398 | 2019-05-20T20:06:04 | 2019-05-20T20:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,200 | py | #!/usr/bin/env python
"""
A class to represent a single Tello drone
"""
import socket
import threading
import time
from stats import Stats
class Tello:
    """UDP command client for a single Tello drone.

    Sends text commands to the drone's command port (8889) and records each
    command (and its response) in ``self.log`` as ``Stats`` entries.  Meant
    to be used as a context manager, which starts/stops the background
    response-listener thread.
    """
    def __init__(self):
        self.local_ip = ''
        self.local_port = 8889
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket for sending cmd
        self.socket.bind((self.local_ip, self.local_port))
        self.tello_ip = '192.168.10.1'
        self.tello_port = 8889
        self.tello_adderss = (self.tello_ip, self.tello_port)  # (sic) destination address tuple
        self.log = []  # Stats entry per command sent
        self.MAX_TIME_OUT = 10.0  # seconds to wait for a command response
        self.abort = False  # set True to stop the receive thread
    def __enter__(self):
        print("Tello __enter__ ...")
        # thread for receiving cmd ack
        self.receive_thread = threading.Thread(name="TelloRecv", target=self._receive_thread)
        # self.receive_thread.daemon = True
        self.receive_thread.start()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        print("Tello __exit__ ...")
        #self.socket.close()
        # Signal the receive loop to stop, then wait for it to finish.
        self.abort = True
        self.receive_thread.join()
        print("Tello __exit__ done")
    def send_command(self, command):
        """
        Send a command to the ip address. Will be blocked until
        the last command receives an 'OK'.
        If the command fails (either b/c time out or error),
        will try to resend the command
        :param command: (str) the command to send
        :param ip: (str) the ip of Tello
        :return: The latest command response
        """
        self.log.append(Stats(command, len(self.log)))
        print('sending command: {} to {}' .format(command, self.tello_ip))
        self.socket.sendto(command.encode('utf-8'), self.tello_adderss)
        start = time.time()
        # Busy-wait until the receive thread attaches a response to the log
        # entry created above, or until MAX_TIME_OUT elapses.
        while not self.log[-1].got_response():
            now = time.time()
            diff = now - start
            if diff > self.MAX_TIME_OUT:
                print 'Max timeout exceeded... command %s' % command
                # TODO: is timeout considered failure or next command still get executed
                # now, next one got executed
                return
        print 'Done!!! sent command: %s to %s' % (command, self.tello_ip)
    def _receive_thread(self):
        """Listen to responses from the Tello.
        Runs as a thread, sets self.response to whatever the Tello last returned.
        """
        print("Tello thread enter")
        while not self.abort:
            try:
                self.response, ip = self.socket.recvfrom(1024)
                print('Recvd from {}: {}'.format(ip, self.response))
                # NOTE(review): assumes a command was sent before any response
                # arrives; self.log[-1] raises IndexError on an empty log.
                self.log[-1].add_response(self.response)
            except socket.error, exc:
                print("SOCKET ERROR: " + str(exc))
            except UnicodeDecodeError as exc:
                print("EXCEPTION: " + str(exc))
        print("Tello thread exit")
    def on_close(self):
        # Intentionally a no-op; the commented-out code below shows the
        # originally planned multi-drone shutdown.
        pass
        # for ip in self.tello_ip_list:
        #     self.socket.sendto('land'.encode('utf-8'), (ip, 8889))
        # self.socket.close()
    def get_log(self):
        """Return the list of Stats entries for all commands sent so far."""
        return self.log
| [
"madarp@pobox.com"
] | madarp@pobox.com |
b1d73b8e7891d10a0cbe2ff31ffdefc70a2287d2 | 1cdb0fdc1431e0a59c39ff627c11c77b02ccc8cb | /scripts/SelectLongestContig.py | 4264efa3301975bd664e20e192fe667076547c9f | [] | no_license | ChongLab/B-assembler | fe0e8f4b97f030097b5863b7d493cbe454477b62 | 2e0d8026dd4edf9e75810ecec4a9bd075c8abe25 | refs/heads/master | 2023-03-01T17:07:26.367456 | 2021-02-07T00:14:43 | 2021-02-07T00:14:43 | 329,126,954 | 4 | 1 | null | 2021-02-07T00:14:44 | 2021-01-12T22:18:31 | Python | UTF-8 | Python | false | false | 887 | py | #!/usr/bin/env python
# Select the longest FASTA record (contig) from an assembly and write it out.
# Usage: SelectLongestContig.py <input.fasta> <output.fasta>
import sys
import numpy as np

fa = sys.argv[1]
outfile = sys.argv[2]

# Accumulate each record's sequence under its full header line.  The header
# keeps its trailing newline, matching the original output format exactly.
# (Duplicate headers merge their sequences, as before.)
hash_line = {}
current_name = ""
with open(fa, 'r') as file:  # `with` guarantees the handle is closed
    for line in file:
        if line.startswith(">"):
            current_name = line
        else:
            if current_name not in hash_line:
                hash_line[current_name] = ""
            hash_line[current_name] += line.strip("\n")

# Find the record with the longest sequence.
maximal = 0
max_name = ""
max_string = ""
for key in hash_line:
    if len(hash_line[key]) > maximal:
        maximal = len(hash_line[key])
        max_name = key
        max_string = hash_line[key]

# Write the winner, wrapping the sequence at 60 characters per line.
with open(outfile, 'w') as output:
    output.write(max_name)
    partition = np.ceil(len(max_string) / 60)
    for idx in range(0, int(partition)):
        sequence = max_string[60 * idx:60 * (idx + 1)]
        output.write(sequence + '\n')
| [
"huangf@login001.cm.cluster"
] | huangf@login001.cm.cluster |
70bf85666bbfe41f9db54ff13b0a051f2168b236 | 21208873652ce9a35035801cea488004e337b07b | /modeling/head/CTC.py | 392646d7dd90b4b9de3b0fbff2c1dc64d1abc834 | [
"Apache-2.0"
] | permissive | zlszhonglongshen/crnn.pytorch | 55321a6764a6143be7ab9d2c6b3bcafcdd9470e7 | bf7a7c62376eee93943ca7c68e88e3d563c09aa8 | refs/heads/master | 2022-11-07T22:57:28.983335 | 2020-06-19T03:01:35 | 2020-06-19T03:01:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # -*- coding: utf-8 -*-
# @Time : 2020/6/17 10:57
# @Author : zhoujun
from torch import nn
class CTC(nn.Module):
    """Linear head projecting encoder features to per-class logits.

    NOTE(review): presumably ``n_class`` includes the CTC blank symbol —
    confirm against the loss/decoder configuration.  Extra ``**kwargs`` are
    accepted and ignored so a shared head factory can pass a common config.
    """
    def __init__(self, in_channels, n_class, **kwargs):
        super().__init__()
        self.fc = nn.Linear(in_channels, n_class)
    def forward(self, x):
        # (..., in_channels) -> (..., n_class); raw logits, no softmax here.
        return self.fc(x)
| [
"572459439@qq.com"
] | 572459439@qq.com |
a37b5d504fc9797a51f5613b924b1c3e8b6d5d6e | fa73c553505667445bca81807581d1c56b1659c3 | /rotational-cipher/rotational_cipher.py | bdd0e5413206f229c5e39aa11784902774d77fe7 | [] | no_license | wanngyue/exercism-solutions | 623f9b15e6f640f6b95602ce35ccd5c0f56af3c3 | 79961e039a8c2fc8a8b0d696c35c862f9278ee4f | refs/heads/master | 2020-04-02T07:42:04.816761 | 2019-01-30T19:26:01 | 2019-01-30T19:26:01 | 154,210,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from string import ascii_lowercase as letters
def rotate(text, key):
    """Return *text* with each ASCII letter Caesar-shifted by *key* places.

    Case is preserved and non-alphabetic characters pass through unchanged.
    *key* is normalised modulo 26, so shifts outside 0..25 (including
    negative ones) now wrap around instead of building a wrong-length table.
    """
    key = key % 26  # generalisation: identical results for keys 0..26
    shifted = letters[key:] + letters[:key]
    # One translation table covers both lower- and upper-case alphabets.
    trans = str.maketrans(letters + letters.upper(), shifted + shifted.upper())
    return text.translate(trans)
| [
"wyue.leon@gmail.com"
] | wyue.leon@gmail.com |
e911ac92f69435499460db8f70d101c5dc2a02fb | 5b2a8a32784eb8e9a51d7f1614ee43c276a71c8a | /filter_plots.py | b521c54c9bfe1f147ac531fdb31b66548efb2f11 | [] | no_license | googa27/STAGE-1 | 69d3fbc774e94d875f4136c24eab5dfc941e689b | 0680c877e1cc701ad9b7765481186ce496108260 | refs/heads/master | 2022-05-10T19:05:45.487978 | 2019-02-27T02:48:28 | 2019-02-27T02:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,456 | py | import openpyxl as pyxl
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools as itr
import time
import os
import datetime
import xlsxwriter
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
from sklearn.neural_network import MLPClassifier
import time
import numbers
#import editdistance as ed
import sys
from bokeh.plotting import figure
from bokeh.io import output_file, show, curdoc
from bokeh.layouts import row, column, widgetbox
from bokeh.models import ColumnDataSource, Slider, Select
############################################# FUNCTIONS FOR HANDLING DATA ####################################################
#input: name of file
#output: date object of the file name
def date(filename):
    """Parse a 'YYYY-MM[-DD][.ext]' file name into a ``datetime.date``.

    Already-parsed ``date`` objects pass through unchanged.  NaN inputs
    (detected via ``x != x``) yield the sentinel default 1914-01-01.
    Names with only a year-month default to the first of the month; any
    other shape falls through and returns None implicitly.
    """
    if type(filename) == datetime.date:
        return filename
    if filename != filename:
        # BUG FIX: was `datrime.date(...)` (NameError) on the NaN path.
        return datetime.date(1914, 1, 1)  # default date
    if type(filename) != str:
        # Debug aid; a non-string here will still fail on .split below.
        print(filename)
    strings = filename.split('.')
    strings = strings[0].split('-')
    if len(strings) == 3:
        return datetime.date(int(strings[0]),
                             int(strings[1]),
                             int(strings[2]))
    if len(strings) == 2:
        return datetime.date(int(strings[0]),
                             int(strings[1]),
                             1)
def is_valid_difference(years, months):
    """True when the (years, months) delta spans exactly one calendar month.

    Two shapes qualify: same year, one month apart (0, 1), or the
    December -> January rollover (1, -11).
    """
    return (years, months) in ((1, -11), (0, 1))
##returns true if x.month is the month before y.month, false otherwise
def date_is_precedent(x, y):
    """True when date(x) falls in the calendar month just before date(y)."""
    if x != x or y != y:  # NaN inputs never precede anything
        return False
    earlier, later = date(x), date(y)
    return is_valid_difference(later.year - earlier.year,
                               later.month - earlier.month)
#############################################FUNCTIONS FOR SAVING AND LOADING DATA##########################################
def save_pickles(product_list = 'data.xlsx', id_list = 'producto_id.xlsx'):
    """Read the two Excel inputs and persist them as same-named .pkl files.

    The pickle name reuses everything before the first '.' in the source
    name.  Both workbooks are read before either pickle is written, so a
    failing second read leaves no partial output.
    """
    if not (product_list.endswith('.xlsx') and id_list.endswith('.xlsx')):
        print('ERROR: One or more of the given files are not excel files (.xlsx)')
        return
    frames = (pd.read_excel(product_list), pd.read_excel(id_list, index_col=0))
    for frame, source in zip(frames, (product_list, id_list)):
        frame.to_pickle(source.split('.')[0] + '.pkl')
def load_pickles(product_list = 'data.pkl', id_list = 'producto_id.pkl'):
    """Load the previously pickled data frame and product-id object."""
    loaded = [pd.read_pickle(path) for path in (product_list, id_list)]
    return loaded[0], loaded[1]
################################################################################################################################
def get_upper(s):
    """Drop lowercase letters from *s*; every other character is kept."""
    return ''.join(filter(lambda c: not c.islower(), s))
def score_coincidence(detalle, s):
    """Score how well candidate name *s* matches description *detalle*.

    Only all-uppercase words longer than 4 characters participate.  Each
    whole-word match is worth 10000; a candidate word contained inside a
    longer description word adds its own length as a tiebreaker.
    """
    detail_words = [word for word in detalle.split(' ') if word.isupper()]
    candidate_words = [word for word in s.split(' ') if word.isupper()]
    # BUG FIX: the original tested `len(word) in detail_words` — an int
    # against a list of strings, which is always False — so whole-word
    # matches never scored.  Test the word itself instead.
    complete_word_score = [word in detail_words
                           for word in candidate_words if len(word) > 4]
    incomplete_word_score = [len(word) * any(word in biggerword for biggerword in detail_words)
                             for word in candidate_words if len(word) > 4]
    return sum(complete_word_score) * 10000 + sum(incomplete_word_score)
def best_coincidence(detalle, producto_id):
    # Picks the producto_id index label with the smallest edit distance to
    # the uppercase residue of `detalle`.
    # NOTE(review): `ed` is the editdistance module, but its import is
    # commented out at the top of the file, so calling this raises NameError.
    return min(producto_id.index.to_series(), key = lambda x: ed.eval(get_upper(detalle), x) )
def infer_product_id(df, producto_id):
    """Fill missing producto/producto_id columns from the best text match.

    NOTE(review): this function looks unfinished/dead code — `df_out` is
    used before being defined (only `df_producto` is assigned), so calling
    it raises NameError, and `best_coincidence` returns a single value here
    yet is unpacked into three below.  Kept byte-identical pending
    clarification of the intended behaviour.
    """
    df_producto = df.detalle.apply(lambda x: best_coincidence(x, producto_id)[1])
    for i in df_out.index:
        if i%100 == 0:
            print(i)  # progress indicator every 100 rows
        row = df_out.loc[i]
        # x != x detects NaN: fill only rows that have a detail but no
        # product name/id yet.
        if row['producto'] != row['producto'] and row['producto_id'] != row['producto_id'] and row['detalle'] == row['detalle']: #to detect nans
            score, best_producto, best_producto_id = best_coincidence(row['detalle'], producto_id)
            if score>0:
                df_out.loc[i, 'producto'], df_out.loc[i, 'producto_id'] = best_producto, best_producto_id
    return df_out
def eliminate_nodetail_or_noprice(df):
    """Keep rows with a numeric non-zero variety id, numeric price and
    string detail; NaNs (x != x) in any of those columns are dropped."""
    keep = df['variedad_id'].apply(lambda v: v != 0)
    # Self-equality is False for NaN, so these masks drop missing values.
    keep &= df['precio'] == df['precio']
    keep &= df['detalle'] == df['detalle']
    keep &= df['variedad_id'] == df['variedad_id']
    # Type guards: mixed-type columns can hold strings where numbers belong.
    keep &= df['precio'].apply(lambda p: isinstance(p, numbers.Number))
    keep &= df['variedad_id'].apply(lambda v: isinstance(v, numbers.Number))
    keep &= df['detalle'].apply(lambda d: isinstance(d, str))
    return df[keep]
##groups by variedad_id and eliminates all sets of sample size <= sample_size
def eliminate_small_samples(df, sample_size = 1):
    """Return a copy keeping only varieties with more than *sample_size* rows.

    A 'sample_size' column holding each variety's row count is added to the
    returned copy; the input frame is left untouched.
    """
    out = df.copy()
    out['sample_size'] = out.groupby('variedad_id').logprecio.transform('size')
    return out[out['sample_size'] > sample_size]
##eliminates outliers according to variance criterion on the prices
def eliminate_outliers_price(df, criterion = 0.05):
    """Drop per-variety price outliers using a variance-ratio criterion.

    For each row the statistic is group_var / (deviation^2 + eps); rows at
    or below *criterion* are removed.  The trailing `int(x == 0)` term keeps
    every row of zero-variance groups (all deviations there are zero).
    Helper columns 'var_log', 'mean_log' and 'criterio' are added to the
    returned copy.
    """
    df_out = df.copy()
    glogp = df_out.groupby('variedad_id').logprecio
    df_out['var_log'] = glogp.transform('var')
    df_out['mean_log'] = glogp.transform('mean')
    # 1e-12 guards against division by zero when a price equals its group mean.
    df_out['criterio'] = df_out['var_log'] / ( (df_out['logprecio'] - df_out['mean_log'])**2 + 1e-12) + df_out.var_log.apply(lambda x: int(x == 0))
    df_out = df_out[df_out['criterio'] > criterion]
    return df_out
#TO DO
##eliminates outliers according to variance criterion on the price variations
##def eliminate_outliers_pricevariation(df):
## df_out = df.copy()
## df_out['year-month'] = df_out.fecha.apply(lambda x: '-'.join(x.split('-')[:-1] ) ) #standard format
## glogp = df_out.groupby('detalle').logprecio
## df_out['sample_size'] = glogp.transform(lambda x: x.size)
## df_out = df_out[data_df['sample_size'] > 1]
##
## aggregate_df = df_out.groupby(['year-month', 'detalle', 'producto', 'producto_id', 'variedad_id']).precio.aggregate(['min', 'max', 'mean'])
## aggregate_df = aggregate_df.reset_index().sort_values('fecha', kind = 'mergesort')
## aggregate_df = aggregate_df.sort_values('detalle', kind = 'mergesort')
## aggregate_df['monthly_percent_variation (mean)'] = aggregate_df['mean']/aggregate_df['mean'].shift(1) - 1
## aggregate_df['monthly_percent_variation (mean)'] = (aggregate_df['monthly_percent_variation (mean)']
## .where(aggregate_df['detalle'] == aggregate_df['detalle'].shift(1)
## & aggregate_df['fecha'] == aggregate_df['detalle'].shift(1)))
## aggregate_df['fecha_gabriel'] = aggregate_df['year-month'].apply(lambda x: date(x + '-01') )
## return df_out
########################################################## LOADING FILES ###########################################################
## save_pickles(product_list = 'data.xlsx', id_list = 'producto_id.xlsx') ##Save files as pickles to retrieve them faster
data_df = pd.read_excel('data.xlsx') ##Read file with data
## data_df.to_pickle('data.pkl')
#data_df = pd.read_pickle('data.pkl')
producto_id = pd.read_excel('producto_id.xlsx', index_col = 0) ##Read file of product - product_id pairs
## detalle_producto = pd.read_excel('product_detail.xlsx', index_col = 0) ##Read file of detail - product pairs
## detalle_producto = detalle_producto['producto']
## data_df, producto_id = load_pickles(product_list = 'data.pkl', id_list = 'producto_id.pkl') ##Load files from saved pickles
# Reduce the id frame to a Series mapping product name -> product id.
producto_id = producto_id['producto_id']
print(producto_id.head())
# Table relating variety ids to product names; used to label data_df below.
organizing_table = pd.read_excel('organizing_table.xlsx')
print('finished loading')
#####################################################################################################################################
# Drop rows without usable price/detail/variety-id values, then normalise
# prices to float64.
data_df = eliminate_nodetail_or_noprice(data_df)
data_df['precio'] = data_df.precio.apply(lambda x: np.float64(x))
# Build a variedad_id -> product-name lookup from the organizing table.
varid_producto = organizing_table[['variedad_id', 'producto']]
varid_producto = varid_producto.drop_duplicates()
varid_producto = pd.Series(data = varid_producto.producto.values, index = varid_producto.variedad_id.values)
#####################################################################################################################################
data_df['logprecio'] = np.log10(data_df.precio)
start_time = time.time()
##Outlier filter
##################################################### LABEL DATA WITH PRODUCT ####################################################
data_df['producto'] = data_df['variedad_id'].apply(lambda x: varid_producto[x])
data_df['producto_id'] = data_df['producto'].apply(lambda x: producto_id[x])
###################################################################################################
# Keep a pre-filter snapshot for the plots, then apply four rounds of
# small-sample removal and price-outlier filtering.
df_pre = data_df.copy()
for i in range(4):
    data_df = eliminate_small_samples(data_df, 1)
    data_df = eliminate_outliers_price(data_df, 0.1)
df_post = data_df.copy()
################################################### HERE COME THE PLOTS ##########################
# Bokeh data source fed from the pre-filter snapshot; the Select callbacks
# below swap its .data to re-filter the scatter plot.
source_pre = ColumnDataSource(df_pre)
menu_producto = Select(title = 'Producto', options = ['all'] + sorted(list(df_pre.producto.unique())) )
# Variety ids are displayed as strings but sorted numerically via key=int.
menu_varid = Select(title = 'Variedad id',
                    options = ['all'] + sorted( [str(ident) for ident in df_pre.variedad_id.unique()], key = int) )
plot = figure()
plot.scatter(x = 'variedad_id', y ='logprecio', source = source_pre)
def callback_producto(attr, old, new):
    """Select-widget handler: filter the plotted data by chosen product.

    Also rebuilds the variety-id menu to offer only ids that belong to the
    chosen product, and resets the variety selector back to 'all'.
    """
    producto = menu_producto.value
    varid = menu_varid.value
    print(producto)
    print(varid)
    if producto == 'all':
        menu_varid.options = ['all'] + sorted( [str(ident) for ident in df_pre.variedad_id.unique()], key = int)
        source_pre.data = {col: df_pre[col] for col in df_pre.columns}
    else:
        menu_varid.options = ['all'] + sorted( [str(ident) for ident
                                                in df_pre[df_pre.producto == producto].variedad_id.unique()],
                                               key = int)
        source_pre.data = {col: df_pre[col][df_pre.producto == producto] for col in df_pre.columns}
    # BUG FIX: was `menu_varid.value == 'all'` — a no-op comparison used as a
    # statement; the intent is to reset the variety selector on product change.
    menu_varid.value = 'all'
def callback_varid(attr, old, new):
    """Select-widget handler: narrow the plotted data to one variety id.

    With 'all' selected it falls back to the current product filter (or the
    whole pre-filter frame when the product is also 'all').
    """
    varid = menu_varid.value
    producto = menu_producto.value
    print(producto)
    print(varid)
    if varid == 'all':
        if producto == 'all':
            source_pre.data = {col: df_pre[col] for col in df_pre.columns}
        else:
            source_pre.data = {col: df_pre[col][df_pre.producto == producto] for col in df_pre.columns}
    else:
        # Menu values are strings; the column holds numeric ids.
        source_pre.data = {col: df_pre[col][df_pre.variedad_id == int(varid)] for col in df_pre.columns}
# Wire the widgets to their handlers and assemble the Bokeh document layout.
menu_producto.on_change('value', callback_producto)
menu_varid.on_change('value', callback_varid)
layout = column(widgetbox(menu_producto, menu_varid), plot)
output_file('bokeh_plot.html')
curdoc().add_root(layout)
| [
"noreply@github.com"
] | googa27.noreply@github.com |
c20f0f7a5868837ea73f41a8c8eee637f736c9b1 | 30c7a32daf4dd9e587580d35c1f4115745d210fe | /blog/views.py | 3103202228087eb6d9ee1d03cb437e8eae518b14 | [] | no_license | pritishjain2001/my-first-blog | 395382c3c596d3d45e48299006076b8ef464dc38 | 70799eab6bea0971deb128359738701657ca4d60 | refs/heads/master | 2020-04-03T13:19:02.267020 | 2018-11-02T08:45:23 | 2018-11-02T08:45:23 | 155,281,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | from django.shortcuts import redirect
from .forms import PostForm
from django.utils import timezone
from .models import Post
from django.shortcuts import render, get_object_or_404
# Create your views here.
def post_list(request):
    """List already-published posts, oldest first."""
    posts =Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts':posts})
def post_detail(request, pk):
    """Render the detail page for one post, returning 404 when absent."""
    requested_post = get_object_or_404(Post, pk=pk)
    context = {'post': requested_post}
    return render(request, 'blog/post_detail.html', context)
def post_new(request):
    # NOTE(review): dead code — immediately shadowed by the second
    # `post_new` definition right below; safe to delete.
    form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})
def post_new(request):
    """Create a post: empty form on GET; validate, save and redirect on POST.

    An invalid POST falls through to re-render the bound form with errors.
    """
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # Author and publish time come from the request, not the form.
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
    """Edit an existing post; GET pre-fills the form, POST saves changes."""
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            # Re-stamp author and publish time on every edit.
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('post_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form}) | [
"pritishjain2001@gmail.com"
] | pritishjain2001@gmail.com |
8d1b0b844d2d2cab532cd11a19b57dd8d4fcf822 | 5600f24b1347d1c66dbaa75f504ce42ddcdba753 | /manage.py | 7a85cdb4f447820405f9956afa53fa17d238c421 | [
"Apache-2.0"
] | permissive | pygabo/qamkunapah | f3d3e32087518d25fa999c7d089568957ac54daf | af31a3b1a01351b80f219e591fbac265539bb77d | refs/heads/master | 2020-04-19T05:40:58.608074 | 2019-01-28T17:41:47 | 2019-01-28T17:41:47 | 167,995,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the local settings module before anything imports it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django # noqa
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # This allows easy placement of apps within the interior
    # qamkunapah directory.
    current_path = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(os.path.join(current_path, "qamkunapah"))
    # Delegate to Django's CLI (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| [
"pygabo@gmail.com"
] | pygabo@gmail.com |
2aa3e8162fb25d6be4d1c22ceeefbc1179bc9e6e | bd29a8b647823f6589fe1d97e94b4ae92359df90 | /code/pesudo_predict.py | 37a4fea64b29becf29036ec17a6be6a64bb334f2 | [] | no_license | ziliwang/tweet-sentiment-extraction | df36fec9e4b72270a4f624985db3b84815dd985c | c2c41814e1c04bffffc94fa2dc161253230e08fd | refs/heads/master | 2022-10-21T19:43:11.981092 | 2020-06-18T03:35:44 | 2020-06-18T03:35:44 | 254,626,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,661 | py | import torch
from torch import nn, optim
from torch.nn import functional as F
from transformers import *
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from sklearn.model_selection import train_test_split, KFold
import joblib
import click
import random
import numpy as np
import os
from tqdm import tqdm
from copy import deepcopy
from tokenizers import ByteLevelBPETokenizer
import pandas as pd
MAX_LEN = 100
cls_token_id = 0
pad_token_id = 1
sep_token_id = 2
def preprocess(tokenizer, df):
    """Tokenize each tweet row into a record dict for the span model.

    Rows with a NaN text are skipped.  Text is prefixed with a space (for
    byte-level BPE), backticks are normalised to apostrophes, and the
    sentiment word is mapped to its token id via the tokenizer vocabulary.
    """
    sentiment_hash = dict(
        (tok[1:], tokenizer.token_to_id(tok))
        for tok in ('Ġpositive', 'Ġnegative', 'Ġneutral')
    )
    records = []
    for _, row in df.iterrows():
        if pd.isna(row.text):
            continue
        text = row.text if row.text.startswith(' ') else ' ' + row.text
        encoding = tokenizer.encode(text.replace('`', "'"))
        records.append({
            'tokens_id': encoding.ids,
            'sentiment': sentiment_hash[row.sentiment],
            'offsets': encoding.offsets,
            'text': text,
            'id': row.textID,
        })
    return records
def collect_func(records):
    """DataLoader collate: batch records into model-ready tensors.

    Each input is [CLS, sentiment, SEP] + tokens (truncated to MAX_LEN)
    + [SEP]; the batch is padded with pad_token_id and moved to the GPU
    (CUDA is required here).  Ids, char offsets and raw texts ride along
    unpadded for decoding.
    """
    ids = []
    inputs = []
    offsets = []
    texts = []
    for rec in records:
        ids.append(rec['id'])
        inputs.append(torch.LongTensor([cls_token_id, rec['sentiment'], sep_token_id] + rec['tokens_id'][:MAX_LEN] + [sep_token_id]))
        offsets.append(rec['offsets'])
        texts.append(rec['text'])
    return ids, pad_sequence(inputs, batch_first=True, padding_value=pad_token_id).cuda(), offsets, texts
class TaskLayer(nn.Module):
    """Scores every (start, end) token span via a query/key dot product."""
    def __init__(self, hidden_size):
        super(TaskLayer, self).__init__()
        self.hidden_size = hidden_size
        # Commented lines below are earlier two-layer projection experiments.
        # self.query = nn.Sequential(nn.Linear(self.hidden_size, self.hidden_size), nn.Tanh(), nn.Linear(self.hidden_size, self.hidden_size))
        self.query = nn.Linear(self.hidden_size, self.hidden_size)
        # self.key = nn.Sequential(nn.Linear(self.hidden_size, self.hidden_size), nn.Tanh(), nn.Linear(self.hidden_size, self.hidden_size))
        self.key = nn.Linear(self.hidden_size, self.hidden_size)
    def forward(self, hidden_states, attention_mask=None):
        """Return flattened span logits of shape (batch, slen*slen).

        Entry [b, i*slen + j] scores the span starting at token i and
        ending at token j; the upper-triangular mask forbids j < i.
        """
        bsz, slen, hsz = hidden_states.shape
        query = self.query(hidden_states)
        key = self.key(hidden_states) # b x s_len x h
        logits = torch.matmul(query, key.transpose(-1, -2)) # b x s_len x s_len
        # Scaled dot-product, as in attention.
        logits = logits/np.sqrt(self.hidden_size)
        if attention_mask is not None: # 1 for available, 0 for unavailable
            # Outer product: a span is valid only if both endpoints are valid.
            attention_mask = attention_mask[:, :, None].expand(-1, -1, slen) * attention_mask[:, None, :].expand(-1, slen, -1)
        else:
            attention_mask = torch.ones(bsz, slen, slen)
            if hidden_states.is_cuda:
                attention_mask = attention_mask.cuda()
        # Keep only end >= start, and push masked spans to ~-inf (-1e6).
        attention_mask = torch.triu(attention_mask)
        logits = logits*attention_mask - 1e6*(1-attention_mask)
        logits = logits.view(bsz, -1) # b x slen*slen
        return logits
class TweetSentiment(BertPreTrainedModel):
    """RoBERTa encoder + TaskLayer span head for tweet sentiment extraction."""
    def __init__(self, config):
        super(TweetSentiment, self).__init__(config)
        # setattr(config, 'output_hidden_states', True)
        self.roberta = RobertaModel(config)
        self.task = TaskLayer(config.hidden_size)
        self.dropout = nn.Dropout(0.2)
        self.init_weights()
        # torch.nn.init.normal_(self.logits.weight, std=0.02)
    def forward(self, input_ids, attention_mask=None, start_positions=None, end_positions=None):
        """With span targets, return per-example loss; otherwise span logits.

        Targets are encoded as a single class index start*slen + end,
        matching TaskLayer's flattened (batch, slen*slen) output.
        """
        outputs = self.roberta(input_ids, attention_mask=attention_mask)
        # hidden_states = torch.cat([outputs[0], outputs[1].unsqueeze(1).expand_as(outputs[0])], dim=-1)
        hidden_states = outputs[0]
        # Valid-span mask: drop padding, SEP tokens and the first two
        # positions (CLS + sentiment token).
        p_mask = attention_mask.float() * (input_ids != sep_token_id).float()
        p_mask[:,:2] = 0
        span_logits = self.task(self.dropout(hidden_states), attention_mask=p_mask) # b x slen*slen
        bsz, slen = input_ids.shape
        if start_positions is not None and end_positions is not None:
            span = start_positions * slen + end_positions
            # reduction='none' keeps one loss value per example.
            loss = F.cross_entropy(span_logits, span, reduction='none')
            return loss
        else:
            return span_logits
def pridect_epoch(model, dataiter, span_logits_bagging):
    """Accumulate one model's softmaxed span probabilities per example id.

    Called once per fold model; summing probabilities across calls bags the
    ensemble.  (Function name keeps its historical "pridect" typo because
    callers use it.)
    """
    model.eval()
    with torch.no_grad():
        for ids, inputs, _, _ in dataiter:
            span_logits = model(input_ids=inputs, attention_mask=(inputs!=pad_token_id).long())
            span_prob = span_logits.softmax(-1)
            for i, v in zip(ids, span_prob):
                if i in span_logits_bagging:
                    span_logits_bagging[i] += v
                else:
                    span_logits_bagging[i] = v
@click.command()
@click.option('--test-path', default='../input/test.csv')
@click.option('--vocab', default='../model/roberta-l12/vocab.json')
@click.option('--merges', default='../model/roberta-l12/merges.txt')
@click.option('--models', default='trained.models')
@click.option('--config', default='../model/roberta-l12/config.json')
def main(test_path, vocab, merges, models, config):
    """Bag saved fold models over the test set and write submission.csv.

    NOTE(review): this function is an unfinished draft and does not run
    as written — see the inline flags below.
    """
    moses_token_func = XLMTokenizer(vocab, merges, lowercase=True).moses_tokenize
    tokenizer = ByteLevelBPETokenizer(vocab, merges, lowercase=True, add_prefix_space=True)
    test_df = pd.read_csv(test_path)
    # NOTE(review): preprocess() is defined with 2 parameters but called
    # here with 3 — signature mismatch.
    test = preprocess(tokenizer, moses_token_func, test_df)
    model_config = RobertaConfig.from_json_file(config)
    saved_models = torch.load(models)
    model = TweetSentiment(model_config).cuda()
    testiter = DataLoader(test, batch_size=32, shuffle=False, collate_fn=collect_func)
    print(f"5cv {saved_models['score']}")
    span_logits_bagging = {}
    # Sum each fold model's span probabilities per example (bagging).
    for state_dict in saved_models['models']:
        model.load_state_dict(state_dict)
        pridect_epoch(model, testiter, span_logits_bagging)
    id2sentiment = dict((r.textID, r.sentiment) for _, r in test_df.iterrows())
    starts = []
    ends = []
    for ids, inputs, offsets, texts in testiter:
        bsz, slen = inputs.shape
        for id, offset, text in zip(ids, offsets, texts):
            # NOTE(review): syntax error — `if id` is an incomplete statement
            # (missing condition/colon); presumably a membership guard on
            # span_logits_bagging was intended.
            if id
            span = span_logits_bagging[id].max(-1)[1].cpu().numpy()
            # Flattened span index decodes as (start, end) = divmod(span, slen).
            start, end = divmod(span, slen)
            starts.append(start)
            ends.append(end)
    starts = np.concatenate(starts)
    ends = np.concatenate(ends)
    submit = pd.DataFrame()
    submit['textID'] = test_df['textID']
    # NOTE(review): `predicts` is never defined, and starts/ends above are
    # never turned into selected-text strings — the decode step is missing.
    submit['selected_text'] = [predicts.get(r.textID, r.text) for _, r in test_df.iterrows()]
    submit.to_csv("submission.csv", index=False)
if __name__ == "__main__":
main()
| [
"wzlnot@gmail.com"
] | wzlnot@gmail.com |
fa4317e5f94fd6a09b9a880d9212ead1a1af2a0e | bd18efaf3711c3e7c429d7ed43158ff66fa69511 | /Solutions/netmiko_exercise_sol.py | cd3e12ff2c9bab5794dca60fcf1f20708d68a191 | [] | no_license | aawarner/Workshop | db4bf940261f0903ffea364758b33c0f5cab6e96 | 6cbef41ba60539a49e5e2427d5e6b9911627a2c2 | refs/heads/master | 2020-06-21T09:22:29.633244 | 2019-07-18T20:38:57 | 2019-07-18T20:38:57 | 197,195,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | '''
EXERCISE:
Write a program that will log in to your CSR1000v and deploy the below access-list.
Changes should be written to a file called changes.txt.
ip access-list extended python_acl
remark ACL added by Python
permit ip any host 1.2.3.4
permit ip host 1.2.3.4 any
deny ip any any
BONUS:
After deploying the ACL download the startup config and the running config and write them to a file.
Compare the two files and write the differences to your console.
HINT!!!
Use python library difflib to compare the files and print the differences to your console.
'''
import netmiko
import difflib
# Netmiko define device
# NOTE(review): device credentials are hard-coded; fine for a lab exercise,
# never for production.
csr = {
    'ip': '10.10.10.145',
    'device_type': 'cisco_ios',
    'username': 'cisco',
    'password': 'cisco',
}
# Netmiko config set
configs = ['ip access-list extended python_acl', 'remark ACL added by Python', 'permit ip any host 1.2.3.4',
           'permit ip host 1.2.3.4 any', 'deny ip any any']
# Establish SSH session via Netmiko
try:
    conn = netmiko.ConnectHandler(**csr)
except (netmiko.NetMikoTimeoutException, netmiko.NetMikoAuthenticationException,) as e:
    print(e)
# NOTE(review): if the connection above failed, `conn` is undefined and the
# next line raises NameError — the script should exit in the except branch.
# Deploy config set to CSR via Netmiko
changes = conn.send_config_set(configs)
# Write config changes to file
with open('changes.txt', 'w+') as f:
    f.write(changes)
# Get Startup config and write to a file
with open('startup.txt', 'w+') as f:
    f.write(conn.send_command('show start'))
with open('startup.txt', 'r') as f:
    data = f.read().splitlines(True)
# Rewrite dropping the first 4 header lines of the startup config.
with open('startup.txt', 'w') as f:
    f.writelines(data[4:])
# Get Running config and write to a file
with open('running.txt', 'w+') as f:
    f.write(conn.send_command('show run'))
with open('running.txt', 'r') as f:
    data = f.read().splitlines(True)
# Rewrite dropping the first 6 header lines of the running config.
with open('running.txt', 'w') as f:
    f.writelines(data[6:])
# Open startup and running configs
f = open('startup.txt', 'r')
startup = f.readlines()
f.close()
f = open('running.txt', 'r')
running = f.readlines()
f.close()
# Compare and print differences using difflib Python library
for line in difflib.unified_diff(startup, running):
    print(line, end='')
| [
"aawarner@cisco.com"
] | aawarner@cisco.com |
4e56367be936d6b2ea5fa7e71d83e4f4d151b787 | e9c4845afff6d8e4b28a214699ff061778950362 | /summariser.py | e966ffd76acc0955b0112153a8e464cc93473efe | [
"Apache-2.0"
] | permissive | aronlebani/py-summariser | 9f9d095b6c08a20e2106af7fe4ebb5db9c019ff6 | 92ee252e7af4925f8126a22c125adaeb6431b8e7 | refs/heads/master | 2023-04-15T13:10:55.131307 | 2018-07-28T11:03:58 | 2018-07-28T11:03:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,735 | py | from utilities import Utilities
class Summariser:
    def __init__(self):
        # Text helpers (word frequencies, sentence splitting) live in Utilities.
        self.utes = Utilities()
def findMaxValue(self, list):
list.sort(reverse=True)
return list[0]
    def getMostFrequentWords(self, count, wordFrequencies):
        # Thin delegation to the Utilities helper; kept for API stability.
        return self.utes.getMostFrequentWords(count, wordFrequencies)
def reorderSentences(self, sentences, input):
# Reorder sentences to the order they were in the original text
orderHash = {}
outputSentences = []
for sentence in sentences:
idx = input.index(sentence)
orderHash[idx] = sentence
for key in sorted(orderHash):
outputSentences.append(orderHash[key])
return outputSentences
    def summarise(self, input, numSentences):
        """Extract up to *numSentences* sentences covering the most frequent words.

        Strategy: find the 100 most frequent words, then for each word add
        the first (lower-cased) sentence containing it, stopping once the
        requested number of sentences is collected; finally restore the
        original sentence order and join with ". ".
        """
        # Get the frequency of each word in the input
        wordFrequencies = self.utes.getWordFrequency(input)
        # Now create a set of the X most frequent words
        mostFrequentWords = self.getMostFrequentWords(100, wordFrequencies)
        # Remove words of class preposition and conjunction
        # mostFrequentWords = self.utes.removeWordsOfType(mostFrequentWords, 'preposition')
        # mostFrequentWords = self.utes.removeWordsOfType(mostFrequentWords, 'conjunction')
        # mostFrequentWords = self.utes.removeWordsOfType(mostFrequentWords, 'determiner')
        # Break the input up into sentences
        # workingSentences is used for the analysis, but
        # actualSentences is used in the results so that the
        # capitalisation will be correct
        workingSentences = self.utes.getSentences(input.lower())
        actualSentences = self.utes.getSentences(input)
        # Iterate over the most frequent words, and add the first sentence
        # that includes each word to the result
        outputSentences = set()
        for word in mostFrequentWords:
            for sentence in workingSentences:
                if word.lower() in sentence:
                    # Indices line up because both sentence lists come from
                    # the same splitter, differing only in case.
                    idx = workingSentences.index(sentence)
                    outputSentences.add(actualSentences[idx])
                    break
                if len(outputSentences) >= numSentences:
                    break
            if len(outputSentences) >= numSentences:
                break
        reorderedOutputSentences = self.reorderSentences(outputSentences, input)
        result = ""
        for sentence in reorderedOutputSentences:
            result += sentence
            result += "." # This isn't always correct - perhaps it should be whatever symbol the sentence finished with
            # Add a separating space after every sentence except the last.
            if sentence != list(reorderedOutputSentences)[len(reorderedOutputSentences)-1]:
                result += " "
return result
| [
"aron.lebani@gmail.com"
] | aron.lebani@gmail.com |
1658183b5c32965d1eafe7453747b27f7645785d | 76d16a25b3d0e96d6ba69139f2e85a7b7b6cfafa | /audio_file_handler.py | 36f1e7ba3f843244cc00ef75b56023c39a0a2e67 | [] | no_license | nksubramanian/LinkedIn | 8f1efb20211496abd8c055c528d5fa53c9060759 | 974b78ff4c4b475169fc0e23aeccd3395e660490 | refs/heads/master | 2023-04-07T23:04:41.986110 | 2021-04-16T19:12:59 | 2021-04-16T19:12:59 | 356,210,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,252 | py | from abc import abstractmethod
from datetime import datetime
from business_errors import UserInputError
class Handler:
    """Base CRUD + validation handler for audio-file records.

    Subclasses name their collection and allowed properties and implement
    the creation-parameter checks; persistence is delegated entirely to the
    injected ``persistence_gateway``.  Validation failures raise
    ``UserInputError``.
    """
    def __init__(self, persistence_gateway):
        self.persistence_gateway = persistence_gateway
    @abstractmethod
    def _assert_creation_parameters_are_correct(self, creation_parameters): # pragma: no cover
        pass
    @abstractmethod
    def _get_valid_properties(self): # pragma: no cover
        pass
    @abstractmethod
    def _get_collection(self): # pragma: no cover
        pass
    def __filter_audio_file(self, creation_request):
        # Keep only the keys the concrete handler declares as valid.
        valid_properties = self._get_valid_properties()
        filtered = dict((key, value) for key, value in creation_request.items() if key in valid_properties)
        return filtered
    def update_audio_file(self, audio_file_id, creation_request):
        """Validate, filter and persist an update for an existing record."""
        self._assert_creation_parameters_are_correct(creation_request)
        filtered_creation_request = self.__filter_audio_file(creation_request)
        self.persistence_gateway.update(self._get_collection(), audio_file_id, filtered_creation_request)
    def add_audio_file(self, audio_file_id, creation_request):
        """Validate id and payload, then persist a new record."""
        if type(audio_file_id) is not int:
            raise UserInputError("Audio file Id has to be an integer")
        self._assert_creation_parameters_are_correct(creation_request)
        filtered_creation_request = self.__filter_audio_file(creation_request)
        self.persistence_gateway.add(self._get_collection(), audio_file_id, filtered_creation_request)
    def get_audio_file(self, audio_file_id):
        """Return a single record by id from the handler's collection."""
        return self.persistence_gateway.get(self._get_collection(), audio_file_id)
    def delete_audio_file(self, audio_file_id):
        """Delete a single record by id from the handler's collection."""
        self.persistence_gateway.delete(self._get_collection(), audio_file_id)
    def get_audio_files(self):
        """Return every record in the handler's collection."""
        return self.persistence_gateway.get_all(self._get_collection())
    def _assert_property_is_valid_string(self, dictionary, property_name, max_length):
        """Raise UserInputError unless the value is a non-blank string <= max_length.

        Check order determines which error message wins for multi-fault input.
        """
        if property_name not in dictionary.keys():
            raise UserInputError(f"{property_name} is mandatory")
        val = dictionary[property_name]
        if type(val) != str:
            raise UserInputError(f"{property_name} has to be a string")
        if val.isspace():
            raise UserInputError(f"{property_name} is mandatory")
        if len(val) > max_length:
            raise UserInputError(f"{property_name}'s max length is {max_length}")
        if len(val) == 0:
            raise UserInputError(f"{property_name} is mandatory")
    def _assert_property_is_positive_integer(self, dictionary, property_name):
        """Raise UserInputError unless the value is an int strictly > 0."""
        if property_name not in dictionary.keys():
            raise UserInputError(f"{property_name} is mandatory")
        val = dictionary[property_name]
        if type(val) != int:
            raise UserInputError(f"{property_name} has to be a integer")
        if val <= 0:
            raise UserInputError(f"{property_name} has to be a positive integer")
    def _assert_property_is_valid_datetime_string(self, r, property_name):
        """Raise UserInputError unless the value is a future ISO datetime string.

        NOTE(review): if UserInputError subclasses ValueError, the
        "cannot be in the past" error raised inside the try would be caught
        below and re-raised as a format error — confirm the hierarchy.
        """
        if property_name not in r.keys():
            raise UserInputError(f"{property_name} is mandatory")
        if type(r[property_name]) != str:
            raise UserInputError(f"{property_name} needs to be in string format ex.2034-06-01 01:10:20")
        try:
            if datetime.now() > datetime.fromisoformat(r[property_name]):
                raise UserInputError(f"{property_name} cannot be in the past")
        except ValueError:
            raise UserInputError(f"{property_name} needs to be in string format ex.2034-06-01 01:10:20")
class AudioBookHandler(Handler):
def __init__(self, persistence_gateway):
super(AudioBookHandler, self).__init__(persistence_gateway)
def _get_collection(self):
return "audiobook"
def _get_valid_properties(self):
return {"title", "author", "narrator", "duration", "uploaded_time"}
def _assert_creation_parameters_are_correct(self, creation_parameters):
self._assert_property_is_valid_string(creation_parameters, 'title', 100)
self._assert_property_is_valid_string(creation_parameters, 'author', 100)
self._assert_property_is_valid_string(creation_parameters, 'narrator', 100)
self._assert_property_is_positive_integer(creation_parameters, 'duration')
self._assert_property_is_valid_datetime_string(creation_parameters, 'uploaded_time')
class SongHandler(Handler):
def __init__(self, persistence_gateway):
super(SongHandler, self).__init__(persistence_gateway)
def _get_collection(self):
return "song"
def _assert_creation_parameters_are_correct(self, r):
self._assert_property_is_valid_string(r, 'name', 100)
self._assert_property_is_positive_integer(r, 'duration')
self._assert_property_is_valid_datetime_string(r, 'uploaded_time')
def _get_valid_properties(self):
return {"name", "duration", "uploaded_time"}
class PodcastHandler(Handler):
def __init__(self, persistence_gateway):
super(PodcastHandler, self).__init__(persistence_gateway)
def _get_collection(self):
return "podcast"
def _assert_creation_parameters_are_correct(self, r):
self._assert_property_is_valid_string(r, 'name', 100)
self._assert_property_is_positive_integer(r, 'duration')
self._assert_property_is_valid_datetime_string(r, 'uploaded_time')
self._assert_property_is_valid_string(r, 'host', 100)
if 'participants' in r.keys():
if type(r['participants']) != list:
raise UserInputError("participants has to be a list")
if len(r['participants']) > 10:
raise UserInputError("participants can have maximum of 10 members")
for member in r['participants']:
if type(member) is not str:
raise UserInputError("Each member of the participant has to be string")
for member in r['participants']:
if len(member) > 100:
raise UserInputError("maximum characters allowed for all participants in 100")
def _get_valid_properties(self):
return {"name", "duration", "uploaded_time", "host", "participants"}
| [
"subramanian.nk@live.com"
] | subramanian.nk@live.com |
ce111a096fc4010887c3e124847b0bd5b9a433d8 | cf1ae859ca1857bf6d87e22244df0a0762bdf1b1 | /AdventOfCode2017/day03.py | d3a19d2c127a0ba7332e4f51ff7283f3d2515a02 | [] | no_license | LysanderGG/AdventOfCode2017 | 19e1cf32497f7c6209a783c55c8d9448c41491cc | d432155c7e6278f178c045a24e78134657a13a4f | refs/heads/master | 2021-09-14T03:58:20.950638 | 2018-05-08T02:14:16 | 2018-05-08T02:14:16 | 112,997,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,663 | py | input = 361527
from enum import Enum
class Direction(Enum):
LEFT = (-1, 0)
RIGHT = (1, 0)
TOP = (0, 1)
BOT = (0, -1)
def next_dir(d):
next_direction = {
Direction.LEFT: Direction.BOT,
Direction.BOT: Direction.RIGHT,
Direction.RIGHT: Direction.TOP,
Direction.TOP: Direction.LEFT,
}
return next_direction[d]
def generate(max):
l = {}
c = (0, 0)
dir = Direction.RIGHT
go_top_next = False
for i in range(1, max+1):
l[i] = c
if go_top_next:
dir = Direction.TOP
go_top_next = False
elif abs(c[0]) == abs(c[1]):
if dir == Direction.RIGHT:
go_top_next = True
else:
dir = next_dir(dir)
c = (c[0] + dir.value[0], c[1] + dir.value[1])
return l
def adjacent_values_sum(l, coord):
res = 0
adj_list = [
(-1, -1),
(-1, 0),
(-1, 1),
( 0, -1),
( 0, 1),
( 1, -1),
( 1, 0),
( 1, 1)
]
for c in adj_list:
adj = (c[0] + coord[0], c[1] + coord[1])
if adj in l:
res += l[adj]
return res
def generate2(max):
l = {(0,0) : 1}
c = (1, 0)
dir = Direction.RIGHT
go_top_next = True
last_val = 1
while last_val < max:
last_val = adjacent_values_sum(l, c)
l[c] = last_val
if go_top_next:
dir = Direction.TOP
go_top_next = False
elif abs(c[0]) == abs(c[1]):
if dir == Direction.RIGHT:
go_top_next = True
else:
dir = next_dir(dir)
c = (c[0] + dir.value[0], c[1] + dir.value[1])
return last_val
def solve(input):
grid = generate(input)
return abs(grid[input][0]) + abs(grid[input][1])
def solve2(input):
return generate2(input)
if __name__ == "__main__":
ans = solve(input)
print(f"Part1: {ans}")
ans = solve2(input)
print(f"Part2: {ans}")
| [
"lysandergc@gmail.com"
] | lysandergc@gmail.com |
22f45ce7c3eab8e3372b05c4c618757fa11bd942 | 419ff24d556bbf1078cb5106a67445c8dacd7220 | /sqllib.py | 251f1d7f79cfa133d32604036cb3f7ae846ed6d0 | [] | no_license | adtet/backend_elock | 9329cda534c593c0cfb7bd260777b9909e3b8e6c | f0ac8ebaa618e7587233b2cd9ca09fdb67a0c282 | refs/heads/main | 2023-06-09T21:27:18.297692 | 2021-06-28T00:10:02 | 2021-06-28T00:10:02 | 376,724,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,034 | py | from os import curdir
import mysql.connector
import json
from mysql.connector import cursor
def koneksi_sql():
db = mysql.connector.connect(host="localhost",
user="root",
password="",
database="db_elock")
return db
def input_user(plat_nomor,nik,nama,password,jenis_kendaraan):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("INSERT INTO `users`(`ID_Kendaraan`, `NIK_KTP`, `Nama`, `Password`, `Jenis_Kendaraan`) VALUES (%s,%s,%s,%s,%s)",(plat_nomor,nik,nama,password,jenis_kendaraan))
db.commit()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
def cek_platnomor(plat_nomor):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT `ID_Kendaraan` FROM `users` WHERE `ID_Kendaraan`=%s",(plat_nomor,))
c = cursor.fetchone()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
c = None
if c == None:
return False
else:
return True
def input_lokasi(plat_nomor,lat,long):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("INSERT INTO `lokasi`(`ID_Kendaraan`, `latitude`, `longitude`, `time`) VALUES (%s,%s,%s,now())",(plat_nomor,lat,long))
db.commit()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
def cek_user(nik,password):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT `NIK_KTP`, `Password` FROM `users` WHERE `NIK_KTP`=%s AND `Password`=%s ",(nik,password))
c = cursor.fetchone()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
c = None
if c == None:
return False
else:
return True
def update_lokasi(lat,long,plat_nomor):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("UPDATE `lokasi` SET `latitude`=%s,`longitude`=%s,`time`=now() WHERE `ID_Kendaraan`=%s",(lat,long,plat_nomor))
db.commit()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
def cek_plat_nomor_lokasi(plat_nomor):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT `ID_Kendaraan` FROM `lokasi` WHERE `ID_Kendaraan`=%s",(plat_nomor,))
c = cursor.fetchone()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
c = None
if c == None:
return False
else:
return True
def get_lokasi(plat_nomor):
db =koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT `latitude`, `longitude` FROM `lokasi` WHERE `ID_Kendaraan`=%s",(plat_nomor,))
c = cursor.fetchone()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
c = None
if c==None:
return None
else:
return c
def get_plat_nomor_base_nik_and_password(nik,password):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT `ID_Kendaraan` FROM `users` WHERE `NIK_KTP`=%s AND `Password`=%s",(nik,password))
c = cursor.fetchone()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
c = None
return c[0]
def input_laporan(plat_nomor,deskripsi,lat,lng):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("INSERT INTO `laporan`( `ID_Kendaraan`, `deskripsi`, `tanggal`, `lat`, `lng`) VALUES (%s,%s,now(),%s,%s)",(plat_nomor,deskripsi,lat,lng))
db.commit()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
def get_laporan_base_on_plat_nomor(plat_nomor):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT laporan.`ID_Kendaraan`, laporan.`deskripsi`, laporan.`tanggal`, laporan.`lat`, laporan.`lng`,users.Nama,users.Jenis_Kendaraan FROM `laporan` JOIN users ON users.ID_Kendaraan = laporan.ID_Kendaraan WHERE laporan.`ID_Kendaraan`=%s",(plat_nomor,))
rows = [x for x in cursor]
cols = [x[0] for x in cursor.description]
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
rows = []
cols = []
datas = []
for row in rows:
data = {}
for prop, val in zip(cols, row):
data[prop] = val
datas.append(data)
for i in range(0,len(datas)):
datas[i]['tanggal'] = str(datas[i]['tanggal'])
datajson = json.dumps(datas)
return datajson
def cek_plat_nomor_on_laporan(plat_nomor):
db = koneksi_sql()
cursor = db.cursor()
try:
cursor.execute("SELECT `ID_Kendaraan` FROM `laporan` WHERE `ID_Kendaraan`=%s",(plat_nomor,))
c = cursor.fetchone()
except(mysql.connector.Error,mysql.connector.Warning) as e:
print(e)
c = None
if c == None:
return False
else:
return True | [
"adiyatma.padang@gmail.com"
] | adiyatma.padang@gmail.com |
99ce5f1f02dc4e30702849d04ebc43d8bc515a61 | 23c6a394d7376b5a296248b7fcc185d163939266 | /tests/test_specific.py | e1af03e1a6d2d585a93dbd6a6ef6f842534f1658 | [
"BSD-3-Clause"
] | permissive | xadrnd/display_sdk_android | eac6d9b3f9fb1b669bce6d770f6dc7ac8ff69ac2 | 42b83ab9e5fce52ba180b7b4911046a4b55c1b08 | refs/heads/master | 2021-09-12T12:44:13.582162 | 2018-04-16T19:42:30 | 2018-04-16T19:42:30 | 113,114,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,360 | py | from test_base import DisplaySDKTest
from utils import *
SLEEP_INTERVAL = 2
class SpecificTest(DisplaySDKTest):
def dev_test(self):
switch_on_testmode(self)
def test_banner(self):
set_channel_id(self, "22394")
click_load_ad_btn(self, "Banner")
accept_location(self)
webview = block_until_webview(self)
sleep(SLEEP_INTERVAL)
webview = self.driver.find_element_by_id("com.xad.sdk.sdkdemo:id/banner_ad_container")
webview.click()
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
sleep(SLEEP_INTERVAL)
click_back_btn(self)
sleep(SLEEP_INTERVAL)
# Assert bid agent is called when clicked
assert_href_called(self, [r"/landingpage", r"notify.bidagent.xad.com"])
def test_interstitial(self):
set_channel_id(self, "15999")
click_load_ad_btn(self, "Interstitial")
accept_location(self)
sleep(SLEEP_INTERVAL)
block_until_webview(self)
sleep(SLEEP_INTERVAL)
switch_to_web_context(self)
sleep(SLEEP_INTERVAL)
xad_div = self.driver.find_element_by_id("xad")
self.assertIsNotNone(xad_div)
sleep(SLEEP_INTERVAL)
switch_to_native_context(self)
sleep(SLEEP_INTERVAL)
# nagivate back
try:
click_x_btn(self)
except:
pass
sleep(SLEEP_INTERVAL)
# Assert impression pixel is called
assert_href_called(self, [r"eastads.simpli.fi", r"notify.bidagent.xad.com"])
def test_vast_inline_linear(self):
set_channel_id(self, "24373")
click_load_ad_btn(self, "VIDEO")
accept_location(self)
block_until_class_name(self, "android.widget.ImageButton")
positions = []
positions.append((50, 50))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
sleep(SLEEP_INTERVAL)
click_back_btn(self)
sleep(30)
positions = []
positions.append((50, 50))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
sleep(SLEEP_INTERVAL)
click_back_btn(self)
sleep(40)
block_until_class_name(self, "android.widget.ImageButton")
btns = self.driver.find_elements_by_class_name("android.widget.ImageButton")
btns[0].click()
sleep(SLEEP_INTERVAL)
assert_href_called(self, [
r"/click$",
r"/impression$",
r"/creativeView$",
r"/start$",
r"/firstQuartile$",
r"/midpoint$",
r"/thirdQuartile$",
r"/complete$"
])
def test_vast_inline_linear_error(self):
set_channel_id(self, "24403")
click_load_ad_btn(self, "Video")
accept_location(self)
sleep(SLEEP_INTERVAL)
positions = []
positions.append((50, 50))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
# Assert error url called
assert_href_called(self, r"/error")
def test_vast_urlencode_error(self):
set_channel_id(self, "28228")
click_load_ad_btn(self, "Video")
accept_location(self)
sleep(SLEEP_INTERVAL)
positions = []
positions.append((50, 50))
self.driver.tap(positions)
def test_vast_wrapper_linear_1_error(self):
set_channel_id(self, "33838")
click_load_ad_btn(self, "Video")
accept_location(self)
sleep(SLEEP_INTERVAL)
positions = []
positions.append((50, 50))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
# Assert error url called
assert_href_called(self, r"/wrapper/error$")
assert_href_called(self, r"/error$")
def test_vast_wrapper_linear_1(self):
set_channel_id(self, "24383")
click_load_ad_btn(self, "Video")
accept_location(self)
block_until_class_name(self, "android.widget.ImageButton")
# Click middle video
positions = []
positions.append((5, 5))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
click_back_btn(self)
# Poll until video is done
sleep(35)
# Click middle video
positions = []
positions.append((5, 5))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
click_back_btn(self)
sleep(45)
btns = self.driver.find_elements_by_class_name("android.widget.ImageButton")
btns[0].click()
sleep(SLEEP_INTERVAL)
assert_href_called(self, [
r"/click$",
r"/impression$",
r"/creativeView$",
r"/start$",
r"/firstQuartile$",
r"/midpoint$",
r"/thirdQuartile$",
r"/complete$",
r"/wrapper/click$",
r"/wrapper/impression$",
r"/wrapper/start$",
r"/wrapper/firstQuartile$",
r"/wrapper/pause$",
r"/wrapper/resume$",
r"/wrapper/midpoint$",
r"/wrapper/thirdQuartile$",
r"/wrapper/complete$"
])
def test_vast_wrapper_linear_2(self):
set_channel_id(self, "24388")
click_load_ad_btn(self, "Video")
block_until_class_name(self, "android.widget.ImageButton")
sleep(SLEEP_INTERVAL)
sleep(30)
# Click middle video
positions = []
positions.append((5, 5))
self.driver.tap(positions)
sleep(SLEEP_INTERVAL)
assert_viewing_browser(self)
click_back_btn(self)
# Poll until video is done
sleep(30)
btns = self.driver.find_elements_by_class_name("android.widget.ImageButton")
btns[0].click()
sleep(SLEEP_INTERVAL)
assert_href_called(self, [
r"/click$",
r"/impression$",
r"/creativeView$",
r"/start$",
r"/firstQuartile$",
r"/midpoint$",
r"/thirdQuartile$",
r"/complete$",
r"/wrapper/impression$"
])
| [
"jacob.zelek@xad.com"
] | jacob.zelek@xad.com |
18222fbec173b1f264ef44678137c0537a20171a | 2baad3d7ac8df92ac7669bc5226b295f6e572109 | /openstack/cinder/exception.py | 47241139ccd27fd79ad6db6a5fa19c5baf884d53 | [] | no_license | naanal/reference | 649d61110c0200718de186012466183c35071399 | e5401ab6d185f9879da4b7fd20dead4823d662cc | refs/heads/master | 2020-04-12T07:33:59.989760 | 2016-12-21T12:26:30 | 2016-12-21T12:26:30 | 57,004,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,708 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder base exception handling.
Includes decorator for re-raising Cinder-type exceptions.
SHOULD include dedicated exception logging.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
import six
import webob.exc
from cinder.i18n import _, _LE
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class Error(Exception):
pass
class CinderException(Exception):
"""Base Cinder Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
self.kwargs['message'] = message
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
for k, v in self.kwargs.iteritems():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
if self._should_format():
try:
message = self.message % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
raise exc_info[0], exc_info[1], exc_info[2]
# at least get the core message out if something happened
message = self.message
elif isinstance(message, Exception):
message = six.text_type(message)
# NOTE(luisg): We put the actual message in 'msg' so that we can access
# it, because if we try to access the message via 'message' it will be
# overshadowed by the class' message attribute
self.msg = message
super(CinderException, self).__init__(message)
def _should_format(self):
return self.kwargs['message'] is None or '%(message)' in self.message
def __unicode__(self):
return six.text_type(self.msg)
class VolumeBackendAPIException(CinderException):
message = _("Bad or unexpected response from the storage volume "
"backend API: %(data)s")
class VolumeDriverException(CinderException):
message = _("Volume driver reported an error: %(message)s")
class BackupDriverException(CinderException):
message = _("Backup driver reported an error: %(message)s")
class GlanceConnectionFailed(CinderException):
message = _("Connection to glance failed: %(reason)s")
class NotAuthorized(CinderException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotAuthorized(CinderException):
message = _("Not authorized for image %(image_id)s.")
class DriverNotInitialized(CinderException):
message = _("Volume driver not ready.")
class Invalid(CinderException):
message = _("Unacceptable parameters.")
code = 400
class InvalidSnapshot(Invalid):
message = _("Invalid snapshot: %(reason)s")
class InvalidVolumeAttachMode(Invalid):
message = _("Invalid attaching mode '%(mode)s' for "
"volume %(volume_id)s.")
class VolumeAttached(Invalid):
message = _("Volume %(volume_id)s is still attached, detach volume first.")
class SfJsonEncodeFailure(CinderException):
message = _("Failed to load data into json format")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s")
class InvalidVolumeType(Invalid):
message = _("Invalid volume type: %(reason)s")
class InvalidVolume(Invalid):
message = _("Invalid volume: %(reason)s")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAuthKey(Invalid):
message = _("Invalid auth key: %(reason)s")
class InvalidConfigurationValue(Invalid):
message = _('Value "%(value)s" is not valid for '
'configuration option "%(option)s"')
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class DeviceUnavailable(Invalid):
message = _("The device in the path %(path)s is unavailable: %(reason)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class APIException(CinderException):
message = _("Error while requesting %(service)s API.")
def __init__(self, message=None, **kwargs):
if 'service' not in kwargs:
kwargs['service'] = 'unknown'
super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
message = _("Timeout while requesting %(service)s API.")
class NotFound(CinderException):
message = _("Resource could not be found.")
code = 404
safe = True
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeAttachmentNotFound(NotFound):
message = _("Volume attachment could not be found with "
"filter: %(filter)s .")
class VolumeMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeAdminMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no administration metadata with "
"key %(metadata_key)s.")
class InvalidVolumeMetadata(Invalid):
message = _("Invalid metadata: %(reason)s")
class InvalidVolumeMetadataSize(Invalid):
message = _("Invalid metadata size: %(reason)s")
class SnapshotMetadataNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeTypeNotFound(NotFound):
message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
message = _("Volume type with name %(volume_type_name)s "
"could not be found.")
class VolumeTypeAccessNotFound(NotFound):
message = _("Volume type access not found for %(volume_type_id)s / "
"%(project_id)s combination.")
class VolumeTypeExtraSpecsNotFound(NotFound):
message = _("Volume Type %(volume_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class VolumeTypeInUse(CinderException):
message = _("Volume Type %(volume_type_id)s deletion is not allowed with "
"volumes present with the type.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ServerNotFound(NotFound):
message = _("Instance %(uuid)s could not be found.")
class VolumeIsBusy(CinderException):
message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(CinderException):
message = _("deleting snapshot %(snapshot_name)s that has "
"dependent volumes")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(CinderException):
message = _("Quota exceeded for resources: %(overs)s")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class Duplicate(CinderException):
pass
class VolumeTypeExists(Duplicate):
message = _("Volume Type %(id)s already exists.")
class VolumeTypeAccessExists(Duplicate):
message = _("Volume type access for %(volume_type_id)s / "
"%(project_id)s combination already exists.")
class VolumeTypeEncryptionExists(Invalid):
message = _("Volume type encryption for type %(type_id)s already exists.")
class VolumeTypeEncryptionNotFound(NotFound):
message = _("Volume type encryption for type %(type_id)s does not exist.")
class MalformedRequestBody(CinderException):
message = _("Malformed message body: %(reason)s")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s")
class ParameterNotFound(NotFound):
message = _("Could not find parameter %(param)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
class NoValidHost(CinderException):
message = _("No valid host was found. %(reason)s")
class NoMoreTargets(CinderException):
"""No more available targets."""
pass
class QuotaError(CinderException):
message = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
class VolumeSizeExceedsAvailableQuota(QuotaError):
message = _("Requested volume or snapshot exceeds allowed gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
message = _("Requested backup exceeds allowed Backup gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
class VolumeLimitExceeded(QuotaError):
message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class BackupLimitExceeded(QuotaError):
message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")
class VolumeTypeCreateFailed(CinderException):
message = _("Cannot create volume_type with "
"name %(name)s and specs %(extra_specs)s")
class VolumeTypeUpdateFailed(CinderException):
message = _("Cannot update volume_type %(id)s")
class UnknownCmd(VolumeDriverException):
message = _("Unknown or unsupported command %(cmd)s")
class MalformedResponse(VolumeDriverException):
message = _("Malformed response to command %(cmd)s: %(reason)s")
class FailedCmdWithDump(VolumeDriverException):
message = _("Operation failed with status=%(status)s. Full dump: %(data)s")
class InvalidConnectorException(VolumeDriverException):
message = _("Connector doesn't have required information: %(missing)s")
class GlanceMetadataExists(Invalid):
message = _("Glance metadata cannot be updated, key %(key)s"
" exists for volume id %(volume_id)s")
class GlanceMetadataNotFound(NotFound):
message = _("Glance metadata for volume/snapshot %(id)s cannot be found.")
class ExportFailure(Invalid):
message = _("Failed to export for volume: %(reason)s")
class RemoveExportException(VolumeDriverException):
message = _("Failed to remove export for volume %(volume)s: %(reason)s")
class MetadataCreateFailure(Invalid):
message = _("Failed to create metadata for volume: %(reason)s")
class MetadataUpdateFailure(Invalid):
message = _("Failed to update metadata for volume: %(reason)s")
class MetadataCopyFailure(Invalid):
message = _("Failed to copy metadata to volume: %(reason)s")
class ImageCopyFailure(Invalid):
message = _("Failed to copy image to volume: %(reason)s")
class BackupInvalidCephArgs(BackupDriverException):
message = _("Invalid Ceph args provided for backup rbd operation")
class BackupOperationError(Invalid):
message = _("An error has occurred during backup operation")
class BackupMetadataUnsupportedVersion(BackupDriverException):
message = _("Unsupported backup metadata version requested")
class BackupVerifyUnsupportedDriver(BackupDriverException):
message = _("Unsupported backup verify driver")
class VolumeMetadataBackupExists(BackupDriverException):
message = _("Metadata backup already exists for this volume")
class BackupRBDOperationFailed(BackupDriverException):
message = _("Backup RBD operation failed")
class EncryptedBackupOperationFailed(BackupDriverException):
message = _("Backup operation of an encrypted volume failed.")
class BackupNotFound(NotFound):
message = _("Backup %(backup_id)s could not be found.")
class BackupFailedToGetVolumeBackend(NotFound):
message = _("Failed to identify volume backend.")
class InvalidBackup(Invalid):
message = _("Invalid backup: %(reason)s")
class SwiftConnectionFailed(BackupDriverException):
message = _("Connection to swift failed: %(reason)s")
class TransferNotFound(NotFound):
message = _("Transfer %(transfer_id)s could not be found.")
class VolumeMigrationFailed(CinderException):
message = _("Volume migration failed: %(reason)s")
class SSHInjectionThreat(CinderException):
message = _("SSH command injection detected: %(command)s")
class QoSSpecsExists(Duplicate):
message = _("QoS Specs %(specs_id)s already exists.")
class QoSSpecsCreateFailed(CinderException):
message = _("Failed to create qos_specs: "
"%(name)s with specs %(qos_specs)s.")
class QoSSpecsUpdateFailed(CinderException):
message = _("Failed to update qos_specs: "
"%(specs_id)s with specs %(qos_specs)s.")
class QoSSpecsNotFound(NotFound):
message = _("No such QoS spec %(specs_id)s.")
class QoSSpecsAssociateFailed(CinderException):
message = _("Failed to associate qos_specs: "
"%(specs_id)s with type %(type_id)s.")
class QoSSpecsDisassociateFailed(CinderException):
message = _("Failed to disassociate qos_specs: "
"%(specs_id)s with type %(type_id)s.")
class QoSSpecsKeyNotFound(NotFound):
message = _("QoS spec %(specs_id)s has no spec with "
"key %(specs_key)s.")
class InvalidQoSSpecs(Invalid):
message = _("Invalid qos specs: %(reason)s")
class QoSSpecsInUse(CinderException):
message = _("QoS Specs %(specs_id)s is still associated with entities.")
class KeyManagerError(CinderException):
msg_fmt = _("key manager error: %(reason)s")
class ManageExistingInvalidReference(CinderException):
message = _("Manage existing volume failed due to invalid backend "
"reference %(existing_ref)s: %(reason)s")
class ReplicationError(CinderException):
message = _("Volume %(volume_id)s replication "
"error: %(reason)s")
class ReplicationNotFound(NotFound):
message = _("Volume replication for %(volume_id)s "
"could not be found.")
class ManageExistingVolumeTypeMismatch(CinderException):
message = _("Manage existing volume failed due to volume type mismatch: "
"%(reason)s")
class ExtendVolumeError(CinderException):
message = _("Error extending volume: %(reason)s")
class EvaluatorParseException(Exception):
message = _("Error during evaluator parsing: %(reason)s")
class ObjectActionError(CinderException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ObjectFieldInvalid(CinderException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class UnsupportedObjectError(CinderException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(CinderException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(CinderException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ReadOnlyFieldError(CinderException):
msg_fmt = _('Cannot modify readonly field %(field)s')
# Driver specific exceptions
# Coraid
class CoraidException(VolumeDriverException):
message = _('Coraid Cinder Driver exception.')
class CoraidJsonEncodeFailure(CoraidException):
message = _('Failed to encode json data.')
class CoraidESMBadCredentials(CoraidException):
message = _('Login on ESM failed.')
class CoraidESMReloginFailed(CoraidException):
message = _('Relogin on ESM failed.')
class CoraidESMBadGroup(CoraidException):
message = _('Group with name "%(group_name)s" not found.')
class CoraidESMConfigureError(CoraidException):
message = _('ESM configure request failed: %(reason)s')
class CoraidESMNotAvailable(CoraidException):
message = _('Coraid ESM not available with reason: %(reason)s')
# Pure Storage
class PureDriverException(VolumeDriverException):
message = _("Pure Storage Cinder driver failure: %(reason)s")
# Zadara
class ZadaraException(VolumeDriverException):
message = _('Zadara Cinder Driver exception.')
class ZadaraServerCreateFailure(ZadaraException):
message = _("Unable to create server object for initiator %(name)s")
class ZadaraServerNotFound(ZadaraException):
message = _("Unable to find server object for initiator %(name)s")
class ZadaraVPSANoActiveController(ZadaraException):
message = _("Unable to find any active VPSA controller")
class ZadaraAttachmentsNotFound(ZadaraException):
message = _("Failed to retrieve attachments for volume %(name)s")
class ZadaraInvalidAttachmentInfo(ZadaraException):
message = _("Invalid attachment info for volume %(name)s: %(reason)s")
class BadHTTPResponseStatus(ZadaraException):
message = _("Bad HTTP response status %(status)s")
# SolidFire
class SolidFireAPIException(VolumeBackendAPIException):
message = _("Bad response from SolidFire API")
class SolidFireDriverException(VolumeDriverException):
message = _("SolidFire Cinder Driver exception")
class SolidFireAPIDataException(SolidFireAPIException):
message = _("Error in SolidFire API response: data=%(data)s")
class SolidFireAccountNotFound(SolidFireDriverException):
message = _("Unable to locate account %(account_name)s on "
"Solidfire device")
class SolidFireRetryableException(VolumeBackendAPIException):
message = _("Retryable SolidFire Exception encountered")
# HP 3Par
class Invalid3PARDomain(VolumeDriverException):
message = _("Invalid 3PAR Domain: %(err)s")
# RemoteFS drivers
class RemoteFSException(VolumeDriverException):
message = _("Unknown RemoteFS exception")
class RemoteFSNoSharesMounted(RemoteFSException):
message = _("No mounted shares found")
class RemoteFSNoSuitableShareFound(RemoteFSException):
message = _("There is no share which can host %(volume_size)sG")
# NFS driver
class NfsException(RemoteFSException):
message = _("Unknown NFS exception")
class NfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted NFS shares found")
class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# Smbfs driver
class SmbfsException(RemoteFSException):
message = _("Unknown SMBFS exception.")
class SmbfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted SMBFS shares found.")
class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG.")
# Gluster driver
class GlusterfsException(RemoteFSException):
message = _("Unknown Gluster exception")
class GlusterfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted Gluster shares found")
class GlusterfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# HP MSA
class HPMSAVolumeDriverException(VolumeDriverException):
message = _("HP MSA Volume Driver exception")
class HPMSAInvalidVDisk(HPMSAVolumeDriverException):
message = _("VDisk doesn't exist (%(vdisk)s)")
class HPMSAConnectionError(HPMSAVolumeDriverException):
message = _("Unable to connect to MSA array")
class HPMSANotEnoughSpace(HPMSAVolumeDriverException):
message = _("Not enough space on VDisk (%(vdisk)s)")
# Fibre Channel Zone Manager
class ZoneManagerException(CinderException):
message = _("Fibre Channel connection control failure: %(reason)s")
class FCZoneDriverException(CinderException):
message = _("Fibre Channel Zone operation failed: %(reason)s")
class FCSanLookupServiceException(CinderException):
message = _("Fibre Channel SAN Lookup failure: %(reason)s")
class BrocadeZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
class CiscoZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
class NetAppDriverException(VolumeDriverException):
message = _("NetApp Cinder Driver exception.")
class EMCVnxCLICmdError(VolumeBackendAPIException):
message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s "
"(Return Code: %(rc)s) (Output: %(out)s).")
# ConsistencyGroup
class ConsistencyGroupNotFound(NotFound):
message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.")
class InvalidConsistencyGroup(Invalid):
message = _("Invalid ConsistencyGroup: %(reason)s")
# CgSnapshot
class CgSnapshotNotFound(NotFound):
message = _("CgSnapshot %(cgsnapshot_id)s could not be found.")
class InvalidCgSnapshot(Invalid):
message = _("Invalid CgSnapshot: %(reason)s")
# Hitachi Block Storage Driver
class HBSDError(CinderException):
message = _("HBSD error occurs.")
class HBSDCmdError(HBSDError):
def __init__(self, message=None, ret=None, err=None):
self.ret = ret
self.stderr = err
super(HBSDCmdError, self).__init__(message=message)
class HBSDBusy(HBSDError):
message = "Device or resource is busy."
class HBSDNotFound(NotFound):
message = _("Storage resource could not be found.")
class HBSDVolumeIsBusy(VolumeIsBusy):
message = _("Volume %(volume_name)s is busy.")
# Datera driver
class DateraAPIException(VolumeBackendAPIException):
message = _("Bad response from Datera API")
# Target drivers
class ISCSITargetCreateFailed(CinderException):
message = _("Failed to create iscsi target for volume %(volume_id)s.")
class ISCSITargetRemoveFailed(CinderException):
message = _("Failed to remove iscsi target for volume %(volume_id)s.")
class ISCSITargetAttachFailed(CinderException):
message = _("Failed to attach iSCSI target for volume %(volume_id)s.")
class ISCSITargetDetachFailed(CinderException):
message = _("Failed to detach iSCSI target for volume %(volume_id)s.")
class ISCSITargetHelperCommandFailed(CinderException):
message = _("%(error_message)s")
# X-IO driver exception.
class XIODriverException(VolumeDriverException):
message = _("X-IO Volume Driver exception!")
# Violin Memory drivers
class ViolinInvalidBackendConfig(CinderException):
message = _("Volume backend config is invalid: %(reason)s")
class ViolinRequestRetryTimeout(CinderException):
message = _("Backend service retry timeout hit: %(timeout)s sec")
class ViolinBackendErr(CinderException):
message = _("Backend reports: %(message)s")
class ViolinBackendErrExists(CinderException):
message = _("Backend reports: item already exists")
class ViolinBackendErrNotFound(CinderException):
message = _("Backend reports: item not found")
# ZFSSA NFS driver exception.
class WebDAVClientError(CinderException):
message = _("The WebDAV request failed. Reason: %(msg)s, "
"Return code/reason: %(code)s, Source Volume: %(src)s, "
"Destination Volume: %(dst)s, Method: %(method)s.")
# XtremIO Drivers
class XtremIOAlreadyMappedError(CinderException):
message = _("Volume to Initiator Group mapping already exists")
# DOTHILL drivers
class DotHillInvalidBackend(CinderException):
message = _("Backend doesn't exist (%(backend)s)")
class DotHillConnectionError(CinderException):
message = _("%(message)s")
class DotHillAuthenticationError(CinderException):
message = _("%(message)s")
class DotHillNotEnoughSpace(CinderException):
message = _("Not enough space on backend (%(backend)s)")
class DotHillRequestError(CinderException):
message = _("%(message)s")
class DotHillNotTargetPortal(CinderException):
message = _("No active iSCSI portals with supplied iSCSI IPs")
| [
"root@fuel.domain.tld"
] | root@fuel.domain.tld |
a279b4dd33c2332778b01ecfcb3b7b718e63bc99 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-dcs/huaweicloudsdkdcs/v2/model/resources.py | 35adc37ecc80c3cad66d112c38ed5ca85d9ad95c | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,638 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Resources:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'unit': 'str',
'min': 'int',
'max': 'int',
'quota': 'int',
'used': 'int',
'type': 'str'
}
attribute_map = {
'unit': 'unit',
'min': 'min',
'max': 'max',
'quota': 'quota',
'used': 'used',
'type': 'type'
}
def __init__(self, unit=None, min=None, max=None, quota=None, used=None, type=None):
"""Resources - a model defined in huaweicloud sdk"""
self._unit = None
self._min = None
self._max = None
self._quota = None
self._used = None
self._type = None
self.discriminator = None
if unit is not None:
self.unit = unit
if min is not None:
self.min = min
if max is not None:
self.max = max
if quota is not None:
self.quota = quota
if used is not None:
self.used = used
if type is not None:
self.type = type
@property
def unit(self):
"""Gets the unit of this Resources.
资源的计数单位。 - 当type为instance时,无单位。 - 当type为ram时,单位为GB。
:return: The unit of this Resources.
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit):
"""Sets the unit of this Resources.
资源的计数单位。 - 当type为instance时,无单位。 - 当type为ram时,单位为GB。
:param unit: The unit of this Resources.
:type: str
"""
self._unit = unit
@property
def min(self):
"""Gets the min of this Resources.
- 当type为instance时,表示可申请实例配额的最小值。 - 当type为ram时,表示可申请内存配额的最小值。
:return: The min of this Resources.
:rtype: int
"""
return self._min
@min.setter
def min(self, min):
"""Sets the min of this Resources.
- 当type为instance时,表示可申请实例配额的最小值。 - 当type为ram时,表示可申请内存配额的最小值。
:param min: The min of this Resources.
:type: int
"""
self._min = min
@property
def max(self):
"""Gets the max of this Resources.
- 当type为instance时,表示可申请实例配额的最大值。 - 当type为ram时,表示可申请内存配额的最大值。
:return: The max of this Resources.
:rtype: int
"""
return self._max
@max.setter
def max(self, max):
"""Sets the max of this Resources.
- 当type为instance时,表示可申请实例配额的最大值。 - 当type为ram时,表示可申请内存配额的最大值。
:param max: The max of this Resources.
:type: int
"""
self._max = max
@property
def quota(self):
"""Gets the quota of this Resources.
可以创建的实例最大数和总内存的配额限制。
:return: The quota of this Resources.
:rtype: int
"""
return self._quota
@quota.setter
def quota(self, quota):
"""Sets the quota of this Resources.
可以创建的实例最大数和总内存的配额限制。
:param quota: The quota of this Resources.
:type: int
"""
self._quota = quota
@property
def used(self):
"""Gets the used of this Resources.
已创建的实例个数和已使用的内存配额。
:return: The used of this Resources.
:rtype: int
"""
return self._used
@used.setter
def used(self, used):
"""Sets the used of this Resources.
已创建的实例个数和已使用的内存配额。
:param used: The used of this Resources.
:type: int
"""
self._used = used
@property
def type(self):
"""Gets the type of this Resources.
支持instance、ram两种。 - instance表示实例配额。 - ram表示内存配额。
:return: The type of this Resources.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Resources.
支持instance、ram两种。 - instance表示实例配额。 - ram表示内存配额。
:param type: The type of this Resources.
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Resources):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
b1bee79a770a853c0c9cd06cc7438554cfae08c9 | 4e7f0e438e7d156ac1a27551181ca5430b3ece4a | /api/category/migrations/0001_initial.py | 6b4d4f3f491204f9256fffd1348e51f4d8494187 | [] | no_license | amangoyalji/onlinemart | 9a355a43049489ead8fda0e41efc7ffc1f8565a5 | bac61801f6b0148e2a82153f66cecc6bab938518 | refs/heads/master | 2022-12-20T21:31:11.858072 | 2020-09-15T02:44:41 | 2020-09-15T02:44:41 | 288,076,418 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # Generated by Django 3.1 on 2020-08-16 10:40
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.CharField(max_length=250)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"amana6398@gmail.com"
] | amana6398@gmail.com |
ef455910a003cfc1abbbadb2a5f3bc5e9e71c438 | 66053d9fcfe5734a833b6afea7335e4bad6ce9cf | /move_zeroes.py | 9280bb7858558b59690f55e643f1254662b53e44 | [] | no_license | redi-vinogradov/leetcode | 27bc8954801f819e7144403c215615c5a5675c8d | 56c40c1a9af7466a4bfc423d9ab110d8a21cd751 | refs/heads/master | 2020-03-31T09:19:09.411661 | 2018-10-11T18:33:09 | 2018-10-11T18:33:09 | 152,091,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | def moveZeroes(nums):
""" The trick is very simple: check if value is equal to zero and if it is
than remove that value from the list and add it to the end.
"""
zeroes = []
for i in nums:
if i == 0:
nums.remove(i)
nums.append(i)
print(nums)
def main():
nums = [0,1,0,3,12]
moveZeroes(nums)
if __name__ == '__main__':
main()
| [
"anton.vinogradov@thomsonreuters.com"
] | anton.vinogradov@thomsonreuters.com |
5165637a2110ba9de1465ca309f7f4353168dd71 | 0a4f57a41b536aab19bef26df300a359f3f2d626 | /app/misc.py | 5fc638df5c5012bb4a48221081166681d15ccb5e | [] | no_license | ash11sh/remove-glass | 9d4b19ddd4bcaa307f5fdf0b5ea6967a214a3104 | 6c8483e82590b644dcfeed4841f737c280415381 | refs/heads/main | 2023-05-02T17:57:20.757302 | 2023-04-16T12:44:50 | 2023-04-16T12:44:50 | 334,052,835 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | import cv2
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
from util.prepare_images import *
from torchvision.utils import save_image
import os
os.environ["LRU_CACHE_CAPACITY"] = "1"
def get_potrait(test_image, interpreter,input_details,output_details):
# get the potrait mask output
im = np.asarray(test_image)
h, w, _ = im.shape
face_rgba = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
# resize
image = cv2.resize(face_rgba, (512, 512), interpolation=cv2.INTER_AREA)
# Preprocess the input image
test_image = image / 255.0
test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
# Run the interpreter and get the output
interpreter.set_tensor(input_details["index"], test_image)
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
# Compute mask from segmentaion output
mask = np.reshape(output, (512, 512)) > 0.5
mask = (mask * 255).astype(np.uint8)
# resize the mask output
bin_mask = cv2.resize(mask, (w, h))
# extract the potrait
image = np.dstack((im, bin_mask))
# make background white
face = image[:, :, :3].copy()
mask = image[:, :, 3].copy()[:, :, np.newaxis] / 255.0
face_white_bg = (face * mask + (1 - mask) * 255).astype(np.uint8)
# convert image to PIL format
mask = Image.fromarray(bin_mask)
im = Image.fromarray(face_white_bg)
return im, mask
def upscale(img, model_cran_v2):
# convert pil image to tensor
img_t = transforms.ToTensor()(img).unsqueeze(0)
# used to compare the origin
img = img.resize((img.size[0] // 2, img.size[1] // 2), Image.BICUBIC)
img_splitter = ImageSplitter(seg_size=64, scale_factor=2, boarder_pad_size=3)
img_patches = img_splitter.split_img_tensor(img, scale_method=None, img_pad=0)
with torch.no_grad():
out = [model_cran_v2(i) for i in img_patches]
img_upscale = img_splitter.merge_img_tensor(out)
save_image(img_upscale, "app/removal.png")
return Image.open("app/removal.png")
| [
"ashish369@live.com"
] | ashish369@live.com |
5d8197d9782fdee45aca92a2884c0f7069f30274 | 1a38ae9466f84da7b7ee3cce2cb7f94bfdf40492 | /connect_four.py | 2c0dce3ed0d791c8977c7598b42c7a53f5b498e0 | [] | no_license | ayushmann01/connectFour | 74c138ecc33db42275de1e857ee3f85dcec85ad6 | a774eb32c0bc594e25a24f3601109271b4a42e7a | refs/heads/master | 2022-09-24T09:17:21.178212 | 2020-06-02T20:29:08 | 2020-06-02T20:29:08 | 268,901,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,021 | py | import numpy
import pygame
import sys
import math
ROW_COUNT = 6
COLUMN_COUNT = 7
BLUE = (0, 0, 250)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
# pygame initialization
pygame.init()
SQUARESIZE = 100
width = COLUMN_COUNT * SQUARESIZE
height = (ROW_COUNT + 1) * SQUARESIZE
RADIUS = int(SQUARESIZE / 2 - 5)
size = (width, height)
screen = pygame.display.set_mode(size)
myfont = pygame.font.SysFont("monospace", 70)
# functions
def createBoard():
board = numpy.zeros((ROW_COUNT, COLUMN_COUNT))
return board
def print_board(board):
print(numpy.flip(board, 0))
def drop_piece(board, row, col, piece):
board[row][col] = piece
def is_valid_location(board, col):
return board[ROW_COUNT - 1][col] == 0
def get_next_open_row(board, col):
for row in range(ROW_COUNT):
if board[row][col] == 0:
return row
def winning_move(board, piece):
# check horizontal location for win
for c in range(COLUMN_COUNT - 3):
for r in range(ROW_COUNT):
if board[r][c] == piece and board[r][c + 1] == piece and board[r][c + 2] == piece and board[r][ c + 3] == piece:
return True
# check vertical location for win
for c in range(COLUMN_COUNT - 3):
for r in range(ROW_COUNT):
if board[r][c] == piece and board[r + 1][c] == piece and board[r + 2][c] == piece and board[r + 3][c] == piece:
return True
# check positively sloped diagonals
for c in range(COLUMN_COUNT - 3):
for r in range(ROW_COUNT - 3):
if board[r][c] == piece and board[r + 1][c + 1] == piece and board[r + 2][c + 2] == piece and board[r + 3][ c + 3] == piece:
return True
# check negatively sloped diagonals
for c in range(COLUMN_COUNT - 3):
for r in range(3, ROW_COUNT):
if board[r][c] == piece and board[r - 1][c + 1] == piece and board[r - 2][c + 2] == piece and board[r - 3][c + 3] == piece:
return True
def draw_board(board):
for c in range(COLUMN_COUNT):
for r in range(ROW_COUNT):
pygame.draw.rect(screen, BLUE, (c * SQUARESIZE, r * SQUARESIZE + SQUARESIZE, SQUARESIZE, SQUARESIZE))
pygame.draw.circle(screen, BLACK, (
int(c * SQUARESIZE + SQUARESIZE / 2), int(r * SQUARESIZE + SQUARESIZE + SQUARESIZE / 2)), RADIUS)
for c in range(COLUMN_COUNT):
for r in range(ROW_COUNT):
if board[r][c] == 1:
pygame.draw.circle(screen, RED, (
int(c * SQUARESIZE + SQUARESIZE / 2), height - int(r * SQUARESIZE + SQUARESIZE / 2)), RADIUS)
elif board[r][c] == 2:
pygame.draw.circle(screen, YELLOW, (
int(c * SQUARESIZE + SQUARESIZE / 2), height - int(r * SQUARESIZE + SQUARESIZE / 2)), RADIUS)
pygame.display.update()
board = createBoard()
print_board(board)
draw_board(board)
pygame.display.update()
game_over = False
turn = 0
# main game loop
while not game_over:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEMOTION:
pygame.draw.rect(screen, BLACK, (0, 0, width, SQUARESIZE))
posx = event.pos[0]
if turn == 0:
pygame.draw.circle(screen, RED, (posx, int(SQUARESIZE/2)), RADIUS)
else:
pygame.draw.circle(screen, YELLOW, (posx, int(SQUARESIZE / 2)), RADIUS)
pygame.display.update()
if event.type == pygame.MOUSEBUTTONDOWN:
# print(event.pos)
pygame.draw.rect(screen, BLACK, (0, 0, width, SQUARESIZE))
# Ask for player 1 input
if turn == 0:
posx = event.pos[0]
col = int(math.floor(posx / SQUARESIZE))
if is_valid_location(board, col):
row = get_next_open_row(board, col)
drop_piece(board, row, col, 1)
if winning_move(board, 1):
print("Player 1 wins^^^ Congrats!!!")
label = myfont.render("Player 1 wins!!!", 1, RED)
screen.blit(label, (35, 10))
game_over = True
# Ask for player 2 input
else:
posx = event.pos[0]
col = int(math.floor(posx / SQUARESIZE))
if is_valid_location(board, col):
row = get_next_open_row(board, col)
drop_piece(board, row, col, 2)
if winning_move(board, 2):
# print("Player 1 wins^^^ Congrats!!!")
label = myfont.render("Player 2 wins!!!", 1, YELLOW)
screen.blit(label, (35, 10))
game_over = True
print_board(board)
draw_board(board)
turn += 1
turn %= 2
if game_over:
pygame.time.wait(3000)
| [
"ayushmanraghav.ra@gmail.com"
] | ayushmanraghav.ra@gmail.com |
dedd1b7419b7e144c4b15461094538f0b7fc9a1b | 2b54f28c6c46962b77f53dcc1370a3e2d60f7e41 | /test/api/test_main.py | 3c98faa28a409c15993304b4c2ef7ce9c69aafc5 | [] | no_license | tales-cp/candidate-assessment | fb75cb8c8e585a4567d8c3ceaac835b9d0e7915a | dd82bfd5333ba7349a2e113796834a5ca09cdf62 | refs/heads/main | 2023-03-04T13:38:14.092212 | 2021-02-01T03:57:25 | 2021-02-01T03:57:25 | 334,732,197 | 0 | 0 | null | 2021-01-31T18:54:21 | 2021-01-31T18:54:21 | null | UTF-8 | Python | false | false | 269 | py | import http
from fastapi.testclient import TestClient
from project.api.main import app
client = TestClient(app)
class TestMainAPI:
def test_health_check(self) -> None:
response = client.get("/")
assert response.status_code == http.HTTPStatus.OK
| [
"tales_padua@yahoo.com.br"
] | tales_padua@yahoo.com.br |
2adff8a00299133ecea95a2ce55a9f1bde0b2cfa | 080608425194de78242606de286f26b616ce997b | /stage2/fill-in-the-blanks.py | 7d29902190c6cf0fc7f58d0dcf2ca01e424f6493 | [] | no_license | julianestebanquintana/IFND | 16f10d79f085b1d17fcce5dbdc1df1ac2e9b3b33 | 6843999a510a14ac28d47611afa0439ff0f36f6a | refs/heads/master | 2021-01-10T09:31:52.738353 | 2016-02-29T22:57:49 | 2016-02-29T22:57:49 | 50,877,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,546 | py | #################### DEFINITION OF VARIABLES ######################
questionary = [['To communicate with computers and make them do whatever is needed, we use programming languages; these have a different set of rules than natural languages (called a ______).', 'syntax'],
['Internet is a network of computers, they communicate with each other through a protocol called ______, and they transfer files from one to another.', 'http'],
['Html gives to web pages the structure; the elements in it are tree-like, meaning that there is a branching pattern. CSS is a different language, used to give ______ to the web sites.', 'style'],
['The basic HTML element format is: < opening-tag > content < closing-tag > , although there are some tags without closing tag, called void tags. Some tags have also attributes, like class or id. In html everything is considered a box, and some boxes are inside bigger boxes (the branching pattern). There are tags that make this boxes, called block tags; but there are also tags that give structure to elements in the html without splitting it from the rectangle in which it is, and are called ______ elements.', 'inline'],
['Span is used to select an inline element, while div forms blocks. ______ is an attribute used for multiple alike elements, while id gives name to one element and must not be repeated in the same html.', 'class'],
['The HTML tree-like structure, where one has branches going down the document (or boxes containing other boxes, if better understood) is modeled by the ______, an API implemented in every browser to handle the structure of web pages; it models every element of an HTML document as an object, allowing the manipulation of these objects by other languages.', 'dom'],
['Css goes down through every element (Cascading) and applies the most specific rule to it. In the computer sciences, there is an abstract concept called ______; is a mechanism by which properties asigned to an element are applied to itself and its descending elements. This idea is important, since it avoids repetition.', 'inheritance'],
['In css we use the ______, which are called so because they permit to choose the element for style application. They are composed by the name of the tag or the dot and the name of one attribute. After it, between brackets, come the styling rules that will be applied to the selected elements.', 'selectors'],
['There are three methods to include style: To include CSS in the html head, using the style tag. To link a css file as a separate stylesheet, using the ______ tag in the html head. To write the style inline as a tag attribute, which is not a preferred method.', 'link'],
['Every element is considered part of a box, and every box is modeled as having, from outside to inside: ______, border, padding, content. Each of these can be modified to give the needed style.', 'margin'],
['Computational science is about how to solve problems. Programming is the main thing in computational science. Most of the human made machines are designed to accomplish only one single work. Computers by themselves cant accomplish any labor; but computers with programs, can do a lot of different things. A computer program is a simple sequence of instructions, sequence that the computer must follow closely; these different instructions are made using a ______ language, and python is just one of them.', 'programming'],
['Programming languages are different from natural languages in that are less verbose, less redundant and specially unambiguous, as well as space saving. Most languages today have a set of rules (a syntax) described as ______ form; they are composed by expressions, being an expression an abstract concept that has some value. The expressions can be combined by the construction <non-terminal> -> replacement, where the arrow signals an assignment operation.', 'Backus - Naur'],
['In Backus-Naur form,every expression can be successively replaced until it gets to a ______ value, where it can be no longer replaced. Some other rules come from this construct, like expression -> expression operator expression, expression -> number or expression -> (expression).', 'terminal'],
['A variable is a name that refers to a value. In python, a variable is assigned using the form <variable name> = expression, where = corresponds to the ______ symbol. As a convention, the first letter of the variable must be lowercase. Care must be taken to avoid confusing mathematical equal to with the = operator = used in python.', 'assignment'],
['In python, text is represented as a ______, which is a sequence of characters (letters, digits and symbols); can contain any number of characters and is built by surrounding text by single or double quotes.', 'string'],
['The operator +, when used with two strings, concatenates them; also, using the operator * for multiplying a string times an integer, produces a bigger string composed of multiple times the initial string. Strings can be indexed and sliced; in python, the first component the string is indexed as ______. The method for indexation is using square brackets after the variable.', 'zero'],
['The ______ function sends an output to the console. To manufacture a multiline string, that includes returns and spaces, triple single quotes or triple double quotes are used.', 'print'],
['______ are procedures that are used frequently used, so instead of declaring them every time it is needed, it is defined in a generical way, and easily called when desired. To define one, we use the python reserved word def.', 'functions'],
['To use the function, is called by the name and passing it the necessary arguments. Every function has inputs and outputs. To get an output from a function, it must ______ something.', 'return'],
['If a variable is passed to a function, it does not get modified as a result of the function; the variables defined inside the function, that is, that are ______ to the function, are encapsulated and wont exist outside the function.', 'local'],
['In order to make decisions, it is used the ______ construct, which evaluates a test expression and, according to its value (True or False) performs the instructions of one block or another.', 'if'],
['The test expression is usually constructed with less than (<), more than (>), is equal to (==), is different to (!=), or and and. The end of the block is determined by ______.', 'indentation'],
['To perform a repetitive action, the while loop is used. The block is executed while the text expression is ______, and when it changes, the block execution stops; in order to avoid that a block keeps going forever, in every iteration of the block, something must change. Sometimes, an index variable is used to keep track of the execution, and when it reaches a certain value, it makes the test expression False, so the loop execution stops.', 'true'],
['While programming is unavoidable to have mistakes; its very important to develop a programming technique that allows to find easily the mistakes, and a method to find and correct programming mistakes systematically. The methodology to find and correct mistakes is called ______.', 'debugging'],
['Debugging has some important steps: Read the ______ message; in python, the message gives a type of error and number of line where execution stopped; although the error can be found there, it can also be upstream.', 'error'],
['One option is to copy a similar ______, and approach to the needed code, changing step by step, one small part every time, evaluating what happens when every small thing is changed.', 'function'],
['Bugs that dont stop code execution are harder to find and make programs work erratically; if one is not getting the expected results, it is useful to ______ intermediate results.', 'print'],
['Sometimes, code works partially; if one gets to a point where major changes must be done, one can screw up what is really working, so it is really important to not loose what is working well; it is preferable to comment the original code, save a copy or use version controllers, like ______.', 'git'],
['Recapitulating the ______ strategy: error messages must be examined, work from example code doing small step modifications, get sure that code from third parties work correctly, print intermediate results, and keep and compare old versions.', 'debugging'],
['Comments are very useful when debugging; single line comments are made with #. ______ are multiple line comments that the python interpreter retains as function documentation. Comments must be kept updated, while code is modified. The idea is that code must be concise, always consistent with the style chosen to document the code, and must give important information.', 'docstrings'],
['String is a data type that is structured; it is a sequence of characters that can be split into smaller pieces (characters). But strings arent complex enough, not everything can be done with a string, so python implements bigger structured data types. ______ are sequences of anything; in python, they are formed with square brackets, their elements are separated by commas and are indexed in the same way that strings.', 'lists'],
['Lists are ______, that meaning they can be modified after being created. Strings cant change after creation; elements can be appended to strings, but they form new strings, wont modify the originals. This seemingly subtle difference gets more notorious if one introduces a new variable and assigns it to same object: if a list changes with one name, also changes with the other; this is called aliasing: there are two ways to call the same object.', 'mutable']]
welcome_message = [ "Hello! Welcome to 'Test your knowledge of Programming'. ", "You will be exposed to some sentences, relating to the IPND of Udacity, lessons 1 & 2.", "Write your answers in lower case, and take well care of the spelling.", "When you finish typing your answer pulse Enter. Lets go!"]
user_answers = []
right_answers = [0,]
#################### DEFINITION OF FUNCTIONS USED IN THE GAME ######################
def list_slice(level, big_list):
""" Provided a chosen level, splits a list of questions and answers"""
if level == 1:
sliced_list = big_list[0:11]
elif level == 2:
sliced_list = big_list[11:20]
else:
sliced_list = big_list[20:]
return sliced_list
def get_level(big_list):
"""Prompts the user to choose a game level from 1 to 3"""
try:
level = int(input("Please enter the desired level, from 1 to 3: "))
if level > 0 and level < 4:
print "Now, starting with the level " + str(level)
list1 = list_slice(level, big_list)
return list1
else:
print "Not in the desired range."
return get_level(big_list)
except:
print("Sorry, I didn't understand that.")
return get_level(big_list)
def ask_for_answer():
"""Prompts the user to answer a question"""
answ = str(raw_input("What word replaces the SPACE in this sentence? "))
return answ
def print_elem_list(list_x):
"""Receives a list and prints it, one element at a time"""
for element in list_x:
print element
def present_questions(list_y, answers, correct_answers):
""" Presents questions to the user, and takes note of the answers given by the user"""
for element in list_y:
print " "
print element[0]
response = ask_for_answer()
i = 0
while i < 3:
if response == element[1]:
print "Congratulations! Right answer!"
correct_answers[0] += 1
print element[0].replace("______", element[1])
answers.append([response])
break
else:
print "Not the answer I was expecting. Try again"
i += 1
response = ask_for_answer()
answers.append([response])
def present_results(correct, given_answers, answers):
"""Given a number of correct answers, and a list of answers given by the student vs the list of the
real answers, presents the amount of correct answers and gives the elements for comparisson between
correct and wrong answers"""
print "You had " + str(correct[0]) + " right answers!"
print "Your answers were: "
print_elem_list(given_answers)
print "The right answers were these: "
for element in answers:
print element[1]
#################### THIS IS THE PART THAT INTERACTS WITH THE USER ######################
print_elem_list(welcome_message)
questions = get_level(questionary)
present_questions(questions, user_answers, right_answers)
present_results(right_answers, user_answers, questions)
#################### DEVELOPED BY JULIAN QUINTANA, FROM MEDELLIN - COLOMBIA ######################
| [
"julianestebanquintana@MacBook-Pro-de-Julian.local"
] | julianestebanquintana@MacBook-Pro-de-Julian.local |
292a9082291f7cc0b1f5aef8055dd29a1ae5b269 | e99de265b090c26ed6ba012deab4edb13461e774 | /ortools/sat/samples/copy_model_sample_sat.py | 80cb6253b2aaf1788d238c769639e261e49896ea | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | szerfas1/or-tools | 753f0324b5c408691f72033ec0d12a69c5028a23 | fa84bc05e72641dddfbb98164d81b8bc9bef6ea5 | refs/heads/stable | 2023-04-09T07:45:27.702891 | 2021-03-02T14:21:44 | 2021-03-02T14:21:44 | 358,003,762 | 0 | 0 | Apache-2.0 | 2021-04-14T18:32:17 | 2021-04-14T18:25:33 | null | UTF-8 | Python | false | false | 1,988 | py | # Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Showcases deep copying of a model."""
# [START program]
from ortools.sat.python import cp_model
def CopyModelSat():
"""Showcases printing intermediate solutions found during search."""
# Creates the model.
# [START model]
model = cp_model.CpModel()
# [END model]
# Creates the variables.
# [START variables]
num_vals = 3
x = model.NewIntVar(0, num_vals - 1, 'x')
y = model.NewIntVar(0, num_vals - 1, 'y')
z = model.NewIntVar(0, num_vals - 1, 'z')
# [END variables]
# Creates the constraints.
# [START constraints]
model.Add(x != y)
# [END constraints]
# [START objective]
model.Maximize(x + 2 * y + 3 * z)
# [END objective]
# Creates a solver and solves.
# [START solve]
solver = cp_model.CpSolver()
status = solver.Solve(model)
# [END solve]
if status == cp_model.OPTIMAL:
print('Optimal value of the original model: {}'.format(
solver.ObjectiveValue()))
# Copy the model.
copy = cp_model.CpModel()
copy.CopyFrom(model)
copy_x = copy.GetIntVarFromProtoIndex(x.Index())
copy_y = copy.GetIntVarFromProtoIndex(y.Index())
copy.Add(copy_x + copy_y <= 1)
status = solver.Solve(copy)
if status == cp_model.OPTIMAL:
print('Optimal value of the modified model: {}'.format(
solver.ObjectiveValue()))
CopyModelSat()
# [END program]
| [
"lperron@google.com"
] | lperron@google.com |
9ce7f7ecdf8e9315f04461d7914a303696ca3b46 | 4a3e4c20cc7de2e80ed038bbfe9b359ba75daf07 | /configs/RS-data/DOTA/train+val/FCOS/SENet/se-resnet101-caffe-center-FPN-R16.py | 28c2af5aa65deba151d0e2c9e38190d121f2e26e | [
"Apache-2.0"
] | permissive | CLIFF-BOT/mmdetection-1.0 | b0b4f9b5586476f96ef55cf68373110f28e503a7 | a16a7c6a8f0bf029100b85f0bd1b64093e7809af | refs/heads/master | 2023-01-02T07:02:35.496174 | 2020-10-29T03:26:18 | 2020-10-29T03:26:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,209 | py | # model settings
model = dict(
type='FCOS',
pretrained='open-mmlab://resnet101_caffe',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True,
attention=dict(
type='SENet',
inplanes=256,
reduction=16,
bias=True)),
bbox_head=dict(
type='FCOSHead',
num_classes=17,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
center_sampling=True,
center_sample_radius=1.5))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=-1)
# dataset settings
dataset_type = 'DOTA1_5Dataset'
data_root = 'data/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1024, 1024), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1024, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'DOTA/train1024/DOTA1_5_train1024.json',
img_prefix=data_root + 'DOTA/train1024/images',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'DOTA/val1024/DOTA1_5_val1024.json',
img_prefix=data_root + 'DOTA/val1024/images',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'DOTA/test1024/DOTA1_5_test1024.json',
img_prefix=data_root + 'DOTA/test1024/images',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD',
lr=0.0025,
momentum=0.9,
weight_decay=0.0001,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='constant',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
evaluation = dict(interval=12)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
find_unused_parameters=True
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/fcos_r50_caffe_fpn_gn_1x_4gpu'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"nicholasirving@outlook.com"
] | nicholasirving@outlook.com |
045a6be87833d5889a4a60e4d0710d0699af059c | 34a427948b4cf8bffd977af9e4d0cd560808fe88 | /ARP_scanner_using_ip_as_command_argument.py | ae83a18a0e5eedaa193aaa38fbc874227546e439 | [] | no_license | tonnyshm/Networking-Tools | 4c7f0446d7fbc0877ca75902d25d002a110d9801 | 87d185765e321462639c1b4e483831d24d9d0686 | refs/heads/master | 2022-04-29T23:07:23.585941 | 2020-04-25T13:18:47 | 2020-04-25T13:18:47 | 258,777,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | #!/usr/bin/env python
import scapy.all as scapy
import optparse
def scan(ip):
arp_request = scapy.ARP(pdst=ip)
broadcast = scapy.Ether(dst="ff:ff:ff:ff:ff:ff")
arp_request_broadcast = broadcast/arp_request
answered_list = scapy.srp(arp_request_broadcast, timeout=1, verbose=False)[0]
client_list = []
for element in answered_list:
client_dict = {"ip": element[1].psrc, "mac": element[1].hwsrc}
client_list.append(client_dict)
return client_list
def get_ip():
parser = optparse.OptionParser()
parser.add_option("-t", "--target", dest="ip_address", help="IP address/IP range")
captured_ip , arguments = parser.parse_args()
return captured_ip
def print_result(results_list):
print("IP\t\t\t MAC ADDRESS\n----------------------------------------------------")
for client in results_list:
print(client["ip"] + "\t\t" + client["mac"])
captured_ip = get_ip()
scan_result = scan(captured_ip.ip_address)
print_result(scan_result)
| [
"tonny.shema@gmail.com"
] | tonny.shema@gmail.com |
1db6e93bcc75a0f658463b5bb1f5b2c42b37d8cc | 20f4503aec624da9ba4161e7ee11f121df125be1 | /main.py | ddad880abdfc862f74da3b2f95ac6d37ce6787a4 | [] | no_license | Fdojeda/codigos | a9b178bef3aee6c1351953bbcc74f064823b555d | 11993a4b6d11a8957ba21eada5f0875b3996fd94 | refs/heads/master | 2022-12-16T19:35:44.022963 | 2020-09-17T18:36:03 | 2020-09-17T18:36:03 | 289,083,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | from menus import Menu_Inicio
from cargar_datos_y_deportes import Ciclismo
from entidades import Deportistas
juego = Menu_Inicio()
print(juego.ejecutar())
| [
"fdojeda@uc.cl"
] | fdojeda@uc.cl |
70737cee89450355c2633629a95169eec78ed415 | cb703e45cf56ec816eb9203f171c0636aff0b99c | /Dzien01/06-oop.py | 67b18d2f215297256e7a0f4c431841266248e762 | [] | no_license | marianwitkowskialx/Enzode | dc49f09f086e4ca128cd189852331d3c9b0e14fb | 67d8fd71838d53962b4e58f73b92cb3b71478663 | refs/heads/main | 2023-06-04T20:58:17.486273 | 2021-06-24T16:37:53 | 2021-06-24T16:37:53 | 366,424,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py |
# OOP
class MetaProduct:
pass
class Product:
def __init__(self, id, name, price):
self.__id = id
self.__name = name
self.__price = price
def get_info(self):
return f"Id={self.__id}, nazwa:{self.__name}, price: {self.__price}"
def set_price(self, new_price):
self.__price = new_price
def __str__(self):
return self.get_info()
# Dziedziczenie klas
class Alcohol(Product):
def __init__(self, id, name, price, type="vodka"):
super(Alcohol, self).__init__(id, name, price)
self.__type = type
def __str__(self):
s = f"{super().__str__()}, type={self.__type}"
return s
#############
coca_cola = Product(1, "Coca-cola", 4.99)
print(coca_cola.get_info())
coca_cola.set_price(4.29)
print(coca_cola)
#print(dir(coca_cola))
coca_cola._Product__price = 999 # nadpisanie pola "prywatnego" z zewnątrz obiektu
print(coca_cola)
smirnoff = Alcohol(2, "Smirnoff", 88, "vodka")
print(smirnoff)
| [
"noreply@github.com"
] | marianwitkowskialx.noreply@github.com |
c3d64bef7ce1750281a51d09ee118b162fcfda8b | b0fcc5332dd74ccf8d0234ec40cb1cb62a2b935c | /gen-inst/instance_generator.py | 8e7dd5c4c02282e80b3968b657c8f2d2653d421b | [] | no_license | Year2999/MSPSP-InstLib | 7058bb7709200b4af4307e273be57dac6baa28d0 | f77644175b84beed3bd365315412abee1a15eea1 | refs/heads/master | 2020-05-02T18:08:11.410604 | 2017-08-14T08:18:05 | 2017-08-14T08:18:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,987 | py | # Data61 Summer Internship 2016/17
# Kenneth Young
# Instance Generator for the Multi-Skill PSP
# This file contains:
# Functions to generate all components of an input instance to the MSPSP
# Instance generator driver function
# Packages
from __future__ import division
import sys, pdb, time
import numpy as np
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# User-defined functions
from instance_processing import *
#----------------------------------------------------------------------------------------#
# PRECEDENCE GRAPH GENERATION
# input: # of activites, # of start activities, # of end activities, max # of preds and
# succs, network complexity
# output: The edges of an AON graph
def generate_prec_graph(debugging, n, nStart, nFinish, MaxPred, MaxSucc, NC):
count = 0
while True:
# Step 0: Initialise
PrecGraph = initialise_prec_graph(n, nStart, nFinish)
# Step 1: Add predecessors
[PrecGraph, NonRedEdges] = add_predecessors(n, nStart, nFinish, MaxSucc, PrecGraph)
# Step 2: Add successors
[PrecGraph, NonRedEdges] = add_successors(n, nStart, nFinish, MaxSucc, PrecGraph, NonRedEdges)
# Step 3: Achieve desired network complexity
# ReqNumEdges = np.ceil(NC*(n+1)) # required number of edges for the given network complexity
ReqNumEdges = np.ceil(NC*(n))
# print NonRedEdges, ' ', ReqNumEdges
if NonRedEdges > ReqNumEdges:
PrecGraph = decrease_network_complexity(n, nFinish, NonRedEdges,
ReqNumEdges, PrecGraph)
else:
PrecGraph = increase_network_complexity(n, nStart, nFinish, MaxPred, MaxSucc,
NonRedEdges, ReqNumEdges, PrecGraph)
if PrecGraph == False:
print '...unachievable network complexity. Regenerating network... (%d)' %(count)
count=count+1
else:
break
return PrecGraph
# input: # of activites, # of start activities, # of end activities
# output: edge set containing dummy start and end edges
def initialise_prec_graph(n, nStart, nFinish):
# initialise NetworkX graph data structure
PrecGraph = nx.DiGraph()
PrecGraph.add_nodes_from(range(n+2))
# define dummy edges from node 0 to all start nodes
for i in range(1,nStart+1):
PrecGraph.add_edge(0,i)
# define dummy edges from finish nodes to node n+1
for i in range(n-nFinish+1,n+1):
PrecGraph.add_edge(i,n+1)
return PrecGraph
def add_predecessors(n, nStart, nFinish, MaxSucc, PrecGraph):
# randomly selects a predecessor i for each non-dummy activity
NonRedEdges = nStart + nFinish # number of non-redundant edges
j = nStart + 1
while j < n+1:
while PrecGraph.predecessors(j) == []:
if j >= (n - nFinish + 1):
i = np.random.randint(1,high=n-nFinish+1)
else:
i = np.random.randint(1,high=j-1+1)
if len(PrecGraph.successors(i)) < MaxSucc:
PrecGraph.add_edge(i,j)
NonRedEdges = NonRedEdges + 1
j = j + 1
return [PrecGraph, NonRedEdges]
def add_successors(n, nStart, nFinish, MaxPred, PrecGraph, NonRedEdges):
# randomly selects a successor u for each non-dummy activity
# begin with j as the last possible activity without a successor
j = n - nFinish
while j > 0:
AllPreds = find_all_predecessors(j, PrecGraph, [])
# print 'j = ', j
# print 'AllPreds = ', AllPreds
while PrecGraph.successors(j) == []:
# if j has no successor then randomly choose a possible successor
if j <= nStart:
u = np.random.randint(nStart+1, high=n+1)
else:
u = np.random.randint(j+1, high=n+1)
# compute the set of redundant successors if u in added
RedSuccs = list(set().union([item for i in AllPreds for item in PrecGraph.successors(i)]))
if len(PrecGraph.predecessors(u)) < MaxPred and u not in RedSuccs:
PrecGraph.add_edge(j,u)
NonRedEdges = NonRedEdges + 1
j = j - 1
return [PrecGraph, NonRedEdges]
def is_edge_redundant(i,j,PrecGraph):
AllPreds_i = find_all_predecessors(i, PrecGraph, [])
AllSuccs_i = find_all_successors(i, PrecGraph, [])
AllSuccs_j = find_all_successors(j, PrecGraph, [])
antecedent = False # store value of the complicated if condition
for u in AllSuccs_j:
if list(set(PrecGraph.predecessors(u)) & set(AllPreds_i)) != []:
antecedent = True
break
if ( j in AllSuccs_i or \
antecedent or \
set(PrecGraph.predecessors(j)) & set(AllPreds_i) or \
set(PrecGraph.successors(i)) & set(AllSuccs_j)
):
return True
return False
def increase_network_complexity(n, nStart, nFinish, MaxPred, MaxSucc, NonRedEdges, ReqNumEdges, PrecGraph):
# add edges to the graph until the required network complexity is achieved
# only run while loop for MAXLIMIT seconds maximum
start_time = time.time()
TIMELIMIT = 0.05
while NonRedEdges <= ReqNumEdges:
# randomly chose a node which can have another successor added
i = np.random.randint(1, high=n-nFinish+1)
if len(PrecGraph.successors(i)) < MaxSucc:
# randomly chose a node j which can be a new successor of i
if i <= nStart:
NonSuccessors = [node for node in range(nStart+1,n+1) if node not in PrecGraph.successors(i)]
j = np.random.choice(NonSuccessors)
else:
NonSuccessors = [node for node in range(i+1,n+1) if node not in PrecGraph.successors(i)]
if NonSuccessors == []:
continue
else:
j = np.random.choice(NonSuccessors)
if len(PrecGraph.predecessors(j)) < MaxPred:
# check if the candidate edge is redundant
if is_edge_redundant(i,j,PrecGraph):
continue
else:
PrecGraph.add_edge(i,j)
NonRedEdges = NonRedEdges + 1
# regenerate precedence graph if network complexity if unattainable
if time.time() - start_time > TIMELIMIT:
PrecGraph = False
break
return PrecGraph
# remove edges from the graph until the required network complexity is achieved
def decrease_network_complexity(n, nFinish, NonRedEdges, ReqNumEdges, PrecGraph):
while NonRedEdges > ReqNumEdges:
i = np.random.randint(1, high=n-nFinish+1)
if len(PrecGraph.successors(i)) > 1:
j = np.random.choice(PrecGraph.successors(i))
if len(PrecGraph.predecessors(j)) > 1:
PrecGraph.remove_edge(i,j)
NonRedEdges = NonRedEdges - 1
return PrecGraph
#----------------------------------------------------------------------------------------#
# RESOURCE GENERATION
# input: # of resources, # of skills, maximum # of skills any resource can master
# output: boolean resource mastery array
def generate_resources(debugging, m,l,MaxSkill):
Resources = range(m)
Skills = range(l)
while True:
ResMastery = np.zeros((m,l), dtype=np.int)
SkillMastery = np.zeros(l, dtype=np.int)
for res in Resources:
# randomly decide how many skills this resource has
nMast = np.random.randint(1,high=MaxSkill+1)
Mast = np.random.choice(Skills, size=nMast, replace=False)
for skill in Mast:
# print 'res = ', res
# print 'skill = ', skill
# print ''
ResMastery[res][skill] = 1
# record that this skill has at least one master
if SkillMastery[skill] == 0:
SkillMastery[skill] = 1
# if all skills are now mastered then we are done
if 0 not in SkillMastery:
break
return ResMastery
#----------------------------------------------------------------------------------------#
# ACTIVITY GENERATION
# input:
# output:
def generate_activities(debugging, n, m, l, ResMastery, SF, MRS, MinProcTime, MaxProcTime, MaxResAct, MaxResSkill):
# Step 1: Processing times definition
ProcTime = generate_processing_times(n, MinProcTime, MaxProcTime)
# Step 2: Definition of the skill requirements
[ResReq, rho] = generate_skill_requirements(n, m, l, ResMastery, SF)
# Step 3: Achieve desired modified resource strength
ResReq = increase_modified_resource_strength(n, m, l, MRS, ResMastery, ResReq,
MaxResAct, MaxResSkill, rho)
return ProcTime, ResReq
def generate_processing_times(n, MinProcTime, MaxProcTime):
# generate processing times for all activities
ProcTime = np.zeros(n+2, dtype=np.int)
for act in range(1,n+1):
ProcTime[act] = np.random.randint(MinProcTime, high=MaxProcTime+1)
return ProcTime
def generate_skill_requirements(n, m, l, ResMastery, SF):
# give all skills random requirements which are able to be fulfilled
# initialise resource requirements as zeros
ResReq = np.zeros((n+2,l), dtype=np.int)
if SF > 0 and SF <= 1:
lambd = SF * l
while (~ResReq.any(axis=0)).any():
act = 1
rho = 0 # overall number of already associated resources
while act < n+1:
if SF <= 0 or SF > 1:
# a non-valid SFvalue has been provided, so use a variable value of SF
lambd = np.random.randint(2,high=l+1)
# pdb.set_trace()
while np.sum(ResReq[act]) < lambd:
skill = np.random.randint(0,high=l)
# print 'row = ', ResReq[act]
# print 'lambd = ', lambd
# print 'act = ', act
# print 'skill = ', skill
# print ResReq
# print ''
if ResReq[act][skill] != 1:
ResReq[act][skill] = 1
rho = rho + 1
act = act + 1
return [ResReq, rho]
def increase_modified_resource_strength(n, m, l, MRS, ResMastery, ResReq, MaxResAct, MaxResSkill, rho):
# adds to the resource requirement of the activities to achieve the given MRS
Skills = range(l)
# print rho
# print np.floor(m/MRS)
# print ''
while rho < np.floor(m/MRS):
act = np.random.randint(1,high=n+1)
ActSkillSet = [skill for skill in Skills if ResReq[act][skill] > 0]
# print 'act = ', act
# print 'skill set = ', [x+1 for x in ActSkillSet]
# print ''
# pdb.set_trace()
if np.sum([ResReq[act][s] for s in ActSkillSet]) < MaxResAct:
skill = np.random.choice(ActSkillSet)
if ResReq[act][skill] < MaxResSkill:
ResReq[act][skill] = ResReq[act][skill] + 1
if is_flow_feasible(m, ResMastery, ResReq[act], ActSkillSet):
rho = rho + 1
else:
ResReq[act][skill] = ResReq[act][skill] - 1
return ResReq
def is_flow_feasible(m, ResMastery, SkillReq, SkillSet):
Resources = range(m)
# initialise directed graph to run flow algorithm on
G = nx.DiGraph()
G.add_node('s', demand=-np.sum(SkillReq)) # source
G.add_node('t', demand= np.sum(SkillReq)) # sink
# pdb.set_trace()
UsefulRes = find_useful_resources(Resources, ResMastery, SkillSet)
# define resource nodes and edges from the source to resources
for res in UsefulRes:
G.add_node(res, demand=0)
G.add_edge('s', res, weight=0, capacity=1)
# define required skill nodes and edges from skills to sink node
for skill in SkillSet:
# offset the index by the # of res
G.add_node(skill+m, demand=0)
G.add_edge(skill+m, 't', weight=0, capacity=SkillReq[skill])
# define the edges between the resource and the skill nodes
for res in Resources:
for skill in SkillSet:
if ResMastery[res][skill] == 1:
G.add_edge(res, skill+m, weight=0, capacity=1)
# pdb.set_trace()
try:
flowDict = nx.min_cost_flow(G)
except nx.exception.NetworkXUnfeasible:
return False
return True
#----------------------------------------------------------------------------------------#
# DRIVER
# input:
# output:
def generate_instance(debugging, n, nStart, nFinish, MaxPred, MaxSucc, NC, m, l, MaxSkill, SF, MRS, MinProcTime, MaxProcTime, MaxResAct, MaxResSkill):
print 'Generating precedence network...'
# create the precedence graph for the given parameter values
PrecGraph = generate_prec_graph(debugging, n, nStart, nFinish, MaxPred, MaxSucc, NC)
# output graph
if debugging:
pos = nx.shell_layout(PrecGraph)
nx.draw_networkx_nodes(PrecGraph, pos=pos, nodelist = PrecGraph.nodes())
nx.draw_networkx_edges(PrecGraph, pos=pos, edgelist = PrecGraph.edges())
nx.draw_networkx_labels(PrecGraph, pos=pos)
plt.savefig("PrecGraph.png")
print 'Generating resources...'
# create the resources and define their skill sets
ResMastery = generate_resources(debugging, m, l, MaxSkill)
if debugging:
print 'Resource Mastery Array:\n', ResMastery, '\n'
print 'Generating activities...'
[ProcTime, ResReq] = generate_activities(debugging, n, m, l, ResMastery,
SF, MRS, MinProcTime, MaxProcTime, MaxResAct, MaxResSkill)
if debugging:
print 'Processing Times:\n', ProcTime, '\n'
print 'Activity Skill Requirement Array:\n', ResReq, '\n'
# instance = [PrecGraph, ResMastery, ProcTime, ResReq]
return [PrecGraph, ResMastery, ProcTime, ResReq]
# input:
# output:
def main():
debugging = 0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Input Parameters
SetNum = 1
NumInstances = 6
SaveSumOfResReq = False
n = define_n(SetNum)
SFList = [1, 0.75, 0.5, 0]
# NCList = [1.5, 1.8, 2.1]
# SFList = [0.5, 0]
NCList = [2.1]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Fixed Parameters
nStart=3
nFinish=3
MaxPred=3
MaxSucc=3
l=4 # no. of skills
MaxSkill=3
MinProcTime = 1
MaxProcTime = 10
MaxResSkill = define_max_res_skill(n)
for SF in SFList:
for NC in NCList:
mList = define_m_list(SF,n)
for m in mList:
MaxResAct = m # max no. of resources per activity
for InstNum in range(NumInstances):
MRS = define_MRS(n,m,l,SF)
print ' # | n | m | SF\t| NC\t| MRS\t'
print ' %02d | %d | %d | %4.2f\t| %3.1f\t| %6.5f\t' %(InstNum,n,m,SF,NC,MRS)
# seed the randomisation
seed = np.random.randint(1000000, high=9999999)
np.random.seed(seed)
# create instance
[PrecGraph, ResMastery, ProcTime, ResReq] = generate_instance(debugging, n,
nStart, nFinish, MaxPred, MaxSucc, NC, m, l, MaxSkill,
SF, MRS, MinProcTime, MaxProcTime, MaxResAct, MaxResSkill)
UB = find_naive_upper_bound(ProcTime)
LB = find_naive_lower_bound(n,PrecGraph,ProcTime)
# save instance
filename = 'minizinc-instances/inst_set%s_sf%s_nc%s_n%s_m%s_%02d.dzn' \
%(str(SetNum), str(SF), str(NC), str(n), str(m), InstNum)
if debugging:
print 'Saving instance to path\n\t %s\n' %(filename)
else:
print ''
save_instance_as_dzn(n, m, l, PrecGraph, ResMastery, ProcTime,
ResReq, UB, LB, seed, filename, SaveSumOfResReq)
if __name__ == "__main__":
main()
| [
"kendogy@gmail.com"
] | kendogy@gmail.com |
ac8dac173c77a7199c3b262a7c0e757a80dfe26b | bcd3c70e99c736eac44ce5dcb0643776fd141a3e | /tests/api/endpoints/test_send_upload_link.py | 4bd41f88aa175e3b129ce5550f2fc7f53e905516 | [
"Apache-2.0"
] | permissive | insky2005/seahub | 2ea6d41491cc27a091e8decda3667328614ce0dc | 99f0c340e50e1b30b7222ac870e5f0d15a8eb7f6 | refs/heads/master | 2021-01-24T15:33:08.683090 | 2016-04-08T02:02:18 | 2016-04-08T02:02:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | #coding: UTF-8
import json
from django.core.urlresolvers import reverse
from seahub.utils import IS_EMAIL_CONFIGURED
from seahub.test_utils import BaseTestCase
from seahub.share.models import UploadLinkShare
class SendUploadLinkApiTest(BaseTestCase):
def setUp(self):
uls = UploadLinkShare.objects.create_upload_link_share(self.user.username,
self.repo.id, '/')
self.token = uls.token
def tearDown(self):
self.remove_repo()
def test_can_send_email(self):
self.login_as(self.user)
invalid_email = 'invalid'
url = reverse("api2-send-upload-link")
data = {
"token": self.token,
"email": self.admin.email + ',' + invalid_email,
}
resp = self.client.post(url, data)
if not IS_EMAIL_CONFIGURED:
self.assertEqual(403, resp.status_code)
else:
self.assertEqual(200, resp.status_code)
json_resp = json.loads(resp.content)
assert json_resp['success'][0] == self.admin.email
assert json_resp['failed'][0]['email'] == invalid_email
def test_can_not_send_email_if_not_link_owner(self):
self.login_as(self.admin)
url = reverse("api2-send-upload-link")
data = {
"token": self.token,
"email": self.admin.email,
}
resp = self.client.post(url, data)
self.assertEqual(403, resp.status_code)
| [
"imwhatiam123@gmail.com"
] | imwhatiam123@gmail.com |
21068f6abe3bcd72409dea125efb285c7605bbbd | 1099e2a2fb11f53c82f2206112c60a6c0149dfde | /exercise-validator-cli-phoenix/gf/app_controller.py | 497fd14fbfd9ab1f75604fa60b58b8715141efe7 | [] | no_license | green-fox-academy/bkorbuly | f652e9e75067a8dba280e5e79c21696dae9b847e | f09f59f75d1b6d221147a6810ae8ba1d0b0f4840 | refs/heads/master | 2021-08-14T20:03:26.012200 | 2017-11-16T15:55:41 | 2017-11-16T15:55:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | from external.local import user_presenter
from gf.config import config_service
from gf.verify import verify_service
from gf.start import start_service
from gf.rate import rate_service
from gf.docopt_adapter import get_filename, get_link, get_command_argument
def run():
try:
task_check(get_command_argument())()
except KeyboardInterrupt:
user_presenter.keyboard_interrupted()
def task_check(task_name):
return {
"config": config_start,
"verify": verify_start,
"start": start,
"rate": rate_start,
"debug": show_debug
}[task_name]
def show_debug():
user_presenter.show_debug()
def help_message():
user_presenter.help_message()
def config_start():
config_service.start()
def verify_start():
verify_service.start(get_link())
def start():
start_service.main(get_filename())
def rate_start():
rate_service.start(get_filename())
| [
"b.korbuly@gmail.com"
] | b.korbuly@gmail.com |
1ef39d8f51378e2e4327ea08e562140f5bd63d05 | 0056abb227e467b8c97b40611cea3f3e2c4faa8e | /Project3/Code/eigenSolver.py | 9e5b4180e2586809a326f067dcffde16cb5099e3 | [] | no_license | michaesb/FYS-STK-Gruppe | dce6399037c4edfda19cbc72d2a7794a4837048a | ca7ebd685e0be7d24c0c63efbffd0774e2d03bbb | refs/heads/master | 2023-01-30T23:18:13.394089 | 2020-12-16T22:02:41 | 2020-12-16T22:02:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,427 | py | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
from tensorflow.keras.models import Sequential #This allows appending layers to existing models
from tensorflow.keras.layers import Dense #This allows defining the characteristics of a particular layer
from tensorflow.keras import optimizers #This allows using whichever optimiser we want (sgd,adam,RMSprop)
tf.keras.backend.set_floatx('float64')
class EigenSolver():
def __init__(self, A, x0, dt, T, eta = 0.01):
self._A = A
N = x0.shape[0]
x0 /= np.linalg.norm(x0) # normalize
self._x0 = tf.reshape(tf.convert_to_tensor(x0, dtype=tf.dtypes.float64), shape=(1, -1)) # row vector, since the NN outputs row vectors
Nt = int(T / dt) + 1
self._t_arr = np.linspace(0, T, Nt)
self._t = tf.reshape(tf.convert_to_tensor(self._t_arr, dtype=tf.dtypes.float64), shape=(-1, 1)) # column vector
self._zeros = tf.convert_to_tensor(np.zeros((N, Nt)))
# Setting up model
self._model = Sequential()
self._model.add(Dense(100, activation='sigmoid'))
self._model.add(Dense(50, activation='sigmoid'))
self._model.add(Dense(25, activation='sigmoid'))
self._model.add(Dense(N, activation="linear"))
self._model.build(self._t.shape)
self._optimizer = optimizers.Adam(eta)
def _x_net(self):
return tf.exp(-self._t) @ self._x0 + self._model(self._t) * (1 - tf.exp(-self._t))
def _loss(self):
with tf.GradientTape() as tape_t:
tape_t.watch([self._t])
x_net = self._x_net()
dx_dt = tape_t.batch_jacobian(x_net, self._t)[:, :, 0] # This takes the gradient of each element of x for each time step
dx_dt = tf.transpose(dx_dt) # We need to transpose, as x_net is a collection of row vectors,
x_net = tf.transpose(x_net) # but we need a collection of column vectors for the matrix multiplications
Ax = self._A @ x_net
xTx = tf.einsum("ij,ji->i", tf.transpose(x_net), x_net)
xTAx = tf.einsum("ij,ji->i", tf.transpose(x_net), Ax)
fx = xTx * Ax + (1 - xTAx) * x_net
return tf.losses.mean_squared_error(self._zeros, dx_dt - fx + x_net)
def train(self, iters):
start_time = time.time()
# Training the model by calculating gradient
for i in range(iters):
with tf.GradientTape() as tape:
current_loss = self._loss()
grads = tape.gradient(current_loss, self._model.trainable_variables)
self._optimizer.apply_gradients(zip(grads, self._model.trainable_variables))
total_time = time.time() - start_time
print(f"Finished training with a loss of {np.mean(self._loss())} after {total_time//60:.0f}m {total_time%60}s.")
def output(self):
# Output of model
return self._t_arr, self._x_net()
def evaluate(self, biggest = 1):
# Extracting eigenvector and value
t, x_t = self.output()
eig_vec = np.array(x_t[-1, :]) / np.linalg.norm(np.array(x_t[-1, :]))
eig_val = np.mean(self._A @ eig_vec / eig_vec)
eig_val_std = np.std(self._A @ eig_vec / eig_vec)
# Analytical eigenvectors and values
eigenvalues, v = np.linalg.eig(self._A)
eig_index = np.argmax(eigenvalues)
if biggest == 1:
eig_val_anal = eigenvalues[eig_index]
else:
eig_val *= -1
eig_val_anal = -eigenvalues[eig_index]
eig_vec_anal = v[:, eig_index]
eig_vec_anal *= np.sign(eig_vec[0] * eig_vec_anal[0]) # makes eigenvectors not point opposite direction
print(f"Eigenvalue = {eig_val:.5f} +- {eig_val_std:.5f}")
print(f"Real eigen = {eig_val_anal:5f}, diff = {eig_val - eig_val_anal:.5f}")
print(f"Eigenvector = {eig_vec}")
print(f"Real eigenvec = {eig_vec_anal}")
plt.figure()
plt.xlabel("t")
plt.ylabel("x(t)")
for i in range(len(eig_vec)):
plt.plot(t , x_t[:, i] / np.linalg.norm(x_t, axis=1), label=rf"$x_{i+1}$")
plt.gca().set_prop_cycle(None)
for i in range(len(eig_vec)):
plt.plot([t[-1] + 0.2], eig_vec_anal[i], marker="D", markersize=4)
plt.legend() | [
"KarlHenrik@users.noreply.github.com"
] | KarlHenrik@users.noreply.github.com |
1798fe340e001451dd3f7841460e5cccc795bcd7 | 4aaa03d414007552301c5fe4f5459fb652b7d79d | /chapter3/KNN.py | 0c9bd32acc9cb8c5beb95c8be7d35d5a9a7756eb | [] | no_license | panxmay/Web_Data_Mining | dc0c2815c68f44775ceb0243856f6851c849ab45 | c378b16179734a40fa396bc3be16369f7b24dc15 | refs/heads/master | 2022-11-18T09:50:18.307694 | 2020-07-24T13:59:04 | 2020-07-24T13:59:04 | 282,233,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,703 | py | from sklearn import datasets
import random
import math
import pandas as pd
def get_dataset():
iris = datasets.load_iris()
names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
dataset = pd.DataFrame(iris.data, columns=names)
dataset['class'] = iris.target
return dataset
def get_max_index(data):
max = -1
index = -1
for i in range(0, len(data)):
if data[i] > max:
index = i
max = data[i]
return index
def get_test_data(start, end, n):
test_data = random.sample(range(start, end), n)
return test_data
def compute_distance(d1, d2):
distance = 0
if len(d1) != len(d2):
print("无法计算距离")
return
for i in range(0, len(d1)):
distance += math.pow(d1[i] - d2[i], 2)
return math.sqrt(distance)
def delete_test_data(dataset, n, test_data):
test_index = get_test_data(0, 150, n)
for i in test_index:
test_data.append(dataset.loc[i, :])
dataset = dataset.drop(test_index)
dataset.reset_index(drop=True, inplace=True)
return dataset
def KNN(k, n):
dataset = get_dataset()
test_data = []
dataset = delete_test_data(dataset, n, test_data)
for i in test_data:
distance = []
for index in dataset.index:
distance.append([index, compute_distance(
i[:4], dataset.iloc[index, 0:4])])
distance.sort(key=lambda d: d[1], reverse=False)
classes = [0, 0, 0]
for j in range(0, k):
classes[dataset.iloc[distance[j][0], 4]] += 1
data_class = get_max_index(classes)
print(i)
print("预测结果:", data_class)
k = 10
n = 5
KNN(k, n)
| [
"panxmay@gmail.com"
] | panxmay@gmail.com |
11d1418d410fc266a990a254a5cace5a126ea43e | b900ba81e7745cf5bcfebc0694e6d6a3f65bee68 | /Practice/CNN/Great_Model/Resnet-50Model/VGG_16.py | 776a14762ecdcdddbcba58e6b411d13ef645a417 | [] | no_license | SUMAN147/PROJECT | 8a0d4f92cb794e713e24c34d923ef97d6d72c915 | 5b21a755af2910508d49c6336e56bbb489b67f7b | refs/heads/master | 2020-03-24T20:28:59.586588 | 2019-01-25T00:55:40 | 2019-01-25T00:55:40 | 142,980,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,348 | py | # _*_ coding: utf-8 _*_
''' VGG-16 model for keras
# Reference:
-[Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
'''
from __future__ import print_function
import numpy as np
import warnings
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalMaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.imagenet_utils import _obtain_input_shape
from keras.engine.topology import get_source_inputs
WEIGHTS_PATH = 'https://github.com/SUMAN147/Resnet-50Model/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/SUMAN147/Resnet-50Model/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000):
""" Instantiates the VGG16 architecture.
Optionlayy loads weight pre-trained on Imagenet. Note that when using Tensorflow, for best performance you should set `image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both Tensorflow and theano. The data format convention used by the model is the one specified in your Keras config file.
# Arguments
include_top: whether to include the 3 fully-connected layers at the top of the network.
weight : one of `None` (random initialization) or "imagenet" (pre-training on ImageNet).
input_tensor : optional Keras tensor(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 244)` (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 48.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either ''`None` (random initialization) or `imagenet` ''(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine Proper input shape
input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=48, data_format=K.image_data_format(), include_top = include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
#Block1
x = Conv2D(64,(3,3), activation = 'relu', padding = 'same', name='block1_conv1')(img_input)
x = Conv2D(64,(3,3), activation = 'relu', padding= 'same', name='block1_conv2')(x)
x = MaxPooling2D((2,2), strides=(2,2), name='block1_pool')(x)
#Block2
x = Conv2D(128,(3,3), activation = 'relu', padding= 'same', name='block2_conv1')(x)
x = Conv2D(128,(3,3), activation = 'relu', padding= 'same', name='block2_conv2')(x)
x = MaxPooling2D((2,2), strides=(2,2), name='block2_pool')(x)
#Block3
x = Conv2D(256,(3,3), activation = 'relu', padding= 'same', name='block3_conv1')(x)
x = Conv2D(256,(3,3), activation = 'relu', padding= 'same', name='block3_conv2')(x)
x = Conv2D(256,(3,3), activation = 'relu', padding= 'same', name='block3_conv3')(x)
x = MaxPooling2D((2,2), strides=(2,2), name='block3_pool')(x)
#Block3
x = Conv2D(512,(3,3), activation = 'relu', padding= 'same', name='block4_conv1')(x)
x = Conv2D(512,(3,3), activation = 'relu', padding= 'same', name='block4_conv2')(x)
x = Conv2D(512,(3,3), activation = 'relu', padding= 'same', name='block4_conv3')(x)
x = MaxPooling2D((2,2), strides=(2,2), name='block4_pool')(x)
#Block3
x = Conv2D(512,(3,3), activation = 'relu', padding= 'same', name='block5_conv1')(x)
x = Conv2D(512,(3,3), activation = 'relu', padding= 'same', name='block5_conv2')(x)
x = Conv2D(512,(3,3), activation = 'relu', padding= 'same', name='block5_conv3')(x)
x = MaxPooling2D((2,2), strides=(2,2), name='block5_pool')(x)
if include_top:
# Classification block
x = Flatten(name='flatten')(x)
x = Dense(4096,activation='relu', name='fc1')(x)
x = Dense(4096,activation='relu', name='fc2')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else
if pooling == 'avg'
x = GlobalAveragePooling2D()(x)
elif pooling == 'max'
x = GlobalMaxPooling2D()(x)
#Ensure that the model takes into account
# any potential predecessor of `input_tensor`
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else
inputs = img_input
#Create Model
model = Model(inputs, x, name='vgg16')
#load weights
if weights == 'imagenet':
if include_top:
weight_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',WEIGHTS_PATH,cache_subdir='models')
else
weight_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',WEIGHTS_PATH_NO_TOP,cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'theano':
layer_utils.convert_all_kernels_in_model(model)
if K.image_data_format() == 'channels_first':
if include_top:
maxpool = model.get_layer(name='block5_pool')
shape = maxpool.output_shape[1:]
dense = model.get_layer(name='fc1')
layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image data format convention '
'(`image_data_format="channels_first"`). '
'For best performance, set '
'`image_data_format="channels_last"` in '
'your Keras config '
'at ~/.keras/keras.json.')
return model
if __name__ == '__main__':
model = VGG16(include_top=True, weights='imagenet')
img_path = 'elephant.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds))
# End of Project
| [
"147sumansourav@gmail.com"
] | 147sumansourav@gmail.com |
859614ec81a12a9dea7997f2d6921b043b91bd6c | 8f6c7b9d641e002c1d66a740f431fd1f6b48371a | /callback_plugins/fail_if_no_hosts.py | 8b60c17cf5f825a40a0c88ef97794882cf50d716 | [
"Apache-2.0"
] | permissive | rbrady/tripleo-validations | 1740c546468f2019bed6b04443e67e1792fc7fdb | f1963a4e8396c9b833619e57d4b9509af887c2b0 | refs/heads/master | 2020-12-08T19:56:14.755187 | 2019-11-28T13:02:07 | 2020-02-10T09:00:16 | 233,080,253 | 0 | 0 | Apache-2.0 | 2020-01-10T15:52:45 | 2020-01-10T15:52:44 | null | UTF-8 | Python | false | false | 966 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
CALLBACK_VERSION = 2.0
CALLBACK_NAME = 'fail_if_no_hosts'
def __init__(self, display=None):
super(CallbackModule, self).__init__(display)
def v2_playbook_on_stats(self, stats):
if len(stats.processed.keys()) == 0:
sys.exit(10)
| [
"gchamoul@redhat.com"
] | gchamoul@redhat.com |
7a7c344248c899170ceda58153c41986bb2e0dd0 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_devprof_system_replacemsg_auth.py | d1536bd18254cb360762c92c9424856696d9ac63 | [
"MIT"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 8,357 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_devprof_system_replacemsg_auth
short_description: Replacement messages.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
devprof:
description: the parameter (devprof) in requested url
type: str
required: true
devprof_system_replacemsg_auth:
description: the top level parameters set
required: false
type: dict
suboptions:
buffer:
type: str
description: 'Message string.'
format:
type: str
description: 'Format flag.'
choices:
- 'none'
- 'text'
- 'html'
- 'wml'
header:
type: str
description: 'Header flag.'
choices:
- 'none'
- 'http'
- '8bit'
msg-type:
type: str
description: 'Message type.'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Replacement messages.
fmgr_devprof_system_replacemsg_auth:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
devprof: <your own value>
devprof_system_replacemsg_auth:
buffer: <value of string>
format: <value in [none, text, html, ...]>
header: <value in [none, http, 8bit]>
msg-type: <value of string>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/devprof/{devprof}/system/replacemsg/auth'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/devprof/{devprof}/system/replacemsg/auth/{auth}'
]
url_params = ['adom', 'devprof']
module_primary_key = None
module_arg_spec = {
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'adom': {
'required': True,
'type': 'str'
},
'devprof': {
'required': True,
'type': 'str'
},
'devprof_system_replacemsg_auth': {
'required': False,
'type': 'dict',
'options': {
'buffer': {
'required': False,
'type': 'str'
},
'format': {
'required': False,
'choices': [
'none',
'text',
'html',
'wml'
],
'type': 'str'
},
'header': {
'required': False,
'choices': [
'none',
'http',
'8bit'
],
'type': 'str'
},
'msg-type': {
'required': False,
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'devprof_system_replacemsg_auth'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_partial_curd()
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| [
"sifang@cisco.com"
] | sifang@cisco.com |
d2e4e3064de9337880d4a005c8ef13f9c7c2f0e6 | 0e0670859d30ac0ca3cd889b496e066d44f1bad6 | /src/blog/admin.py | 0df8220f70e103dee9747b0968ccd8c3a7da566f | [] | no_license | marcinowski/devmate | 89dea8f153eb1b6f0adce0ea1144eb49ab32858e | cc13c31f41a278a7eb4eb2f04bf4574a880538f7 | refs/heads/master | 2021-05-14T23:47:25.245472 | 2017-10-24T13:11:57 | 2017-10-24T13:11:57 | 102,716,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | from django.contrib import admin
from .models import Tag, Article, Image
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ('id', 'tag', 'content_type', 'object_id')
list_filter = ('content_type',)
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
list_display = (
'id',
'author',
'created',
'last_updated',
'content',
'description',
'title',
'slug',
'status',
'publish_date',
'thumbnail',
)
list_filter = ('author', 'created', 'last_updated', 'publish_date')
raw_id_fields = ('related_posts',)
search_fields = ('slug',)
@admin.register(Image)
class ImageAdmin(admin.ModelAdmin):
list_display = ('id', 'path')
| [
"muszynskimarcin@wp.pl"
] | muszynskimarcin@wp.pl |
9f89201d32f8e5978be60fb9bf41c1bcbfe7bf42 | b4f92f72dbec0c9e03a5c9d11a6c2c14aba1ab2b | /cuisine/migrations/0003_meal_customer.py | 098c91ec41013282c2367faa0fad4b8bda28597c | [] | no_license | devmanorg/foodplan | ad61dd68f65d2afd2a780430d0c13088c5155c5f | f682af47793e3db4302933bd63627df2bc4fdd94 | refs/heads/main | 2023-07-16T16:18:38.204053 | 2021-09-01T03:02:58 | 2021-09-01T03:02:58 | 399,474,915 | 0 | 0 | null | 2021-09-01T00:47:51 | 2021-08-24T13:26:07 | Python | UTF-8 | Python | false | false | 674 | py | # Generated by Django 3.2.6 on 2021-08-25 07:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cuisine', '0002_auto_20210825_0651'),
]
operations = [
migrations.AddField(
model_name='meal',
name='customer',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='meals', to='auth.user', verbose_name='пользователь'),
preserve_default=False,
),
]
| [
"mashross@gmail.com"
] | mashross@gmail.com |
169230e4b2b4f6d8d4ed85bc81004a8234083182 | 031c66bb13e7fa21aec8cbfea74210b0458be96d | /utility/__init__.py | 1559457640c33168770b565cf36bf494fab2a1f5 | [] | no_license | cctecoo/personnel_management_system | 662880c684a74d66f221b23133fdff522e01dcf9 | 9c0d3d424311c0f8bf2ab42fcb34d5a52af789c8 | refs/heads/master | 2016-09-06T05:24:34.473551 | 2015-06-09T04:35:30 | 2015-06-09T04:35:30 | 33,525,154 | 0 | 0 | null | 2015-06-09T04:35:30 | 2015-04-07T06:02:04 | JavaScript | UTF-8 | Python | false | false | 64 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'cc'
| [
"358553880@qq.com"
] | 358553880@qq.com |
858fbf82eb6555838644cdefdf7c48696405baf2 | 22480f6a412b4ccf5f3ca02cee51a6bd8dde5d32 | /Hello.py | e8e21702b57f321a0c3f979bee6303cab4bd7df1 | [] | no_license | bethias/pi | 157ec493d9e287a98e9a0234e63291865f5fe7aa | 5ec284f9f5465e299ebb276085190f1632229080 | refs/heads/master | 2016-08-11T21:11:47.225331 | 2016-01-01T22:05:29 | 2016-01-01T22:05:29 | 48,852,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | user = input( 'I am a robot and Id like to talk to you. What is your name? : ' )
print( 'Hi' , user , end = '!\n')
lang = input( 'What is your favourite programming language? : ')
print( lang , 'is' , 'fun' , sep = ' * ' , end = '!\n' ) | [
"bethiaseager@bens-mbp.home"
] | bethiaseager@bens-mbp.home |
b0542480ade936383d803d7de35c312f5012cdc6 | 50ca0ee3dc1d104538205f8e2f942a4abd7eb234 | /trydjango/urls.py | f7bd17bcc8bfed1fcccb2b276aa4b528797d0f72 | [] | no_license | guptapranav4299/django-todo | b21381f27aefe0feeaa59bd266e107024de7e395 | 699c19eef345eda8b1d9474411c28bd64f826322 | refs/heads/master | 2023-04-12T17:13:12.874647 | 2021-05-06T09:40:49 | 2021-05-06T09:40:49 | 364,859,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """trydjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('tasks.urls')),
]
| [
"guptapranav4299@gmail.com"
] | guptapranav4299@gmail.com |
db37f97137da36e0aa5862181d7e6039055b5542 | 4e8b78ea8909973ad4fb96bad9abece11e562f9f | /code/Wed-06/main_menu_scene.py | 6badb7719f80b67f925fe5f72a410e2c2158dd09 | [] | no_license | HamzaSalman/Wednesday-06 | 60b281814dc84575f746cf7ac34bc73dbb45ac9b | e7d31b3435279346b70a8668487822a688562ad0 | refs/heads/master | 2020-12-24T12:41:25.864238 | 2016-11-05T23:10:19 | 2016-11-05T23:10:19 | 72,957,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,475 | py | # Created by: Hamza Salman
# Created on: October 2016
# Created for: ICS3U
# This scene shows the main menu.
from scene import *
import ui
from game_scene import *
from help_scene import *
class MainMenuScene(Scene):
def setup(self):
# this method is called, when user moves to this scene
# add background color
self.background = SpriteNode(position = self.size / 2,
color = 'white',
parent = self,
size = self.size)
self.start_button = SpriteNode('./assets/sprites/start.png',
parent = self,
position = self.size/2)
help_button_position = self.size/2
help_button_position.y = help_button_position.y - 200
self.help_button = SpriteNode('./assets/sprites/help.png',
parent = self,
position = help_button_position)
def update(self):
# this method is called, hopefully, 60 times a second
pass
def touch_began(self, touch):
# this method is called, when user touches the screen
pass
def touch_moved(self, touch):
# this method is called, when user moves a finger around on the screen
pass
def touch_ended(self, touch):
# this method is called, when user releases a finger from the screen
# if start button is pressed, goto game scene
if self.start_button.frame.contains_point(touch.location):
self.present_modal_scene(GameScene())
# if start button is pressed, goto game scene
if self.help_button.frame.contains_point(touch.location):
self.present_modal_scene(HelpScene())
def did_change_size(self):
# this method is called, when user changes the orientation of the screen
# thus changing the size of each dimension
pass
def pause(self):
# this method is called, when user touches the home button
# save anything before app is put to background
pass
def resume(self):
# this method is called, when user place app from background
# back into use. Reload anything you might need.
pass
| [
"hamza.salman@ocsbstudent.ca"
] | hamza.salman@ocsbstudent.ca |
ec97150d9242ddf0327f7c531aba3a086d5c3f25 | e68653fe1d5d4d611938333bdd3850102be4bc54 | /AlgorithmAndDataStructure/Basic/binary_search.py | 32cc082ff7763dda5d7f6b51260ae06789417a15 | [] | no_license | LUTLJS/algorithm_and_data_structure | 38a5654410949b3fcc644b0fbee57a5376ced711 | 1c449320c1bd6d5f3637eb257648a4c2156107e7 | refs/heads/master | 2021-04-15T09:14:15.921200 | 2018-03-30T15:50:38 | 2018-03-30T15:50:38 | 126,795,316 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | def binary_search(list, item):
low = 0
high = len(list) - 1
while low <= high:
mid = (low+high) // 2
guess = list[mid]
if guess == item:
return print('Yeah,found it:%d'%mid)
if guess > item:
high = mid - 1
else:
low = mid + 1
return print('Oops,there is no one.')
'''for example'''
my_list = [1,4,8,9,22,55,65,89,321,2345]
binary_search(my_list, 2345) | [
"you@example.com"
] | you@example.com |
7e93b484dc2b6cdc86eddf2f7565b9d4b0f2c2f1 | 6581ff32670e4b30dd17c781975c95eac2153ebc | /libnode-v10.15.3/deps/openssl/config/archs/solaris64-x86_64-gcc/asm/openssl.gypi | bdef71da400d019db281b3592835d891f282de89 | [
"Apache-2.0",
"LicenseRef-scancode-unicode",
"Zlib",
"ISC",
"LicenseRef-scancode-public-domain",
"NAIST-2003",
"BSD-3-Clause",
"BSD-2-Clause",
"Artistic-2.0",
"LicenseRef-scancode-unknown-license-reference",
"NTP",
"LicenseRef-scancode-openssl",
"MIT",
"ICU",
"LicenseRef-scancode-free-unknown"
] | permissive | pxscene/Spark-Externals | 6f3a16bafae1d89015f635b1ad1aa2efe342a001 | 92501a5d10c2a167bac07915eb9c078dc9aab158 | refs/heads/master | 2023-01-22T12:48:39.455338 | 2020-05-01T14:19:54 | 2020-05-01T14:19:54 | 205,173,203 | 1 | 35 | Apache-2.0 | 2023-01-07T09:41:55 | 2019-08-29T13:43:36 | C | UTF-8 | Python | false | false | 27,945 | gypi | {
'variables': {
'openssl_sources': [
'openssl/ssl/bio_ssl.c',
'openssl/ssl/d1_lib.c',
'openssl/ssl/d1_msg.c',
'openssl/ssl/d1_srtp.c',
'openssl/ssl/methods.c',
'openssl/ssl/pqueue.c',
'openssl/ssl/record/dtls1_bitmap.c',
'openssl/ssl/record/rec_layer_d1.c',
'openssl/ssl/record/rec_layer_s3.c',
'openssl/ssl/record/ssl3_buffer.c',
'openssl/ssl/record/ssl3_record.c',
'openssl/ssl/s3_cbc.c',
'openssl/ssl/s3_enc.c',
'openssl/ssl/s3_lib.c',
'openssl/ssl/s3_msg.c',
'openssl/ssl/ssl_asn1.c',
'openssl/ssl/ssl_cert.c',
'openssl/ssl/ssl_ciph.c',
'openssl/ssl/ssl_conf.c',
'openssl/ssl/ssl_err.c',
'openssl/ssl/ssl_init.c',
'openssl/ssl/ssl_lib.c',
'openssl/ssl/ssl_mcnf.c',
'openssl/ssl/ssl_rsa.c',
'openssl/ssl/ssl_sess.c',
'openssl/ssl/ssl_stat.c',
'openssl/ssl/ssl_txt.c',
'openssl/ssl/ssl_utst.c',
'openssl/ssl/statem/statem.c',
'openssl/ssl/statem/statem_clnt.c',
'openssl/ssl/statem/statem_dtls.c',
'openssl/ssl/statem/statem_lib.c',
'openssl/ssl/statem/statem_srvr.c',
'openssl/ssl/t1_enc.c',
'openssl/ssl/t1_ext.c',
'openssl/ssl/t1_lib.c',
'openssl/ssl/t1_reneg.c',
'openssl/ssl/t1_trce.c',
'openssl/ssl/tls_srp.c',
'openssl/crypto/aes/aes_cfb.c',
'openssl/crypto/aes/aes_ecb.c',
'openssl/crypto/aes/aes_ige.c',
'openssl/crypto/aes/aes_misc.c',
'openssl/crypto/aes/aes_ofb.c',
'openssl/crypto/aes/aes_wrap.c',
'openssl/crypto/asn1/a_bitstr.c',
'openssl/crypto/asn1/a_d2i_fp.c',
'openssl/crypto/asn1/a_digest.c',
'openssl/crypto/asn1/a_dup.c',
'openssl/crypto/asn1/a_gentm.c',
'openssl/crypto/asn1/a_i2d_fp.c',
'openssl/crypto/asn1/a_int.c',
'openssl/crypto/asn1/a_mbstr.c',
'openssl/crypto/asn1/a_object.c',
'openssl/crypto/asn1/a_octet.c',
'openssl/crypto/asn1/a_print.c',
'openssl/crypto/asn1/a_sign.c',
'openssl/crypto/asn1/a_strex.c',
'openssl/crypto/asn1/a_strnid.c',
'openssl/crypto/asn1/a_time.c',
'openssl/crypto/asn1/a_type.c',
'openssl/crypto/asn1/a_utctm.c',
'openssl/crypto/asn1/a_utf8.c',
'openssl/crypto/asn1/a_verify.c',
'openssl/crypto/asn1/ameth_lib.c',
'openssl/crypto/asn1/asn1_err.c',
'openssl/crypto/asn1/asn1_gen.c',
'openssl/crypto/asn1/asn1_lib.c',
'openssl/crypto/asn1/asn1_par.c',
'openssl/crypto/asn1/asn_mime.c',
'openssl/crypto/asn1/asn_moid.c',
'openssl/crypto/asn1/asn_mstbl.c',
'openssl/crypto/asn1/asn_pack.c',
'openssl/crypto/asn1/bio_asn1.c',
'openssl/crypto/asn1/bio_ndef.c',
'openssl/crypto/asn1/d2i_pr.c',
'openssl/crypto/asn1/d2i_pu.c',
'openssl/crypto/asn1/evp_asn1.c',
'openssl/crypto/asn1/f_int.c',
'openssl/crypto/asn1/f_string.c',
'openssl/crypto/asn1/i2d_pr.c',
'openssl/crypto/asn1/i2d_pu.c',
'openssl/crypto/asn1/n_pkey.c',
'openssl/crypto/asn1/nsseq.c',
'openssl/crypto/asn1/p5_pbe.c',
'openssl/crypto/asn1/p5_pbev2.c',
'openssl/crypto/asn1/p5_scrypt.c',
'openssl/crypto/asn1/p8_pkey.c',
'openssl/crypto/asn1/t_bitst.c',
'openssl/crypto/asn1/t_pkey.c',
'openssl/crypto/asn1/t_spki.c',
'openssl/crypto/asn1/tasn_dec.c',
'openssl/crypto/asn1/tasn_enc.c',
'openssl/crypto/asn1/tasn_fre.c',
'openssl/crypto/asn1/tasn_new.c',
'openssl/crypto/asn1/tasn_prn.c',
'openssl/crypto/asn1/tasn_scn.c',
'openssl/crypto/asn1/tasn_typ.c',
'openssl/crypto/asn1/tasn_utl.c',
'openssl/crypto/asn1/x_algor.c',
'openssl/crypto/asn1/x_bignum.c',
'openssl/crypto/asn1/x_info.c',
'openssl/crypto/asn1/x_int64.c',
'openssl/crypto/asn1/x_long.c',
'openssl/crypto/asn1/x_pkey.c',
'openssl/crypto/asn1/x_sig.c',
'openssl/crypto/asn1/x_spki.c',
'openssl/crypto/asn1/x_val.c',
'openssl/crypto/async/arch/async_null.c',
'openssl/crypto/async/arch/async_posix.c',
'openssl/crypto/async/arch/async_win.c',
'openssl/crypto/async/async.c',
'openssl/crypto/async/async_err.c',
'openssl/crypto/async/async_wait.c',
'openssl/crypto/bf/bf_cfb64.c',
'openssl/crypto/bf/bf_ecb.c',
'openssl/crypto/bf/bf_enc.c',
'openssl/crypto/bf/bf_ofb64.c',
'openssl/crypto/bf/bf_skey.c',
'openssl/crypto/bio/b_addr.c',
'openssl/crypto/bio/b_dump.c',
'openssl/crypto/bio/b_print.c',
'openssl/crypto/bio/b_sock.c',
'openssl/crypto/bio/b_sock2.c',
'openssl/crypto/bio/bf_buff.c',
'openssl/crypto/bio/bf_lbuf.c',
'openssl/crypto/bio/bf_nbio.c',
'openssl/crypto/bio/bf_null.c',
'openssl/crypto/bio/bio_cb.c',
'openssl/crypto/bio/bio_err.c',
'openssl/crypto/bio/bio_lib.c',
'openssl/crypto/bio/bio_meth.c',
'openssl/crypto/bio/bss_acpt.c',
'openssl/crypto/bio/bss_bio.c',
'openssl/crypto/bio/bss_conn.c',
'openssl/crypto/bio/bss_dgram.c',
'openssl/crypto/bio/bss_fd.c',
'openssl/crypto/bio/bss_file.c',
'openssl/crypto/bio/bss_log.c',
'openssl/crypto/bio/bss_mem.c',
'openssl/crypto/bio/bss_null.c',
'openssl/crypto/bio/bss_sock.c',
'openssl/crypto/blake2/blake2b.c',
'openssl/crypto/blake2/blake2s.c',
'openssl/crypto/blake2/m_blake2b.c',
'openssl/crypto/blake2/m_blake2s.c',
'openssl/crypto/bn/asm/x86_64-gcc.c',
'openssl/crypto/bn/bn_add.c',
'openssl/crypto/bn/bn_blind.c',
'openssl/crypto/bn/bn_const.c',
'openssl/crypto/bn/bn_ctx.c',
'openssl/crypto/bn/bn_depr.c',
'openssl/crypto/bn/bn_dh.c',
'openssl/crypto/bn/bn_div.c',
'openssl/crypto/bn/bn_err.c',
'openssl/crypto/bn/bn_exp.c',
'openssl/crypto/bn/bn_exp2.c',
'openssl/crypto/bn/bn_gcd.c',
'openssl/crypto/bn/bn_gf2m.c',
'openssl/crypto/bn/bn_intern.c',
'openssl/crypto/bn/bn_kron.c',
'openssl/crypto/bn/bn_lib.c',
'openssl/crypto/bn/bn_mod.c',
'openssl/crypto/bn/bn_mont.c',
'openssl/crypto/bn/bn_mpi.c',
'openssl/crypto/bn/bn_mul.c',
'openssl/crypto/bn/bn_nist.c',
'openssl/crypto/bn/bn_prime.c',
'openssl/crypto/bn/bn_print.c',
'openssl/crypto/bn/bn_rand.c',
'openssl/crypto/bn/bn_recp.c',
'openssl/crypto/bn/bn_shift.c',
'openssl/crypto/bn/bn_sqr.c',
'openssl/crypto/bn/bn_sqrt.c',
'openssl/crypto/bn/bn_srp.c',
'openssl/crypto/bn/bn_word.c',
'openssl/crypto/bn/bn_x931p.c',
'openssl/crypto/bn/rsaz_exp.c',
'openssl/crypto/buffer/buf_err.c',
'openssl/crypto/buffer/buffer.c',
'openssl/crypto/camellia/cmll_cfb.c',
'openssl/crypto/camellia/cmll_ctr.c',
'openssl/crypto/camellia/cmll_ecb.c',
'openssl/crypto/camellia/cmll_misc.c',
'openssl/crypto/camellia/cmll_ofb.c',
'openssl/crypto/cast/c_cfb64.c',
'openssl/crypto/cast/c_ecb.c',
'openssl/crypto/cast/c_enc.c',
'openssl/crypto/cast/c_ofb64.c',
'openssl/crypto/cast/c_skey.c',
'openssl/crypto/cmac/cm_ameth.c',
'openssl/crypto/cmac/cm_pmeth.c',
'openssl/crypto/cmac/cmac.c',
'openssl/crypto/cms/cms_asn1.c',
'openssl/crypto/cms/cms_att.c',
'openssl/crypto/cms/cms_cd.c',
'openssl/crypto/cms/cms_dd.c',
'openssl/crypto/cms/cms_enc.c',
'openssl/crypto/cms/cms_env.c',
'openssl/crypto/cms/cms_err.c',
'openssl/crypto/cms/cms_ess.c',
'openssl/crypto/cms/cms_io.c',
'openssl/crypto/cms/cms_kari.c',
'openssl/crypto/cms/cms_lib.c',
'openssl/crypto/cms/cms_pwri.c',
'openssl/crypto/cms/cms_sd.c',
'openssl/crypto/cms/cms_smime.c',
'openssl/crypto/conf/conf_api.c',
'openssl/crypto/conf/conf_def.c',
'openssl/crypto/conf/conf_err.c',
'openssl/crypto/conf/conf_lib.c',
'openssl/crypto/conf/conf_mall.c',
'openssl/crypto/conf/conf_mod.c',
'openssl/crypto/conf/conf_sap.c',
'openssl/crypto/conf/conf_ssl.c',
'openssl/crypto/cpt_err.c',
'openssl/crypto/cryptlib.c',
'openssl/crypto/ct/ct_b64.c',
'openssl/crypto/ct/ct_err.c',
'openssl/crypto/ct/ct_log.c',
'openssl/crypto/ct/ct_oct.c',
'openssl/crypto/ct/ct_policy.c',
'openssl/crypto/ct/ct_prn.c',
'openssl/crypto/ct/ct_sct.c',
'openssl/crypto/ct/ct_sct_ctx.c',
'openssl/crypto/ct/ct_vfy.c',
'openssl/crypto/ct/ct_x509v3.c',
'openssl/crypto/cversion.c',
'openssl/crypto/des/cbc_cksm.c',
'openssl/crypto/des/cbc_enc.c',
'openssl/crypto/des/cfb64ede.c',
'openssl/crypto/des/cfb64enc.c',
'openssl/crypto/des/cfb_enc.c',
'openssl/crypto/des/des_enc.c',
'openssl/crypto/des/ecb3_enc.c',
'openssl/crypto/des/ecb_enc.c',
'openssl/crypto/des/fcrypt.c',
'openssl/crypto/des/fcrypt_b.c',
'openssl/crypto/des/ofb64ede.c',
'openssl/crypto/des/ofb64enc.c',
'openssl/crypto/des/ofb_enc.c',
'openssl/crypto/des/pcbc_enc.c',
'openssl/crypto/des/qud_cksm.c',
'openssl/crypto/des/rand_key.c',
'openssl/crypto/des/rpc_enc.c',
'openssl/crypto/des/set_key.c',
'openssl/crypto/des/str2key.c',
'openssl/crypto/des/xcbc_enc.c',
'openssl/crypto/dh/dh_ameth.c',
'openssl/crypto/dh/dh_asn1.c',
'openssl/crypto/dh/dh_check.c',
'openssl/crypto/dh/dh_depr.c',
'openssl/crypto/dh/dh_err.c',
'openssl/crypto/dh/dh_gen.c',
'openssl/crypto/dh/dh_kdf.c',
'openssl/crypto/dh/dh_key.c',
'openssl/crypto/dh/dh_lib.c',
'openssl/crypto/dh/dh_meth.c',
'openssl/crypto/dh/dh_pmeth.c',
'openssl/crypto/dh/dh_prn.c',
'openssl/crypto/dh/dh_rfc5114.c',
'openssl/crypto/dsa/dsa_ameth.c',
'openssl/crypto/dsa/dsa_asn1.c',
'openssl/crypto/dsa/dsa_depr.c',
'openssl/crypto/dsa/dsa_err.c',
'openssl/crypto/dsa/dsa_gen.c',
'openssl/crypto/dsa/dsa_key.c',
'openssl/crypto/dsa/dsa_lib.c',
'openssl/crypto/dsa/dsa_meth.c',
'openssl/crypto/dsa/dsa_ossl.c',
'openssl/crypto/dsa/dsa_pmeth.c',
'openssl/crypto/dsa/dsa_prn.c',
'openssl/crypto/dsa/dsa_sign.c',
'openssl/crypto/dsa/dsa_vrf.c',
'openssl/crypto/dso/dso_dl.c',
'openssl/crypto/dso/dso_dlfcn.c',
'openssl/crypto/dso/dso_err.c',
'openssl/crypto/dso/dso_lib.c',
'openssl/crypto/dso/dso_openssl.c',
'openssl/crypto/dso/dso_vms.c',
'openssl/crypto/dso/dso_win32.c',
'openssl/crypto/ebcdic.c',
'openssl/crypto/ec/curve25519.c',
'openssl/crypto/ec/ec2_mult.c',
'openssl/crypto/ec/ec2_oct.c',
'openssl/crypto/ec/ec2_smpl.c',
'openssl/crypto/ec/ec_ameth.c',
'openssl/crypto/ec/ec_asn1.c',
'openssl/crypto/ec/ec_check.c',
'openssl/crypto/ec/ec_curve.c',
'openssl/crypto/ec/ec_cvt.c',
'openssl/crypto/ec/ec_err.c',
'openssl/crypto/ec/ec_key.c',
'openssl/crypto/ec/ec_kmeth.c',
'openssl/crypto/ec/ec_lib.c',
'openssl/crypto/ec/ec_mult.c',
'openssl/crypto/ec/ec_oct.c',
'openssl/crypto/ec/ec_pmeth.c',
'openssl/crypto/ec/ec_print.c',
'openssl/crypto/ec/ecdh_kdf.c',
'openssl/crypto/ec/ecdh_ossl.c',
'openssl/crypto/ec/ecdsa_ossl.c',
'openssl/crypto/ec/ecdsa_sign.c',
'openssl/crypto/ec/ecdsa_vrf.c',
'openssl/crypto/ec/eck_prn.c',
'openssl/crypto/ec/ecp_mont.c',
'openssl/crypto/ec/ecp_nist.c',
'openssl/crypto/ec/ecp_nistp224.c',
'openssl/crypto/ec/ecp_nistp256.c',
'openssl/crypto/ec/ecp_nistp521.c',
'openssl/crypto/ec/ecp_nistputil.c',
'openssl/crypto/ec/ecp_nistz256.c',
'openssl/crypto/ec/ecp_oct.c',
'openssl/crypto/ec/ecp_smpl.c',
'openssl/crypto/ec/ecx_meth.c',
'openssl/crypto/engine/eng_all.c',
'openssl/crypto/engine/eng_cnf.c',
'openssl/crypto/engine/eng_cryptodev.c',
'openssl/crypto/engine/eng_ctrl.c',
'openssl/crypto/engine/eng_dyn.c',
'openssl/crypto/engine/eng_err.c',
'openssl/crypto/engine/eng_fat.c',
'openssl/crypto/engine/eng_init.c',
'openssl/crypto/engine/eng_lib.c',
'openssl/crypto/engine/eng_list.c',
'openssl/crypto/engine/eng_openssl.c',
'openssl/crypto/engine/eng_pkey.c',
'openssl/crypto/engine/eng_rdrand.c',
'openssl/crypto/engine/eng_table.c',
'openssl/crypto/engine/tb_asnmth.c',
'openssl/crypto/engine/tb_cipher.c',
'openssl/crypto/engine/tb_dh.c',
'openssl/crypto/engine/tb_digest.c',
'openssl/crypto/engine/tb_dsa.c',
'openssl/crypto/engine/tb_eckey.c',
'openssl/crypto/engine/tb_pkmeth.c',
'openssl/crypto/engine/tb_rand.c',
'openssl/crypto/engine/tb_rsa.c',
'openssl/crypto/err/err.c',
'openssl/crypto/err/err_all.c',
'openssl/crypto/err/err_prn.c',
'openssl/crypto/evp/bio_b64.c',
'openssl/crypto/evp/bio_enc.c',
'openssl/crypto/evp/bio_md.c',
'openssl/crypto/evp/bio_ok.c',
'openssl/crypto/evp/c_allc.c',
'openssl/crypto/evp/c_alld.c',
'openssl/crypto/evp/cmeth_lib.c',
'openssl/crypto/evp/digest.c',
'openssl/crypto/evp/e_aes.c',
'openssl/crypto/evp/e_aes_cbc_hmac_sha1.c',
'openssl/crypto/evp/e_aes_cbc_hmac_sha256.c',
'openssl/crypto/evp/e_bf.c',
'openssl/crypto/evp/e_camellia.c',
'openssl/crypto/evp/e_cast.c',
'openssl/crypto/evp/e_chacha20_poly1305.c',
'openssl/crypto/evp/e_des.c',
'openssl/crypto/evp/e_des3.c',
'openssl/crypto/evp/e_idea.c',
'openssl/crypto/evp/e_null.c',
'openssl/crypto/evp/e_old.c',
'openssl/crypto/evp/e_rc2.c',
'openssl/crypto/evp/e_rc4.c',
'openssl/crypto/evp/e_rc4_hmac_md5.c',
'openssl/crypto/evp/e_rc5.c',
'openssl/crypto/evp/e_seed.c',
'openssl/crypto/evp/e_xcbc_d.c',
'openssl/crypto/evp/encode.c',
'openssl/crypto/evp/evp_cnf.c',
'openssl/crypto/evp/evp_enc.c',
'openssl/crypto/evp/evp_err.c',
'openssl/crypto/evp/evp_key.c',
'openssl/crypto/evp/evp_lib.c',
'openssl/crypto/evp/evp_pbe.c',
'openssl/crypto/evp/evp_pkey.c',
'openssl/crypto/evp/m_md2.c',
'openssl/crypto/evp/m_md4.c',
'openssl/crypto/evp/m_md5.c',
'openssl/crypto/evp/m_md5_sha1.c',
'openssl/crypto/evp/m_mdc2.c',
'openssl/crypto/evp/m_null.c',
'openssl/crypto/evp/m_ripemd.c',
'openssl/crypto/evp/m_sha1.c',
'openssl/crypto/evp/m_sigver.c',
'openssl/crypto/evp/m_wp.c',
'openssl/crypto/evp/names.c',
'openssl/crypto/evp/p5_crpt.c',
'openssl/crypto/evp/p5_crpt2.c',
'openssl/crypto/evp/p_dec.c',
'openssl/crypto/evp/p_enc.c',
'openssl/crypto/evp/p_lib.c',
'openssl/crypto/evp/p_open.c',
'openssl/crypto/evp/p_seal.c',
'openssl/crypto/evp/p_sign.c',
'openssl/crypto/evp/p_verify.c',
'openssl/crypto/evp/pmeth_fn.c',
'openssl/crypto/evp/pmeth_gn.c',
'openssl/crypto/evp/pmeth_lib.c',
'openssl/crypto/evp/scrypt.c',
'openssl/crypto/ex_data.c',
'openssl/crypto/getenv.c',
'openssl/crypto/hmac/hm_ameth.c',
'openssl/crypto/hmac/hm_pmeth.c',
'openssl/crypto/hmac/hmac.c',
'openssl/crypto/idea/i_cbc.c',
'openssl/crypto/idea/i_cfb64.c',
'openssl/crypto/idea/i_ecb.c',
'openssl/crypto/idea/i_ofb64.c',
'openssl/crypto/idea/i_skey.c',
'openssl/crypto/init.c',
'openssl/crypto/kdf/hkdf.c',
'openssl/crypto/kdf/kdf_err.c',
'openssl/crypto/kdf/tls1_prf.c',
'openssl/crypto/lhash/lh_stats.c',
'openssl/crypto/lhash/lhash.c',
'openssl/crypto/md4/md4_dgst.c',
'openssl/crypto/md4/md4_one.c',
'openssl/crypto/md5/md5_dgst.c',
'openssl/crypto/md5/md5_one.c',
'openssl/crypto/mdc2/mdc2_one.c',
'openssl/crypto/mdc2/mdc2dgst.c',
'openssl/crypto/mem.c',
'openssl/crypto/mem_dbg.c',
'openssl/crypto/mem_sec.c',
'openssl/crypto/modes/cbc128.c',
'openssl/crypto/modes/ccm128.c',
'openssl/crypto/modes/cfb128.c',
'openssl/crypto/modes/ctr128.c',
'openssl/crypto/modes/cts128.c',
'openssl/crypto/modes/gcm128.c',
'openssl/crypto/modes/ocb128.c',
'openssl/crypto/modes/ofb128.c',
'openssl/crypto/modes/wrap128.c',
'openssl/crypto/modes/xts128.c',
'openssl/crypto/o_dir.c',
'openssl/crypto/o_fips.c',
'openssl/crypto/o_fopen.c',
'openssl/crypto/o_init.c',
'openssl/crypto/o_str.c',
'openssl/crypto/o_time.c',
'openssl/crypto/objects/o_names.c',
'openssl/crypto/objects/obj_dat.c',
'openssl/crypto/objects/obj_err.c',
'openssl/crypto/objects/obj_lib.c',
'openssl/crypto/objects/obj_xref.c',
'openssl/crypto/ocsp/ocsp_asn.c',
'openssl/crypto/ocsp/ocsp_cl.c',
'openssl/crypto/ocsp/ocsp_err.c',
'openssl/crypto/ocsp/ocsp_ext.c',
'openssl/crypto/ocsp/ocsp_ht.c',
'openssl/crypto/ocsp/ocsp_lib.c',
'openssl/crypto/ocsp/ocsp_prn.c',
'openssl/crypto/ocsp/ocsp_srv.c',
'openssl/crypto/ocsp/ocsp_vfy.c',
'openssl/crypto/ocsp/v3_ocsp.c',
'openssl/crypto/pem/pem_all.c',
'openssl/crypto/pem/pem_err.c',
'openssl/crypto/pem/pem_info.c',
'openssl/crypto/pem/pem_lib.c',
'openssl/crypto/pem/pem_oth.c',
'openssl/crypto/pem/pem_pk8.c',
'openssl/crypto/pem/pem_pkey.c',
'openssl/crypto/pem/pem_sign.c',
'openssl/crypto/pem/pem_x509.c',
'openssl/crypto/pem/pem_xaux.c',
'openssl/crypto/pem/pvkfmt.c',
'openssl/crypto/pkcs12/p12_add.c',
'openssl/crypto/pkcs12/p12_asn.c',
'openssl/crypto/pkcs12/p12_attr.c',
'openssl/crypto/pkcs12/p12_crpt.c',
'openssl/crypto/pkcs12/p12_crt.c',
'openssl/crypto/pkcs12/p12_decr.c',
'openssl/crypto/pkcs12/p12_init.c',
'openssl/crypto/pkcs12/p12_key.c',
'openssl/crypto/pkcs12/p12_kiss.c',
'openssl/crypto/pkcs12/p12_mutl.c',
'openssl/crypto/pkcs12/p12_npas.c',
'openssl/crypto/pkcs12/p12_p8d.c',
'openssl/crypto/pkcs12/p12_p8e.c',
'openssl/crypto/pkcs12/p12_sbag.c',
'openssl/crypto/pkcs12/p12_utl.c',
'openssl/crypto/pkcs12/pk12err.c',
'openssl/crypto/pkcs7/bio_pk7.c',
'openssl/crypto/pkcs7/pk7_asn1.c',
'openssl/crypto/pkcs7/pk7_attr.c',
'openssl/crypto/pkcs7/pk7_doit.c',
'openssl/crypto/pkcs7/pk7_lib.c',
'openssl/crypto/pkcs7/pk7_mime.c',
'openssl/crypto/pkcs7/pk7_smime.c',
'openssl/crypto/pkcs7/pkcs7err.c',
'openssl/crypto/poly1305/poly1305.c',
'openssl/crypto/rand/md_rand.c',
'openssl/crypto/rand/rand_egd.c',
'openssl/crypto/rand/rand_err.c',
'openssl/crypto/rand/rand_lib.c',
'openssl/crypto/rand/rand_unix.c',
'openssl/crypto/rand/rand_vms.c',
'openssl/crypto/rand/rand_win.c',
'openssl/crypto/rand/randfile.c',
'openssl/crypto/rc2/rc2_cbc.c',
'openssl/crypto/rc2/rc2_ecb.c',
'openssl/crypto/rc2/rc2_skey.c',
'openssl/crypto/rc2/rc2cfb64.c',
'openssl/crypto/rc2/rc2ofb64.c',
'openssl/crypto/ripemd/rmd_dgst.c',
'openssl/crypto/ripemd/rmd_one.c',
'openssl/crypto/rsa/rsa_ameth.c',
'openssl/crypto/rsa/rsa_asn1.c',
'openssl/crypto/rsa/rsa_chk.c',
'openssl/crypto/rsa/rsa_crpt.c',
'openssl/crypto/rsa/rsa_depr.c',
'openssl/crypto/rsa/rsa_err.c',
'openssl/crypto/rsa/rsa_gen.c',
'openssl/crypto/rsa/rsa_lib.c',
'openssl/crypto/rsa/rsa_meth.c',
'openssl/crypto/rsa/rsa_none.c',
'openssl/crypto/rsa/rsa_null.c',
'openssl/crypto/rsa/rsa_oaep.c',
'openssl/crypto/rsa/rsa_ossl.c',
'openssl/crypto/rsa/rsa_pk1.c',
'openssl/crypto/rsa/rsa_pmeth.c',
'openssl/crypto/rsa/rsa_prn.c',
'openssl/crypto/rsa/rsa_pss.c',
'openssl/crypto/rsa/rsa_saos.c',
'openssl/crypto/rsa/rsa_sign.c',
'openssl/crypto/rsa/rsa_ssl.c',
'openssl/crypto/rsa/rsa_x931.c',
'openssl/crypto/rsa/rsa_x931g.c',
'openssl/crypto/seed/seed.c',
'openssl/crypto/seed/seed_cbc.c',
'openssl/crypto/seed/seed_cfb.c',
'openssl/crypto/seed/seed_ecb.c',
'openssl/crypto/seed/seed_ofb.c',
'openssl/crypto/sha/sha1_one.c',
'openssl/crypto/sha/sha1dgst.c',
'openssl/crypto/sha/sha256.c',
'openssl/crypto/sha/sha512.c',
'openssl/crypto/srp/srp_lib.c',
'openssl/crypto/srp/srp_vfy.c',
'openssl/crypto/stack/stack.c',
'openssl/crypto/threads_none.c',
'openssl/crypto/threads_pthread.c',
'openssl/crypto/threads_win.c',
'openssl/crypto/ts/ts_asn1.c',
'openssl/crypto/ts/ts_conf.c',
'openssl/crypto/ts/ts_err.c',
'openssl/crypto/ts/ts_lib.c',
'openssl/crypto/ts/ts_req_print.c',
'openssl/crypto/ts/ts_req_utils.c',
'openssl/crypto/ts/ts_rsp_print.c',
'openssl/crypto/ts/ts_rsp_sign.c',
'openssl/crypto/ts/ts_rsp_utils.c',
'openssl/crypto/ts/ts_rsp_verify.c',
'openssl/crypto/ts/ts_verify_ctx.c',
'openssl/crypto/txt_db/txt_db.c',
'openssl/crypto/ui/ui_err.c',
'openssl/crypto/ui/ui_lib.c',
'openssl/crypto/ui/ui_openssl.c',
'openssl/crypto/ui/ui_util.c',
'openssl/crypto/uid.c',
'openssl/crypto/whrlpool/wp_dgst.c',
'openssl/crypto/x509/by_dir.c',
'openssl/crypto/x509/by_file.c',
'openssl/crypto/x509/t_crl.c',
'openssl/crypto/x509/t_req.c',
'openssl/crypto/x509/t_x509.c',
'openssl/crypto/x509/x509_att.c',
'openssl/crypto/x509/x509_cmp.c',
'openssl/crypto/x509/x509_d2.c',
'openssl/crypto/x509/x509_def.c',
'openssl/crypto/x509/x509_err.c',
'openssl/crypto/x509/x509_ext.c',
'openssl/crypto/x509/x509_lu.c',
'openssl/crypto/x509/x509_meth.c',
'openssl/crypto/x509/x509_obj.c',
'openssl/crypto/x509/x509_r2x.c',
'openssl/crypto/x509/x509_req.c',
'openssl/crypto/x509/x509_set.c',
'openssl/crypto/x509/x509_trs.c',
'openssl/crypto/x509/x509_txt.c',
'openssl/crypto/x509/x509_v3.c',
'openssl/crypto/x509/x509_vfy.c',
'openssl/crypto/x509/x509_vpm.c',
'openssl/crypto/x509/x509cset.c',
'openssl/crypto/x509/x509name.c',
'openssl/crypto/x509/x509rset.c',
'openssl/crypto/x509/x509spki.c',
'openssl/crypto/x509/x509type.c',
'openssl/crypto/x509/x_all.c',
'openssl/crypto/x509/x_attrib.c',
'openssl/crypto/x509/x_crl.c',
'openssl/crypto/x509/x_exten.c',
'openssl/crypto/x509/x_name.c',
'openssl/crypto/x509/x_pubkey.c',
'openssl/crypto/x509/x_req.c',
'openssl/crypto/x509/x_x509.c',
'openssl/crypto/x509/x_x509a.c',
'openssl/crypto/x509v3/pcy_cache.c',
'openssl/crypto/x509v3/pcy_data.c',
'openssl/crypto/x509v3/pcy_lib.c',
'openssl/crypto/x509v3/pcy_map.c',
'openssl/crypto/x509v3/pcy_node.c',
'openssl/crypto/x509v3/pcy_tree.c',
'openssl/crypto/x509v3/v3_addr.c',
'openssl/crypto/x509v3/v3_akey.c',
'openssl/crypto/x509v3/v3_akeya.c',
'openssl/crypto/x509v3/v3_alt.c',
'openssl/crypto/x509v3/v3_asid.c',
'openssl/crypto/x509v3/v3_bcons.c',
'openssl/crypto/x509v3/v3_bitst.c',
'openssl/crypto/x509v3/v3_conf.c',
'openssl/crypto/x509v3/v3_cpols.c',
'openssl/crypto/x509v3/v3_crld.c',
'openssl/crypto/x509v3/v3_enum.c',
'openssl/crypto/x509v3/v3_extku.c',
'openssl/crypto/x509v3/v3_genn.c',
'openssl/crypto/x509v3/v3_ia5.c',
'openssl/crypto/x509v3/v3_info.c',
'openssl/crypto/x509v3/v3_int.c',
'openssl/crypto/x509v3/v3_lib.c',
'openssl/crypto/x509v3/v3_ncons.c',
'openssl/crypto/x509v3/v3_pci.c',
'openssl/crypto/x509v3/v3_pcia.c',
'openssl/crypto/x509v3/v3_pcons.c',
'openssl/crypto/x509v3/v3_pku.c',
'openssl/crypto/x509v3/v3_pmaps.c',
'openssl/crypto/x509v3/v3_prn.c',
'openssl/crypto/x509v3/v3_purp.c',
'openssl/crypto/x509v3/v3_skey.c',
'openssl/crypto/x509v3/v3_sxnet.c',
'openssl/crypto/x509v3/v3_tlsf.c',
'openssl/crypto/x509v3/v3_utl.c',
'openssl/crypto/x509v3/v3err.c',
'openssl/engines/e_capi.c',
'openssl/engines/e_padlock.c',
],
'openssl_sources_solaris64-x86_64-gcc': [
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aes-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-mb-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha1-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-sha256-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/aesni-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/bsaes-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/aes/vpaes-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/bn/rsaz-avx2.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/bn/rsaz-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-gf2m.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-mont.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/bn/x86_64-mont5.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/camellia/cmll-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/chacha/chacha-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/ec/ecp_nistz256-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/md5/md5-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/modes/aesni-gcm-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/modes/ghash-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/poly1305/poly1305-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/rc4/rc4-md5-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/rc4/rc4-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha1-mb-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha1-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha256-mb-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha256-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/sha/sha512-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/whrlpool/wp-x86_64.s',
'./config/archs/solaris64-x86_64-gcc/asm/crypto/x86_64cpuid.s',
'./config/archs/solaris64-x86_64-gcc/asm/engines/e_padlock-x86_64.s',
],
'openssl_defines_solaris64-x86_64-gcc': [
'DSO_DLFCN',
'HAVE_DLFCN_H',
'NDEBUG',
'OPENSSL_THREADS',
'OPENSSL_NO_DYNAMIC_ENGINE',
'OPENSSL_PIC',
'OPENSSL_IA32_SSE2',
'OPENSSL_BN_ASM_MONT',
'OPENSSL_BN_ASM_MONT5',
'OPENSSL_BN_ASM_GF2m',
'SHA1_ASM',
'SHA256_ASM',
'SHA512_ASM',
'RC4_ASM',
'MD5_ASM',
'AES_ASM',
'VPAES_ASM',
'BSAES_ASM',
'GHASH_ASM',
'ECP_NISTZ256_ASM',
'PADLOCK_ASM',
'POLY1305_ASM',
],
'openssl_cflags_solaris64-x86_64-gcc': [
'-m64 -Wall -DL_ENDIAN -O3 -pthread -DFILIO_H',
],
'openssl_ex_libs_solaris64-x86_64-gcc': [
'-lsocket -lnsl -ldl -pthread',
],
},
'include_dirs': [
'.',
'./include',
'./crypto',
'./crypto/include/internal',
],
'defines': ['<@(openssl_defines_solaris64-x86_64-gcc)'],
'cflags' : ['<@(openssl_cflags_solaris64-x86_64-gcc)'],
'libraries': ['<@(openssl_ex_libs_solaris64-x86_64-gcc)'],
'sources': ['<@(openssl_sources)', '<@(openssl_sources_solaris64-x86_64-gcc)'],
}
| [
"madanagopal_thirumalai@comcast.com"
] | madanagopal_thirumalai@comcast.com |
dfcd5e0517a1eaa9536b12a9beb3aee6be961028 | dc3a4dac2980bd000007959ab24f072f3628f647 | /blogapp/migrations/0010_auto_20190430_2146.py | e54c392073719077e1bb7b92774065b1dc912c73 | [] | no_license | AlexisMunera98/blog-app-roiback | 42b473bdd6abf4c873f00c314bef65be305d4235 | 26d1772393000355a73c41423ebd3dbbc5b11782 | refs/heads/master | 2020-05-18T20:25:01.155931 | 2019-05-10T03:14:03 | 2019-05-10T03:14:03 | 184,628,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # Generated by Django 2.2 on 2019-05-01 02:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0009_auto_20190430_2146'),
]
operations = [
migrations.AddField(
model_name='post',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='post',
name='is_disabled',
field=models.BooleanField(default=False),
),
]
| [
"AlexisMunera98@gmail.com"
] | AlexisMunera98@gmail.com |
78a56b01ff5944549b617302417c71ae0dfd2576 | 5f468f45aeb34721f18db9c44ea57f18522db0e7 | /posts/permissions.py | 4ff68af345ff2aad131e29fe1a8a3d5d1805f9ef | [] | no_license | patpio/diary_api | 3cb7f4c99f6ce483123be0832cd714ae1256ffaf | d6b8b87bdeb142d9c2ce894252ac8f77352b643c | refs/heads/master | 2023-03-15T02:35:13.071248 | 2021-03-15T17:51:06 | 2021-03-15T17:51:06 | 345,736,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | from rest_framework import permissions
from rest_framework.permissions import BasePermission
class IsAuthorOrReadOnly(BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.author == request.user
| [
"pat_pi@yahoo.com"
] | pat_pi@yahoo.com |
f4f761270a7a9f05522084b9a90f626f3507e957 | f2bcca3cca6f9745f550a37dabc78c06cf4e8551 | /calendar_bot/actions/direct_sign_out.py | a33f532b917db7b7b48a924adb7b2abf68a2348c | [] | no_license | wonsokoh1019/samplebot | d9bd10b9d1a41a6ad3af7640161f749770e3fdf9 | d937d9c7fb2c157df7716bc0138fed18a40b4008 | refs/heads/master | 2022-10-07T09:58:14.716833 | 2019-11-15T15:37:15 | 2019-11-15T15:37:15 | 221,910,461 | 0 | 1 | null | 2022-09-21T22:06:23 | 2019-11-15T11:27:45 | Python | UTF-8 | Python | false | false | 2,198 | py | # !/bin/env python
# -*- coding: utf-8 -*-
import tornado.web
import logging
from calendar_bot.model.data import i18n_text, make_text, make_quick_reply
from calendar_bot.externals.send_message import push_message
from calendar_bot.actions.message import invalid_message, TimeStruct, \
create_quick_replay_items
from calendar_bot.model.processStatusDBHandle import get_status_by_user
LOGGER = logging.getLogger("calendar_bot")
def deal_sign_out_message(sign_time, manual_flag=False):
call_back = "sign_out"
if manual_flag:
call_back = "manual_sign_out"
user_time = TimeStruct(sign_time)
text = make_text("Register the current time {date} at {hours}:{min} "
"{interval} as clock-out time?"
.format(date=user_time.date_time.strftime('%m, %d %A'),
hours=user_time.hours, min=user_time.min,
interval=user_time.interval_en))
if manual_flag:
text = make_text("Register the entered {date} at {hours}:{min} "
"{interval} as clock-out time?"
.format(date=user_time.date_time.strftime('%m, %d %A'),
hours=user_time.hours, min=user_time.min,
interval=user_time.interval_en))
reply_items = create_quick_replay_items(
"confirm_out&time=" + user_time.str_current_time_tick, call_back)
text["quickReply"] = make_quick_reply(reply_items)
return text
@tornado.gen.coroutine
def deal_sign_out(account_id, current_date, sign_time, manual_flag=False):
content = get_status_by_user(account_id, current_date)
process = None
if content is not None:
status = content[0]
process = content[1]
if status == "out_done":
return invalid_message()
if process is None or process != "sign_in_done":
return invalid_message()
return deal_sign_out_message(sign_time, manual_flag)
@tornado.gen.coroutine
def direct_sign_out(account_id, current_date, sign_time, _):
content = yield deal_sign_out(account_id, current_date, sign_time)
yield push_message(account_id, content)
| [
"noreply@github.com"
] | wonsokoh1019.noreply@github.com |
c888c63ec49ddbd0b70ca05f332728d7f71ad72a | ce7edd5e65194d339f26a7fbbfad18b6924924da | /MainUI.py | 4c18a6cb82db7ee8b6e4a057aeae7a6086a842df | [] | no_license | KHyoseon/Software_Design | 5e6bc806459940fd1f6e221f1bb038a0576ce0c1 | 77babeefca056d979efe643e108c162e6eb8cc00 | refs/heads/master | 2020-09-20T10:20:56.354866 | 2019-11-27T11:17:03 | 2019-11-27T11:17:03 | 224,449,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | from tkinter import *
class MainUI:
def __init__(self, window: Tk, font):
self.window = window
self.frame_main = Frame(window)
| [
"gidskql6671@naver.com"
] | gidskql6671@naver.com |
cf12f36bcde98e4ea719531190bf64dc351d7f18 | 644a77f60a1c12127606bfff2ee0f9f54d55830b | /Tests/utils/docstring_parsers_ut.py | 0b70bdfdfc5a6f077978878461ef752db7b3504a | [] | no_license | FooBarShebang/pos | f825f649fcd8eaf7b5c84ec1687c2b121178cc44 | bff6d6ca8e9e4d64f8b9470beb5902c8e1ec86f0 | refs/heads/master | 2021-06-23T16:30:30.011955 | 2019-03-01T07:44:00 | 2019-03-01T07:44:00 | 133,469,732 | 1 | 0 | null | 2018-10-03T17:16:28 | 2018-05-15T06:31:43 | Python | UTF-8 | Python | false | false | 22,388 | py | #usr/bin/python
"""
Module pos.tests.utils.docstring_parsers_ut
Implements unit testing of the module pos.utils.docstring_parsers.
"""
__version__ = "0.0.1.0"
__date__ = "18-07-2018"
__status__ = "Testing"
#imports
#+ standard libraries
import sys
import unittest
#+ my libraries
import pos.utils.docstring_parsers as testmodule
from pos.exceptions import CustomTypeError, CustomValueError
#helper functions
def epytext_style(Name, *args, **kwargs):
"""
Epytext / javadoc style docstring
@param Name: string, name of something
@param *args: (optional), any amount of arguments of any types, not really
used
@param **kwargs: (optional), any amount of arguments of any types, not
really used
@return: None
@raise None: no exceptions
>>>epytext_style('whatever')
None
Version 0.0.1.0
"""
return None
#NOTE(review): fixture docstring - compared verbatim by dictTests; do not edit.
def rest_style(Name, *args, **kwargs):
    """
    reST style docstring
    :param Name: name of something
    :type Name: str
    :param *args: (optional), any amount of arguments of any types, not really
        used
    :type *args: /type A/
    :param **kwargs: (optional), any amount of arguments of any types, not
        really used
    :type **kwargs: /type B/
    :returns: nothing is returned
    :rtype: None
    :raises None: no exceptions
    :Example:
    >>>rest_style('whatever')
    None
    Version 0.0.1.0
    """
    return None #only the docstring matters; the call itself is a no-op
#NOTE(review): fixture docstring - compared verbatim by dictTests; do not edit.
def google_style(Name, *args, **kwargs):
    """
    Google / AA style docstring
    Signature:
        str, /type A, type B/ -> None
    Args:
        Name: string, name of something
        *args: (optional), any amount of arguments of any types, not really used
        **kwargs: (optional), keyword, any amount of arguments of any types, not
            really used
    Returns:
        None
    Raises:
        None: no exceptions
    Example:
        >>>google_style('whatever')
        None
    Version 0.0.1.0
    """
    return None #only the docstring matters; the call itself is a no-op
#NOTE(review): fixture docstring - compared verbatim by dictTests; do not edit.
def numpydoc_style(Name, *args, **kwargs):
    """
    NumPydoc style docstring
    Parameters
    ----------
    Name : string, name of something
    *args : (optional), any amount of arguments of any types
        not really used
    **kwargs : (optional), keyword, any amount of arguments of any types
        not really used
    Returns
    -------
    None
        nothing is returned
    Raises
    ------
    None
        no exceptions
    Usage
    -----
    >>>numpydoc_style('whatever')
    None
    Version 0.0.1.0
    """
    return None #only the docstring matters; the call itself is a no-op
#globals
#Fixture table: for every supported docstring style, the sample function whose
#__doc__ is parsed plus the expected output of each parser method ("trimmed",
#"reduced", "signature", "arguments", "returns", "raises") and the parser
#class guess_docstyle() must return. The embedded strings are compared
#verbatim by the test cases - do not reformat them.
dictTests = {
    "Epytext" : {
        "function" : epytext_style, #fixture whose __doc__ is fed to the parser
        "trimmed" : '\n'.join([
            "Epytext / javadoc style docstring", "",
            "@param Name: string, name of something",
            "@param *args: (optional), any amount of arguments of any types, not really",
            "used",
            "@param **kwargs: (optional), any amount of arguments of any types, not",
            "really used",
            "@return: None",
            "@raise None: no exceptions", "",
            ">>>epytext_style('whatever')",
            "None", "",
            "Version 0.0.1.0"
        ]),
        "reduced" : '\n'.join([
            "Epytext / javadoc style docstring", "",
            "Version 0.0.1.0"
        ]),
        "signature" : None, #this style defines no explicit signature
        "arguments" : ['Name', '*args', '**kwargs'],
        "returns" : ['None'],
        "raises" : ['None'],
        "parser" : testmodule.EpytextParser
    },
    "reST" : {
        "function" : rest_style,
        "trimmed" : '\n'.join([
            "reST style docstring", "",
            ":param Name: name of something",
            ":type Name: str",
            ":param *args: (optional), any amount of arguments of any types, not really",
            " used",
            ":type *args: /type A/",
            ":param **kwargs: (optional), any amount of arguments of any types, not",
            " really used",
            ":type **kwargs: /type B/",
            ":returns: nothing is returned",
            ":rtype: None",
            ":raises None: no exceptions", "",
            ":Example:", "",
            ">>>rest_style('whatever')",
            "None", "",
            "Version 0.0.1.0"
        ]),
        "reduced" : '\n'.join([
            "reST style docstring", "",
            "Version 0.0.1.0"
        ]),
        "signature" : 'str, /type A/, /type B/ -> None',
        "arguments" : ['Name', '*args', '**kwargs'],
        "returns" : ['None'],
        "raises" : ['None'],
        "parser" : testmodule.reSTParser
    },
    "Google" : {
        "function" : google_style,
        "trimmed" : '\n'.join([
            "Google / AA style docstring", "",
            "Signature:",
            " str, /type A, type B/ -> None", "",
            "Args:",
            " Name: string, name of something",
            " *args: (optional), any amount of arguments of any types, not really used",
            " **kwargs: (optional), keyword, any amount of arguments of any types, not",
            " really used", "",
            "Returns:",
            " None", "",
            "Raises:",
            " None: no exceptions", "",
            "Example:", "",
            " >>>google_style('whatever')",
            " None", "",
            "Version 0.0.1.0"
        ]),
        "reduced" : '\n'.join([
            "Google / AA style docstring", "", "Signature:",
            " str, /type A, type B/ -> None", "",
            "Version 0.0.1.0"
        ]),
        "signature" : None, #GoogleParser does not extract the signature
        "arguments" : ['Name', '*args', '**kwargs'],
        "returns" : ['None'],
        "raises" : ['None'],
        "parser" : testmodule.AAParser
    },
    "AA" : {
        "function" : google_style, #AA re-uses the Google style fixture
        "trimmed" : '\n'.join([
            "Google / AA style docstring", "",
            "Signature:",
            " str, /type A, type B/ -> None", "",
            "Args:",
            " Name: string, name of something",
            " *args: (optional), any amount of arguments of any types, not really used",
            " **kwargs: (optional), keyword, any amount of arguments of any types, not",
            " really used", "",
            "Returns:",
            " None", "",
            "Raises:",
            " None: no exceptions", "",
            "Example:", "",
            " >>>google_style('whatever')",
            " None", "",
            "Version 0.0.1.0"
        ]),
        "reduced" : '\n'.join([
            "Google / AA style docstring", "",
            "Version 0.0.1.0"
        ]),
        "signature" : "str, /type A, type B/ -> None",
        "arguments" : ['Name', '/*args/', '/**kwargs/'],
        "returns" : ['None'],
        "raises" : ['None'],
        "parser" : testmodule.AAParser
    },
    "NumPy" : {
        "function" : numpydoc_style,
        "trimmed" : '\n'.join([
            "NumPydoc style docstring", "",
            "Parameters", "----------",
            "Name : string, name of something",
            "*args : (optional), any amount of arguments of any types",
            " not really used",
            "**kwargs : (optional), keyword, any amount of arguments of any types",
            " not really used", "",
            "Returns", "-------",
            "None", " nothing is returned", "",
            "Raises", "------",
            "None", " no exceptions", "",
            "Usage", "-----",
            ">>>numpydoc_style('whatever')",
            "None", "",
            "Version 0.0.1.0"
        ]),
        "reduced" : '\n'.join([
            "NumPydoc style docstring", "",
            "Version 0.0.1.0"
        ]),
        "signature" : None, #NumPydoc defines no explicit signature
        "arguments" : ['Name', '*args', '**kwargs'],
        "returns" : ['None'],
        "raises" : ['None'],
        "parser" : testmodule.NumPydocParser
    }
}
#classes
#+ test cases
class Test_GenericParser(unittest.TestCase):
    """
    Unit tests for pos.utils.docstring_parsers.GenericParser.
    """
    @classmethod
    def setUpClass(cls):
        """
        One-off preparation: bind the tested parser class and the fixture keys.
        """
        cls.TestClass = testmodule.GenericParser
        cls.TestCases = ["Epytext", 'reST', 'Google', 'NumPy']
    def test_trimDocstring(self):
        """
        trimDocstring() must remove the common excessive indentation, the
        per-line tailing whitespace and the heading / tailing empty lines;
        improper input must raise the custom type / value exceptions.
        """
        #positive checks against every fixture docstring
        for strStyle in self.TestCases:
            strDocstring = dictTests[strStyle]["function"].__doc__
            strExpected = dictTests[strStyle]["trimmed"]
            strError = 'Wrong result of trimDocstring() method for {}'.format(
                strStyle)
            self.assertEqual(self.TestClass.trimDocstring(strDocstring),
                strExpected, strError)
        #negative checks - improper input handling
        with self.assertRaises(CustomTypeError):
            self.TestClass.trimDocstring(1) #not a string
        with self.assertRaises(CustomValueError):
            self.TestClass.trimDocstring('') #empty string
        with self.assertRaises(CustomValueError):
            self.TestClass.trimDocstring(' \n\t\n') #whitespace-only string
class Test_EpytextParser(Test_GenericParser):
    """
    Test cases for the class pos.utils.docstring_parsers.EpytextParser.
    The checks are data-driven: every test method delegates to the shared
    _checkParserMethod() helper, so the sibling parser test cases can re-use
    them by only re-binding TestClass / TestCases in setUpClass().
    """
    @classmethod
    def setUpClass(cls):
        """
        Preparation for the test cases, done only once.
        """
        cls.TestClass = testmodule.EpytextParser
        cls.TestCases = ["Epytext"]
    def _checkImproperInput(self, fMethod):
        """
        Helper: any parser method must raise CustomTypeError on a non-string
        and CustomValueError on an empty or whitespace-only string.
        """
        with self.assertRaises(CustomTypeError):
            fMethod(1) #not a string
        with self.assertRaises(CustomValueError):
            fMethod('') #empty string
        with self.assertRaises(CustomValueError):
            fMethod(' \n\t\n') #string with only whitespaces
    def _checkParserMethod(self, strMethod, strKey):
        """
        Helper: run TestClass.<strMethod> on every fixture docstring and
        compare with dictTests[case][strKey]; an expected value of None is
        checked with assertIsNone. The improper input checks are included.
        """
        fMethod = getattr(self.TestClass, strMethod)
        for strCase in self.TestCases:
            strSource = dictTests[strCase]["function"].__doc__
            gExpected = dictTests[strCase][strKey]
            #same failure message as the original hand-written tests
            strMessage = ' '.join(['Wrong result of',
                '{}()'.format(strMethod), 'method for', strCase, 'format'])
            gResult = fMethod(strSource)
            if gExpected is None:
                self.assertIsNone(gResult, strMessage)
            else:
                self.assertEqual(gResult, gExpected, strMessage)
        self._checkImproperInput(fMethod)
    def test_reduceDocstring(self):
        """
        reduceDocstring() must trim the docstring and remove all lines
        related to the automated documentation generation.
        """
        self._checkParserMethod('reduceDocstring', 'reduced')
    def test_extractSignature(self):
        """
        extractSignature() must return the explicitly defined signature, or
        None for the formats which do not define one.
        """
        self._checkParserMethod('extractSignature', 'signature')
    def test_extractArguments(self):
        """
        extractArguments() must return the names of the parameters /
        arguments of the method / function.
        """
        self._checkParserMethod('extractArguments', 'arguments')
    def test_extractReturnedValues(self):
        """
        extractReturnedValues() must return the types of the returned
        value(s) of the method / function.
        """
        self._checkParserMethod('extractReturnedValues', 'returns')
    def test_extractRaises(self):
        """
        extractRaises() must return the names of the exceptions which can be
        raised by the method / function.
        """
        self._checkParserMethod('extractRaises', 'raises')
class Test_reSTParser(Test_EpytextParser):
    """
    Runs the shared parser test cases against
    pos.utils.docstring_parsers.reSTParser.
    """
    @classmethod
    def setUpClass(cls):
        """
        One-off preparation: select the parser class and its fixture key.
        """
        cls.TestCases = ["reST"]
        cls.TestClass = testmodule.reSTParser
class Test_GoogleParser(Test_EpytextParser):
    """
    Runs the shared parser test cases against
    pos.utils.docstring_parsers.GoogleParser.
    """
    @classmethod
    def setUpClass(cls):
        """
        One-off preparation: select the parser class and its fixture key.
        """
        cls.TestCases = ["Google"]
        cls.TestClass = testmodule.GoogleParser
class Test_AAParser(Test_EpytextParser):
    """
    Runs the shared parser test cases against
    pos.utils.docstring_parsers.AAParser.
    """
    @classmethod
    def setUpClass(cls):
        """
        One-off preparation: select the parser class and its fixture key.
        """
        cls.TestCases = ["AA"]
        cls.TestClass = testmodule.AAParser
class Test_NumPydocParser(Test_EpytextParser):
    """
    Runs the shared parser test cases against
    pos.utils.docstring_parsers.NumPydocParser.
    """
    @classmethod
    def setUpClass(cls):
        """
        One-off preparation: select the parser class and its fixture key.
        """
        cls.TestCases = ["NumPy"]
        cls.TestClass = testmodule.NumPydocParser
class Test_guess_docstyle(unittest.TestCase):
    """
    Test cases for the function pos.utils.docstring_parsers.guess_docstyle().
    """
    def test_guess_docstyle(self):
        """
        guess_docstyle() must map every fixture docstring onto its parser
        class, fall back onto AAParser for an unrecognized format, and raise
        the custom type / value exceptions on improper input.
        """
        #every known docstring style resolves to its dedicated parser class
        for strStyle in dictTests:
            strDocstring = dictTests[strStyle]["function"].__doc__
            clsExpected = dictTests[strStyle]["parser"]
            strError = ' '.join(['Wrong result of guess_docstyle() function',
                'for', strStyle, 'format'])
            self.assertIs(testmodule.guess_docstyle(strDocstring),
                clsExpected, strError)
        #an undefined / plain docstring falls back onto the AA parser
        self.assertIs(testmodule.guess_docstyle(self.__doc__),
            testmodule.AAParser,
            'Wrong result of guess_docstyle() function for an undefined format')
        #negative checks - improper input handling
        with self.assertRaises(CustomTypeError):
            testmodule.guess_docstyle(1) #not a string
        with self.assertRaises(CustomValueError):
            testmodule.guess_docstyle('') #empty string
        with self.assertRaises(CustomValueError):
            testmodule.guess_docstyle(' \n\t\n') #string with only whitespaces
#NOTE(review): the literal expected strings below look whitespace-mangled
#(the outputs for iTabs = 2 and iTabs = 3 should differ by 4 spaces per line
#but render identically here) - verify them against the original repository
#before trusting these tests.
class Test_indent_docstring(unittest.TestCase):
    """
    Test cases for the function pos.utils.docstring_parsers.indent_docstring().
    """
    def test_indent_docstring(self):
        """
        Checks that the function indent_docstring() properly indents a docstring
        - i.e. each line is prefixed by the same amount of white spaces (4, 8,
        etc.). Must not change the docstring if zero indentation is passed.
        """
        strInput = 'test\n docstring\n\nexample'
        strOutput = ' test\n docstring\n \n example'
        self.assertEqual(strOutput, testmodule.indent_docstring(strInput))
        strOutput = '\n'.join([' test', ' docstring',
            ' ', ' example'])
        self.assertEqual(strOutput, testmodule.indent_docstring(strInput, 2))
        strOutput = '\n'.join([' test', ' docstring',
            ' ', ' example'])
        self.assertEqual(strOutput, testmodule.indent_docstring(strInput, 3))
        self.assertEqual(strInput, testmodule.indent_docstring(strInput,
            iTabs = 0)) #zero indentation must leave the input untouched
        #round-trip: indenting a trimmed fixture must restore the raw __doc__
        for strCase in dictTests:
            funcSource = dictTests[strCase]["function"]
            strSource = '\n'.join(funcSource.__doc__.split('\n')[1:-1])
            strInput = dictTests[strCase]["trimmed"]
            strMessage =' '.join(['Wrong result of indent_docstring() function',
                'for', strCase, 'format'])
            strTest = testmodule.indent_docstring(strInput)
            self.assertEqual(strSource, strTest, strMessage)
    def test_indent_docstring_raises(self):
        """
        Checks that the function raises proper exceptions with improper input.
        """
        with self.assertRaises(CustomTypeError):
            testmodule.indent_docstring(1) #not a string - first argument
        with self.assertRaises(CustomValueError):
            testmodule.indent_docstring('') #first argument is an empty string
        with self.assertRaises(CustomTypeError):
            testmodule.indent_docstring('1', "1") #not an int - second argument
        with self.assertRaises(CustomValueError):
            testmodule.indent_docstring('1', -1) #second argument is negative
#+ test suites
#Each TestCase class is wrapped into its own suite, then all suites are
#combined so the whole module can be run by the TextTestRunner below.
TestSuite1 = unittest.TestLoader().loadTestsFromTestCase(Test_GenericParser)
TestSuite2 = unittest.TestLoader().loadTestsFromTestCase(Test_EpytextParser)
TestSuite3 = unittest.TestLoader().loadTestsFromTestCase(Test_reSTParser)
TestSuite4 = unittest.TestLoader().loadTestsFromTestCase(Test_GoogleParser)
TestSuite5 = unittest.TestLoader().loadTestsFromTestCase(Test_AAParser)
TestSuite6 = unittest.TestLoader().loadTestsFromTestCase(Test_NumPydocParser)
TestSuite7 = unittest.TestLoader().loadTestsFromTestCase(Test_guess_docstyle)
TestSuite8 = unittest.TestLoader().loadTestsFromTestCase(Test_indent_docstring)
TestSuite = unittest.TestSuite()
TestSuite.addTests([TestSuite1, TestSuite2, TestSuite3, TestSuite4, TestSuite5,
    TestSuite6, TestSuite7, TestSuite8])
#script entry point: run all suites with verbose reporting
if __name__ == "__main__":
    sys.stdout.write("Conducting pos.utils.docstring_parsers module tests...\n")
    sys.stdout.flush()
    unittest.TextTestRunner(verbosity = 2).run(TestSuite)
"a.azarov@diagnoptics.com"
] | a.azarov@diagnoptics.com |
a4faf0f197626e347318d3e265abfc2d9c3edd63 | c53b3e120c59557daaa2fa5b7626413105eb5965 | /tendenci/apps/forums/templatetags/forum_tags.py | 06fe81ca81cd78b74fc8d55d4ef04945694395b8 | [
"BSD-2-Clause"
] | permissive | chendong0444/ams | 8483334d9b687708d533190b62c1fa4fd4690f2c | f2ac4ecc076b223c262f2cde4fa3b35b4a5cd54e | refs/heads/master | 2021-05-01T03:59:18.682836 | 2018-07-23T06:33:41 | 2018-07-23T06:33:41 | 121,194,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | from datetime import datetime
from django.utils.translation import ugettext_lazy as _
from django.template import Library, TemplateSyntaxError
from tendenci.apps.base.template_tags import ListNode, parse_tag_kwargs
from tendenci.apps.forums.models import Category
register = Library()
class ListForumCategoriesNode(ListNode):
    """Template node that renders forum Category items; categories flagged
    as hidden are only visible to staff users."""
    model = Category
    perms = 'forums.view_category'
    def custom_model_filter(self, items, user):
        # Staff see every category; everybody else only the visible ones.
        return items if user.is_staff else items.filter(hidden=False)
@register.tag
def list_forum_categories(parser, token):
    """
    Used to pull a list of :model:`forums.Category` items.
    Usage::
        {% list_forum_categories as [varname] [options] %}
    Be sure the [varname] has a specific name like ``forums_sidebar`` or
    ``forum_categories_list``. Options can be used as [option]=[value]. Wrap text values
    in quotes like ``tags="cool"``. Options include:
        ``limit``
           The number of items that are shown. **Default: 3**
        ``order``
           The order of the items. **Default: name**
        ``user``
           Specify a user to only show public items to all. **Default: Viewing user**
        ``query``
           The text to search for items. Will not affect order.
        ``random``
           Use this with a value of true to randomize the items included.
    Example::
        {% list_forum_categories as forum_categories_list limit=5 %}
        <ul>
        {% for cat in forum_categories_list %}
            {% with cat.forums.count as c %}
            <li><a href="{% url 'pybb:category' cat.slug %}">{{ cat.name }}</a> - {{ c }} forum{{ c|pluralize }}</li>
            {% endwith %}
        {% endfor %}
        </ul>
    """
    bits = token.split_contents()
    # Validate the token count BEFORE touching bits[2]; the previous version
    # read bits[2] first and crashed with IndexError on a malformed tag
    # instead of raising the intended TemplateSyntaxError.
    if len(bits) < 3:
        message = "'%s' tag requires more than 3" % bits[0]
        raise TemplateSyntaxError(_(message))
    if bits[1] != "as":
        message = "'%s' second argument must be 'as" % bits[0]
        raise TemplateSyntaxError(_(message))
    context_var = bits[2]  # name the result list is bound to in the context
    args = []  # positional args are never used by ListForumCategoriesNode
    kwargs = parse_tag_kwargs(bits)
    if 'order' not in kwargs:
        kwargs['order'] = 'name'  # default ordering per the docstring
    return ListForumCategoriesNode(context_var, *args, **kwargs)
| [
"chendong@shinezone.com"
] | chendong@shinezone.com |
114e5402ccd06cb99473cae68e511b8125a40704 | e886a23776c4ecb05306359ada60b3c05651a9c8 | /appy/views.py | 0b9cf0e5739a56dbb26b6c51e169ce89bb0f556f | [] | no_license | brantphoto/appy | fc2350ff19cb9e3f96dd6b47543b69aab8980103 | 488b6a462bf611294f8bdf8303a89f223420d81e | refs/heads/master | 2021-01-10T14:21:23.648173 | 2015-10-05T00:31:47 | 2015-10-05T00:31:47 | 43,658,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from django.shortcuts import render
from appy.models import Question
from rest_framework import viewsets
from appy.serializers import QuestionsSerializer
class QuestionViewSet(viewsets.ModelViewSet):
    """
    CRUD REST endpoint for Question objects (list / retrieve / create /
    update / destroy via the standard ModelViewSet actions).
    """
    #NOTE(review): '-question_text' orders reverse-alphabetically by the text
    #itself, not by publication date - confirm this ordering is intended.
    queryset = Question.objects.all().order_by('-question_text')
    serializer_class = QuestionsSerializer
# Create your views here.
| [
"brant.barger@gmail.com"
] | brant.barger@gmail.com |
8e7320ae839c6a49bf610bd542a4bdb860e12e84 | 0dabc6017efc0781550233ebb234ab4d22a9150b | /Avarijos_LT_2018.py | 4499d91af60862e8ad0a04a16150feaf7e7bbe91 | [] | no_license | viceruta/My-Final-Work-at-VSC | 27e468e64e074b0ab9ef38ee52fa51048bf26edc | 91213ea4781868059e061dd5e6aa0173a56ad62a | refs/heads/master | 2021-03-10T00:23:16.163455 | 2020-03-10T20:42:04 | 2020-03-10T20:42:04 | 246,398,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,770 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
def define(zod):
    """Print a 50-dash separator line followed by the given message."""
    separator = "-" * 50
    print(separator + "\n" + zod)
#--- Data loading and first inspection --------------------------------------
#NOTE(review): the absolute Windows path makes this script machine-specific;
#consider a relative path or a command-line argument.
define('Duomenų pasiėmimas iš failo')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\Eimas_02-09.xls'
df = pd.read_excel(filename, sep=";", encoding="ansi")
define('Kokie duomenų tipai laikomi kiekviename stulpelyje?')
df.info()
define('Kiek eilučių ir kiek stulpelių yra duomenyse?')
print(df.shape)
print('Gauname, kad turime 2928 avarijas')
define('Kokie duomenys yra stebimi?')
print(df.columns)
define('Sutvarkymas stulpelių pavadinimų')
#replace spaces in column names so they can be used as attribute access below
df.columns = df.columns.str.replace(' ', '_')
print(df.columns)
#---------------------------------------------------------------------------------------------------------------------
#NOTE(review): the two adjacent string literals below concatenate without a
#separating space/newline in the printed banner.
define('Hipotezė:'
       'PRADEDANTIEJI VAIRUOTOJAI 2018 BUVO GERESNI VAIRUOTOJAI NEGU SENJORAI-')
#---------------------------------------------------------------------------------------------------------------------
#--- Cleaning of the driving-experience column and group counts --------------
define('Koks yra vidutinis stebimų žmonių vairavimo stažas?')
print(round(df.Vairavimo_stazas_metais.mean(), 2))
define('Kiek iš viso vairuotojų su stažu? (Išmetame tuščius duomenis)')
df = df[pd.notnull(df.Vairavimo_stazas_metais)]
print(df.Vairavimo_stazas_metais.count())
print('Gauname, kad iš 2928 avarijų 368 yra dėl neužpildytų duomenų, dviračių vairuotojų ar pėsčiųjų kaltės')
define('Vairavimo stažo grupės')
print(df.Vairavimo_stazas_metais.unique())
print('Matome, kad yra grupių su minuso ženklu')
define('Patikriname, kiek avarijų šioje grupėje?')
filtered_data = df[df["Vairavimo_stazas_metais"]<0].Vairavimo_stazas_metais.count()
print(filtered_data)
define('Išmetame grupes, kuriose vairavimo stažas mažiau 0')
df = df[(pd.notnull(df.Vairavimo_stazas_metais)) & ((df.Vairavimo_stazas_metais)>= 0)]
print(df.Vairavimo_stazas_metais.unique())
define('Kiek buvo avarijų dėl vairuotojų neturinčių 2m vairavimo stažo kaltės?')
#novice drivers: experience below 2 years
KlevoLapas = df[df.Vairavimo_stazas_metais <2].Vairavimo_stazas_metais.count()
print(KlevoLapas)
define('Kiek buvo avarijų dėl senjorų kaltės (senjorų vairavimo stažas nuo 47 metų)?')
#senior drivers: experience of 47 years and more
Senjorai = df[df.Vairavimo_stazas_metais >= 47].Vairavimo_stazas_metais.count()
print(Senjorai)
define('Kiek yra skirtingų vairavimo stažo metų grupių?')
print(df.Vairavimo_stazas_metais.nunique())
define('Vairuotojų vairavimo stažo histograma')
df.Vairavimo_stazas_metais.plot.hist(bins =62, color= "#395280")
plt.xlabel('Vairavimo stažas metais')
plt.ylabel('Vairuotojų skaičius')
plt.title('Vairavimo stažo \nhistograma')
plt.grid(True)
plt.show()
#----------------------------------------------------------------------------------------------------------------------
define('Nors iš padarytų avarijų skaičiaus pagal tiriamas grupes, galima teigti, kad pradedantieji nėra geresni '
       'vairuotojai, tačiau reikia patikrinti sąlygas avarijų metu')
#----------------------------------------------------------------------------------------------------------------------
#--- Weather conditions at the time of the accidents, per group --------------
define ('Kokiomis oro sąlygomis įvyko avarijos vairuojant senjorams?')
OroSal_S = df.loc[df['Vairavimo_stazas_metais'] >=47, 'Meteorologines_salygos'].unique()
print(OroSal_S)
define ('Kokiomis oro sąlygomis įvyko avarijos vairuojant pradedantiesiems?')
#NOTE(review): OroSal_S is re-used here for the novice group; a distinct name
#(e.g. OroSal_P) would be clearer, though the value is only printed.
OroSal_S = df.loc[df['Vairavimo_stazas_metais'] <2, 'Meteorologines_salygos'].unique()
print(OroSal_S)
define('Patikrinu, kokiomis oro sąlygomis senjorai ir pradedantieji padarė daugiausiai avarijų')
#NOTE(review): writing to Excel and reading straight back is a roundabout way
#to obtain a fresh DataFrame; a .copy() would avoid the disk round-trip.
OroSal_S = df.loc[df['Vairavimo_stazas_metais'] >=47, 'Meteorologines_salygos']
OroSal_S.to_excel('SenjoruOroSalygos.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\SenjoruOroSalygos.xlsx'
df1 = pd.read_excel(filename, sep=";", encoding="ansi")
OroSal_P = df.loc[df['Vairavimo_stazas_metais'] <2, 'Meteorologines_salygos']
OroSal_P.to_excel('PradedanciujuOroSalygos.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\PradedanciujuOroSalygos.xlsx'
df2 = pd.read_excel(filename, sep=";", encoding="ansi")
#side-by-side bar charts: seniors (left) vs novices (right)
plt.figure(1)
ax1 = plt.subplot(121)
df1.Meteorologines_salygos.value_counts().plot(kind="bar", color= "#584f75",ax=ax1)
plt.xlabel('Oro sąlygos')
plt.ylabel('Senjorų padarytų avarijų sk.')
plt.grid(True)
plt.tight_layout()
ax2=plt.subplot(122)
df2.Meteorologines_salygos.value_counts().plot(kind="bar", color= "#c6a4a8",ax=ax2)
plt.xlabel('Oro sąlygos')
plt.ylabel('Pradedančiųjų padarytų avarijų sk.')
plt.grid(True)
plt.tight_layout()
plt.show()
#----------------------------------------------------------------------------------------------------------------------
define('Kadangi daugiausiai avarijų padaryta tokiomis pačiomis oro sąlygomis, analizė vyksta toliau. '
       'Tikrinu pagal avarijų metu buvusią kelio dangą')
#----------------------------------------------------------------------------------------------------------------------
#--- Road surface condition at the time of the accidents, per group ----------
define ('Kokia buvo kelio dangos buklė vairuojant senjorams?')
KelioDanga_S = df.loc[df['Vairavimo_stazas_metais'] >=47, 'Dangos_bukle'].unique()
print(KelioDanga_S)
define ('Kokia buvo kelio dangos buklė vairuojant pradedantiesiems?')
KelioDanga_P = df.loc[df['Vairavimo_stazas_metais'] <2, 'Dangos_bukle'].unique()
print(KelioDanga_P)
define('Patikrinu, kokiai dangos buklei esant senjorai ir pradedantieji padarė daugiausiai avarijų')
#same Excel round-trip pattern as in the weather section above
KelioDanga_S = df.loc[df['Vairavimo_stazas_metais'] >=47, 'Dangos_bukle']
KelioDanga_S.to_excel('SenjoruKelioDanga.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\SenjoruKelioDanga.xlsx'
df3 = pd.read_excel(filename, sep=";", encoding="ansi")
KelioDanga_P = df.loc[df['Vairavimo_stazas_metais'] <2, 'Dangos_bukle']
KelioDanga_P.to_excel('PradedanciujuKelioDanga.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\PradedanciujuKelioDanga.xlsx'
df4 = pd.read_excel(filename, sep=";", encoding="ansi")
#side-by-side bar charts: seniors (left) vs novices (right)
plt.figure(2)
ax3 = plt.subplot(121)
df3.Dangos_bukle.value_counts().plot(kind="bar", color= "#584f75",ax=ax3)
plt.xlabel('Dangos būklė')
plt.ylabel('Senjorų padarytų avarijų sk.')
plt.grid(True)
plt.tight_layout()
ax4 = plt.subplot(122)
df4.Dangos_bukle.value_counts().plot(kind="bar", color= "#c6a4a8",ax=ax4)
plt.xlabel('Dangos būklė')
plt.ylabel('Pradedančiųjų padarytų avarijų sk.')
plt.grid(True)
plt.tight_layout()
plt.show()
#----------------------------------------------------------------------------------------------------------------------
define('Kadangi atsakymai sutapo- ir pradedantieji ir senjorai daugiausiai avarijų padarė esant sausai kelio dangai, '
       'tiriu sekančią sąlygą')
#----------------------------------------------------------------------------------------------------------------------
#--- Time of day at the time of the accidents, per group ---------------------
define ('Koks buvo paros metas vairuojant senjorams?')
ParosMetas_S = df.loc[df['Vairavimo_stazas_metais'] >=47, 'Paros_metas'].unique()
print(ParosMetas_S)
define ('Koks buvo paros metas vairuojant pradedantiesiems?')
ParosMetas_P = df.loc[df['Vairavimo_stazas_metais'] <2, 'Paros_metas'].unique()
print(ParosMetas_P)
define('Patikrinu, kokiam paros metui esant senjorai ir pradedantieji padarė daugiausiai avarijų')
#same Excel round-trip pattern as in the previous sections
ParosMetas_S = df.loc[df['Vairavimo_stazas_metais'] >=47, 'Paros_metas']
ParosMetas_S.to_excel('SenjoruParosMetas.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\SenjoruParosMetas.xlsx'
df5 = pd.read_excel(filename, sep=";", encoding="ansi")
ParosMetas_P = df.loc[df['Vairavimo_stazas_metais'] <2, 'Paros_metas']
ParosMetas_P.to_excel('PradedanciujuParosMetas.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\PradedanciujuParosMetas.xlsx'
df6 = pd.read_excel(filename, sep=";", encoding="ansi")
#two pie charts: seniors (left) vs novices (right), day slice pulled out
plt.figure(3,figsize=(8,4))
Diena = (df5['Paros_metas'] == 'Diena').sum()
Sutemos_prieblanda = (df5['Paros_metas'] == 'Sutemos (prieblanda)').sum()
Tamsus_paros_metas = (df5['Paros_metas'] == 'Tamsus paros metas').sum()
proportions = [Diena, Sutemos_prieblanda, Tamsus_paros_metas]
ax5 = plt.subplot(121)
explode = (0.1, 0, 0)
spalvos=['#7FB3D5', '#1F618D','#34495E']
plt.pie(proportions, labels = ['Diena', 'Sutemos_prieblanda', 'Tamsus_paros_metas'],explode=explode,
        colors= spalvos, autopct = '%1.2f%%')
plt.title('Senjorai')
Diena = (df6['Paros_metas'] == 'Diena').sum()
Tamsus_paros_metas = (df6['Paros_metas'] == 'Tamsus paros metas').sum()
Sutemos_prieblanda = (df6['Paros_metas'] == 'Sutemos (prieblanda)').sum()
proportions = [Diena, Sutemos_prieblanda, Tamsus_paros_metas]
ax6 = plt.subplot(122)
explode = (0.1, 0, 0)
spalvos=['#7FB3D5', '#1F618D','#34495E']
plt.pie(proportions, labels = ['Diena', 'Sutemos_prieblanda', 'Tamsus_paros_metas'],explode=explode,
        colors= spalvos, autopct = '%1.2f%%')
plt.title('Pradedantieji')
plt.suptitle('Paros metas, kuomet abi grupės padarė daugiausiai avarijų', size = 12)
plt.tight_layout(pad=2)
plt.show()
#----------------------------------------------------------------------------------------------------------------------
define('Pagal tikrintas sąlygas galiu teigti, kad pradedantieji ir senjorai padarė daugiausiai avarijų tokiomis '
       'pačiomis sąlygomis- dienos metu, esant giedram orui ir sausai kelio dangai. Išvada - 2018 metais senjorai '
       'buvo geresni vairuotojai negu vairuotojai, kurių vairavimo stažas iki dviejų metų')
#----------------------------------------------------------------------------------------------------------------------
#--- Linear regression: accidents per year of driving experience -------------
define('Su linijine regresija ir koreliacijos koeficientu patikrinu, ar galima pasitikėti gautais rezultatais?')
dfGp = df.groupby('Vairavimo_stazas_metais').count().reset_index()
dfGp.to_excel('GrupuotaPagalStaza.xlsx')
dfGp.plot.scatter(x= 'Vairavimo_stazas_metais',y= 'Laikas')
#'Laikas' is used as the per-group accident count (any non-null column works
#after groupby().count())
x = dfGp.Vairavimo_stazas_metais.values.reshape(-1,1)
y = dfGp.Laikas.values.reshape(-1,1)
lm = linear_model.LinearRegression()
xTrain, xTest, yTrain, yTest = train_test_split(x, y, test_size = 0.2)
lm.fit(xTrain,yTrain)
yhat = lm.predict(x)
plt.scatter(x,y,color= '#566573')
plt.plot(x, yhat,color = '#EC7063')
define('Kiek tikslus yra grafikas?:')
print(lm.score(xTest,yTest)) #R^2 on the held-out 20% split
# print(lm.intercept_)
# print(lm.coef_)
print('Koreliacijos koeficientas:')
print(dfGp[['Vairavimo_stazas_metais', 'Laikas']].corr())
print('-stiprus neigiamas ryšys, rezultatai ganėtinai tikslūs')
plt.xlabel('Vairavimo stažas metais')
plt.ylabel('Avarijų skaičius')
plt.title('Vairavimo stažo metais ir avarijų skaičiaus linijinė regresija')
plt.show()
#----------------------------------------------------------------------------------------------------------------------
#--- Bonus question: which car make caused the most accidents? ---------------
define('FOR FUN:) '
       'Ar BMW padarė daugiausiai avarijų 2018 metais?')
# 'Kiek automobilių sukėlė avarijas? (Išmetame tuščius duomenis)'
df = df[pd.notnull(df.Marke)]
# 'Patikrinu, kokios markės automobiliai sukėlė daugiausiai avarijų.TOP5'
AutoMarke = df[['Vairavimo_stazas_metais', 'Marke']]
AutoMarke.to_excel('AutomobiliuMarkes.xlsx')
filename = r'C:\Users\Administrator\PycharmProjects\mokslai\eismas\AutomobiliuMarkes.xlsx'
dfM = pd.read_excel(filename, sep=";", encoding="ansi")
dfM.Marke.value_counts().head(5).plot(kind="bar", color= "#395280")
plt.xlabel('Markė')
plt.ylabel('Avarijų sk.')
plt.grid(True)
plt.tight_layout()
plt.show()
define('NE') #the answer to the BMW question
"noreply@github.com"
] | viceruta.noreply@github.com |
769b06663d743e8246de42be5dca168725ef189d | 6e2dd019cec3baa0be2559683606448b5b38dcea | /data/dataset.py | 4107699cd3791b48b48531d7eedd7b3d66b5a800 | [] | no_license | godisboy/Image2image | 8b2b7763b60baf964e36a7955cca29d8a8018470 | ee018cde82ee594186307806df3de9b41c3d50a9 | refs/heads/master | 2020-03-24T11:20:30.095370 | 2018-08-01T07:22:22 | 2018-08-01T07:22:22 | 142,682,857 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,949 | py | import torch
import numpy as np
import torch.utils.data as data
from os import listdir
from os.path import join
from utils.tools import is_image_file
import os
from PIL import Image
import random
def default_loader(path):
return Image.open(path).convert('RGB')
def ToTensor(pic):
"""Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
if isinstance(pic, np.ndarray):
# handle numpy array
img = torch.from_numpy(pic.transpose((2, 0, 1)))
# backard compability
return img.float().div(255)
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
# put it from HWC to CHW format
# yikes, this transpose takes 80% of the loading time/CPU
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
# You should build custom dataset as below.
class DATASET(data.Dataset):
def __init__(self,dataPath='',loadSize=72,fineSize=64,flip=1):
super(DATASET, self).__init__()
# list all images into a list
self.list = [x for x in listdir(dataPath) if is_image_file(x)]
self.dataPath = dataPath
self.loadSize = loadSize
self.fineSize = fineSize
self.flip = flip
def __getitem__(self, index):
# 1. Read one data from file (e.g. using numpy.fromfile, PIL.Image.open).
path = os.path.join(self.dataPath,self.list[index])
img = default_loader(path) # 256x256
# 2. seperate image A and B; Scale; Random Crop; to Tensor
w,h = img.size
if(h != self.loadSize):
img = img.resize((self.loadSize, self.loadSize), Image.BILINEAR)
if(self.loadSize != self.fineSize):
x1 = random.randint(0, self.loadSize - self.fineSize)
y1 = random.randint(0, self.loadSize - self.fineSize)
img = img.crop((x1, y1, x1 + self.fineSize, y1 + self.fineSize))
if(self.flip == 1):
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
img = ToTensor(img) # 3 x 256 x 256
img = img.mul_(2).add_(-1)
# 3. Return a data pair (e.g. image and label).
return img
def __len__(self):
# You should change 0 to the total size of your dataset.
return len(self.list)
| [
"godisboy"
] | godisboy |
ce9b591e99a544c4dd9964895a9c9f5a26194790 | 8b634dc196162dff328d61bf6f8d4121dfb59bd4 | /Strings/defangIPaddr.py | 20984feacf4936ed4db56e61562583e5c5d002cd | [] | no_license | kqg13/LeetCode | 84268b2146dc8323cb71f041b6664069baaa339c | 1c584f4ca4cda7a3fb3148801a1ff4c73befed24 | refs/heads/master | 2023-08-05T09:46:28.103910 | 2023-07-29T21:02:26 | 2023-07-29T21:02:26 | 165,123,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # String problem 1108: Defanging an IP Address
# Given a valid (IPv4) IP address, return a defanged version of that IP.
# A defanged IP address replaces every period "." with "[.]".
# Input: address = "255.100.50.0" --> Output: "255[.]100[.]50[.]0"
class Solution(object):
def defangIPaddr(self, address):
"""
:type address: str
:rtype: str
"""
return address.replace('.', '[.]')
| [
"skg2016@nyu.edu"
] | skg2016@nyu.edu |
7910e5619f8e7ca8bc0b0a840627341121ff7be8 | 7a570977ac42ea25c61b812bd9fe3a5ea0a9f33b | /_GTW/_OMP/_SWP/Object_PN.py | a66375e7c7826fd361df7611466c9bdba562fe8d | [
"BSD-3-Clause"
] | permissive | YanjiaSun/tapyr | 6d6f3859abe5c6cf81e12b2319387d02c0d2749f | c261d3e06db1a4fbe0e6535bd97b0134049b3a79 | refs/heads/master | 2021-05-17T22:22:31.570349 | 2020-02-09T09:41:12 | 2020-02-09T09:41:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,587 | py | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2016 Mag. Christian Tanzer All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. tanzer@swing.co.at
# #*** <License> ************************************************************#
# This module is part of the package GTW.OMP.SWP.
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.OMP.SWP.Object_PN
#
# Purpose
# Model an object with a perma_name.
#
# Revision Dates
# 17-Jun-2013 (CT) Creation (recovered from late `_SWP.Entity` module)
# 28-Jan-2014 (CT) Add `hidden` and `prio` (move from `SWP.Page_Mixin`)
# 13-Mar-2015 (CT) Add `check` against `/` to `perma_name`
# 27-Feb-2015 (CT) Add `not_in_past` to `date.start` and `.finish`
# ««revision-date»»···
#--
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from _MOM.import_MOM import *
from _MOM._Attr.Date_Interval import *
from _GTW import GTW
import _GTW._OMP._SWP
_Ancestor_Essence = GTW.OMP.SWP.Object
class Object_PN (_Ancestor_Essence) :
"""Object with a perma_name."""
is_partial = True
class _Attributes (_Ancestor_Essence._Attributes) :
_Ancestor = _Ancestor_Essence._Attributes
### Primary attributes
class perma_name (A_Date_Slug) :
"""Name used for perma-link."""
kind = Attr.Primary
max_length = 80
ui_name = "Name"
check = \
( """' ' not in value"""
, """'/' not in value"""
)
# end class perma_name
### Non-primary attributes
class date (A_Date_Interval_N) :
"""Publication (`start`) and expiration date (`finish`)"""
class _Attributes :
_Overrides = dict \
( finish = dict (not_in_past = True)
, start = dict (not_in_past = True)
)
# end class _Attributes
kind = Attr.Optional
explanation = """
The page won't be visible before the start date.
After the finish date, the page won't be displayed (except
possibly in an archive).
"""
# end class date
class hidden (A_Boolean) :
"""Don't show page in navigation."""
kind = Attr.Optional
default = False
rank = 1
# end class hidden
class prio (A_Int) :
"""Higher prio sorts before lower prio."""
kind = Attr.Optional
Kind_Mixins = (Attr.Sticky_Mixin, )
default = 0
rank = 1
# end class prio
class short_title (A_String) :
"""Short title (used in navigation)."""
kind = Attr.Necessary
max_length = 30
# end class title
class title (A_String) :
"""Title of the web page"""
kind = Attr.Necessary
max_length = 120
# end class title
# end class _Attributes
# end class Object_PN
if __name__ != "__main__" :
GTW.OMP.SWP._Export ("*")
### __END__ GTW.OMP.SWP.Object_PN
| [
"tanzer@swing.co.at"
] | tanzer@swing.co.at |
36878b21074fb9d20051faa91f756787b0891e51 | 669e9241b02bdaa303fbc2fd4023b90d4d179a59 | /Beam Balance/base.py | 7b5195e30440e992c67459e272870ad27292cea1 | [] | no_license | benjaminpotter/HatchProjects | 0854cf46ae7c3781468116a5d63b703dd54ae68c | 7f6a948d3474c755d071751b725c059e6c7f3553 | refs/heads/master | 2022-01-28T16:58:03.449073 | 2019-08-16T13:47:30 | 2019-08-16T13:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 904 | py | ang = 0
speed = 0.1
def drawSky():
noStroke()
background(240, 247, 111)
def drawPole():
strokeWeight(35)
stroke(130, 74, 9)
line((ang * 0.2) % (width + 40) - 20, height, (ang * 0.2) % (width + 40) - 20, height - 60)
strokeWeight(12)
stroke(204, 146, 71)
line(0 , height - 48 + 5 * sin(ang), width, height - 48 + 5 * sin(ang + 90))
def drawStickman():
strokeWeight(5)
stroke(0, 0, 0)
fill(255, 255, 255)
line(200, 162 + 80, 185 + 40 * sin(ang), 270 + 79)
line(201, 162 + 80, 211 + 40 * sin(ang + 180), 270 + 82)
line(200, 163 + 80, 200, 136)
line(108, 89 + 20 * sin(ang), 200, 140)
line(203, 140, 300, 180 + 20 * sin(ang + 90))
ellipse(200, 100, 70, 70)
def updateAnimation():
global ang
global speed
ang += speed
speed += 0.1
def draw():
drawSky()
drawPole()
drawStickman()
updateAnimation()
| [
"noreply@github.com"
] | benjaminpotter.noreply@github.com |
31f8cbe67b99ed9802497e2ff656d8e43e2f6858 | 039600de1e5bc1d4f21e839b94ae6b2ab6a21a16 | /cosmogrb/utils/time_interval.py | 26760a5a79480dca709255f60d78d37c73d3f28a | [
"BSD-2-Clause"
] | permissive | wematthias/cosmogrb | a9d5df797efecdb43ae3748cb7ca8bbacf36138b | 09852eb4e6e7315bbede507e19a2d57f1b927c3f | refs/heads/master | 2021-04-02T18:23:07.188673 | 2020-06-09T12:24:28 | 2020-06-09T12:24:28 | 248,306,640 | 0 | 0 | BSD-2-Clause | 2020-06-08T15:02:04 | 2020-03-18T18:05:04 | Python | UTF-8 | Python | false | false | 16,891 | py | # from 3ML
import matplotlib.pyplot as plt
import copy
from operator import itemgetter, attrgetter
import numpy as np
from cosmogrb.utils.plotting import step_plot
class IntervalsDoNotOverlap(RuntimeError):
pass
class IntervalsNotContiguous(RuntimeError):
pass
class TimeInterval(object):
def __init__(self, start, stop, counts=None, dead_time=0, swap_if_inverted=False):
self._start = float(start)
self._stop = float(stop)
self._counts = counts
self._dead_time = dead_time
self._exposure = (self._stop - self._start) - dead_time
# Note that this allows to have intervals of zero duration
if self._stop < self._start:
if swap_if_inverted:
self._start = stop
self._stop = start
else:
raise RuntimeError(
"Invalid time interval! TSTART must be before TSTOP and TSTOP-TSTART >0. "
"Got tstart = %s and tstop = %s" % (start, stop)
)
def __add__(self, number):
"""
Return a new time interval equal to the original time interval shifted to the right by number
:param number: a float
:return: a new TimeInterval instance
"""
return TimeInterval(self._start + number, self._stop + number)
def __sub__(self, number):
"""
Return a new time interval equal to the original time interval shifted to the left by number
:param number: a float
:return: a new TimeInterval instance
"""
return TimeInterval(self._start - number, self._stop - number)
@property
def start(self):
return self._start
@property
def stop(self):
return self._stop
@property
def counts(self):
return self._counts
@property
def dead_time(self):
return self._dead_time
@property
def rate(self):
if self._counts is not None:
return self._counts / self._exposure
@property
def width(self):
return self._stop - self._start
@property
def exposure(self):
return self._exposure
@property
def mid_point(self):
return (self._start + self._stop) / 2.0
def __repr__(self):
return " interval %s - %s (width: %s)" % (self.start, self.stop, self.width,)
def intersect(self, interval):
"""
Returns a new time interval corresponding to the intersection between this interval and the provided one.
:param interval: a TimeInterval instance
:type interval: Interval
:return: new interval covering the intersection
:raise IntervalsDoNotOverlap : if the intervals do not overlap
"""
if not self.overlaps_with(interval):
raise IntervalsDoNotOverlap(
"Current interval does not overlap with provided interval"
)
new_start = max(self._start, interval.start)
new_stop = min(self._stop, interval.stop)
return TimeInterval(new_start, new_stop)
def merge(self, interval):
"""
Returns a new interval corresponding to the merge of the current and the provided time interval. The intervals
must overlap.
:param interval: a TimeInterval instance
:type interval : Interval
:return: a new TimeInterval instance
"""
if self.overlaps_with(interval):
new_start = min(self._start, interval.start)
new_stop = max(self._stop, interval.stop)
if (self._counts is not None) and (interval.counts is not None):
new_counts = self._counts + interval.counts
else:
new_counts = None
new_dead_time = self._dead_time + interval.dead_time
return TimeInterval(new_start, new_stop, new_counts, new_dead_time)
else:
raise IntervalsDoNotOverlap("Could not merge non-overlapping intervals!")
def overlaps_with(self, interval):
"""
Returns whether the current time interval and the provided one overlap or not
:param interval: a TimeInterval instance
:type interval: Interval
:return: True or False
"""
if interval.start == self._start or interval.stop == self._stop:
return True
elif interval.start > self._start and interval.start < self._stop:
return True
elif interval.stop > self._start and interval.stop < self._stop:
return True
elif interval.start < self._start and interval.stop > self._stop:
return True
else:
return False
def to_string(self):
"""
returns a string representation of the time interval that is like the
argument of many interval reading funcitons
:return:
"""
return "%f-%f" % (self.start, self.stop)
def __eq__(self, other):
if not isinstance(other, TimeInterval):
# This is needed for things like comparisons to None or other objects.
# Of course if the other object is not even a TimeInterval, the two things
# cannot be equal
return False
else:
return self.start == other.start and self.stop == other.stop
class TimeIntervalSet(object):
"""
A set of intervals
"""
def __init__(self, list_of_intervals=()):
self._intervals = list(list_of_intervals)
@classmethod
def from_starts_and_stops(cls, starts, stops, counts=None, dead_time=None):
"""
Builds a TimeIntervalSet from a list of start and stop times:
start = [-1,0] -> [-1,0], [0,1]
stop = [0,1]
:param starts:
:param stops:
:return:
"""
assert len(starts) == len(stops), (
"starts length: %d and stops length: %d must have same length"
% (len(starts), len(stops))
)
if counts is not None:
assert len(starts) == len(counts), (
"starts length: %d and counts length: %d must have same length"
% (len(starts), len(counts))
)
else:
counts = [None] * len(starts)
if dead_time is not None:
assert len(starts) == len(dead_time), (
"starts length: %d and dead_time length: %d must have same length"
% (len(starts), len(dead_time))
)
else:
dead_time = [0] * len(starts)
list_of_intervals = []
for imin, imax, ic, idt in zip(starts, stops, counts, dead_time):
list_of_intervals.append(TimeInterval(imin, imax, ic, idt))
return cls(list_of_intervals)
@classmethod
def from_list_of_edges(cls, edges):
"""
Builds a IntervalSet from a list of time edges:
edges = [-1,0,1] -> [-1,0], [0,1]
:param edges:
:return:
"""
raise NotImplementedError()
# # sort the time edges
# edges.sort()
# list_of_intervals = []
# for imin, imax in zip(edges[:-1], edges[1:]):
# list_of_intervals.append(cls.new_interval(imin, imax))
# return cls(list_of_intervals)
def merge_intersecting_intervals(self, in_place=False):
"""
merges intersecting intervals into a contiguous intervals
:return:
"""
# get a copy of the sorted intervals
sorted_intervals = self.sort()
new_intervals = []
while len(sorted_intervals) > 1:
# pop the first interval off the stack
this_interval = sorted_intervals.pop(0)
# see if that interval overlaps with the the next one
if this_interval.overlaps_with(sorted_intervals[0]):
# if so, pop the next one
next_interval = sorted_intervals.pop(0)
# and merge the two, appending them to the new intervals
new_intervals.append(this_interval.merge(next_interval))
else:
# otherwise just append this interval
new_intervals.append(this_interval)
# now if there is only one interval left
# it should not overlap with any other interval
# and the loop will stop
# otherwise, we continue
# if there was only one interval
# or a leftover from the merge
# we append it
if sorted_intervals:
assert (
len(sorted_intervals) == 1
), "there should only be one interval left over, this is a bug" # pragma: no cover
# we want to make sure that the last new interval did not
# overlap with the final interval
if new_intervals:
if new_intervals[-1].overlaps_with(sorted_intervals[0]):
new_intervals[-1] = new_intervals[-1].merge(sorted_intervals[0])
else:
new_intervals.append(sorted_intervals[0])
else:
new_intervals.append(sorted_intervals[0])
if in_place:
self.__init__(new_intervals)
else:
return TimeIntervalSet(new_intervals)
def extend(self, list_of_intervals):
self._intervals.extend(list_of_intervals)
def __len__(self):
return len(self._intervals)
def __iter__(self):
for interval in self._intervals:
yield interval
def __getitem__(self, item):
return self._intervals[item]
def __eq__(self, other):
for interval_this, interval_other in zip(self.argsort(), other.argsort()):
if not self[interval_this] == other[interval_other]:
return False
return True
def pop(self, index):
return self._intervals.pop(index)
def sort(self):
"""
Returns a sorted copy of the set (sorted according to the tstart of the time intervals)
:return:
"""
if self.is_sorted:
return copy.deepcopy(self)
else:
return TimeIntervalSet(
np.atleast_1d(itemgetter(*self.argsort())(self._intervals))
)
def argsort(self):
"""
Returns the indices which order the set
:return:
"""
# Gather all tstarts
tstarts = [x.start for x in self._intervals]
return [x[0] for x in sorted(enumerate(tstarts), key=itemgetter(1))]
def is_contiguous(self, relative_tolerance=1e-5):
"""
Check whether the time intervals are all contiguous, i.e., the stop time of one interval is the start
time of the next
:return: True or False
"""
starts = [attrgetter("start")(x) for x in self._intervals]
stops = [attrgetter("stop")(x) for x in self._intervals]
return np.allclose(starts[1:], stops[:-1], rtol=relative_tolerance)
@property
def is_sorted(self):
"""
Check whether the time intervals are sorted
:return: True or False
"""
return np.all(self.argsort() == np.arange(len(self)))
def containing_bin(self, value):
"""
finds the index of the interval containing
:param value:
:return:
"""
# Get the index of the first ebounds upper bound larger than energy
# (but never go below zero or above the last channel)
idx = min(max(0, np.searchsorted(self.edges, value) - 1), len(self))
return idx
def containing_interval(self, start, stop, inner=True, as_mask=False):
"""
returns either a mask of the intervals contained in the selection
or a new set of intervals within the selection. NOTE: no sort is performed
:param start: start of interval
:param stop: stop of interval
:param inner: if True, returns only intervals strictly contained within bounds, if False, returns outer bounds as well
:param as_mask: if you want a mask or the intervals
:return:
"""
# loop only once because every call unpacks the array
# we need to round for the comparison because we may have read from
# strings which are rounded to six decimals
starts = np.round(self.starts, decimals=6)
stops = np.round(self.stops, decimals=6)
start = np.round(start, decimals=6)
stop = np.round(stop, decimals=6)
condition = (starts >= start) & (stop >= stops)
if not inner:
# now we get the end caps
lower_condition = (starts <= start) & (start <= stops)
upper_condition = (starts <= stop) & (stop <= stops)
condition = condition | lower_condition | upper_condition
# if we just want the mask
if as_mask:
return condition
else:
return TimeIntervalSet(np.asarray(self._intervals)[condition])
@property
def starts(self):
"""
Return the starts fo the set
:return: list of start times
"""
return np.array([interval.start for interval in self._intervals])
@property
def stops(self):
"""
Return the stops of the set
:return:
"""
return np.array([interval.stop for interval in self._intervals])
@property
def counts(self):
return np.array([interval.counts for interval in self._intervals])
@property
def rates(self):
return np.array([interval.rate for interval in self._intervals])
@property
def exposures(self):
return np.array([interval.exposure for interval in self._intervals])
@property
def mid_points(self):
return np.array([interval.mid_point for interval in self._intervals])
@property
def widths(self):
return np.array([interval._get_width() for interval in self._intervals])
@property
def absolute_start(self):
"""
the minimum of the start times
:return:
"""
return min(self.starts)
@property
def absolute_stop(self):
"""
the maximum of the stop times
:return:
"""
return max(self.stops)
@property
def edges(self):
"""
return an array of time edges if contiguous
:return:
"""
if self.is_contiguous() and self.is_sorted:
edges = [
interval.start
for interval in itemgetter(*self.argsort())(self._intervals)
]
edges.append(
[
interval.stop
for interval in itemgetter(*self.argsort())(self._intervals)
][-1]
)
else:
raise IntervalsNotContiguous(
"Cannot return edges for non-contiguous intervals"
)
return edges
def to_string(self):
"""
returns a set of string representaitons of the intervals
:return:
"""
return ",".join([interval.to_string() for interval in self._intervals])
@property
def bin_stack(self):
"""
get a stacked view of the bins [[start_1,stop_1 ],
[start_2,stop_2 ]]
:return:
"""
return np.vstack((self.starts, self.stops)).T
def __add__(self, number):
"""
Shift all time intervals to the right by number
:param number: a float
:return: new TimeIntervalSet instance
"""
new_set = TimeIntervalSet()
new_set.extend([time_interval + number for time_interval in self._intervals])
return new_set
def __sub__(self, number):
"""
Shift all time intervals to the left by number (in place)
:param number: a float
:return: new TimeIntervalSet instance
"""
new_set = TimeIntervalSet(
[time_interval - number for time_interval in self._intervals]
)
return new_set
def plot_intervals(self, as_rates=True, ax=None, **kwargs):
"""
plot the intervals as rates or counts
:param as_rates:
:param ax:
:returns:
:rtype:
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
xbins = self.bin_stack
if as_rates:
y = self.rates
y_label = "rate (cnts/s)"
else:
y = self.counts
y_label = "cnts"
step_plot(xbins, y, ax=ax, **kwargs)
ax.set_xlabel("time")
ax.set_ylabel(y_label)
return fig
| [
"jmichaelburgess@gmail.com"
] | jmichaelburgess@gmail.com |
20d720a8873fba8e271892d0efe124a4d83e0a6d | 4139cc9048db5fcc6e2a74b1c736e33513705d6b | /exec.py | 5bd7076c13f97fc4c5509cd542bca960724f91e9 | [] | no_license | rtt987653/stunning-waffle | 1e2921c7b6c1021d13844aa4a7f5d8ef11c8093b | d5c5b856a74ca2b45a5ac22dc8daf48865a3492c | refs/heads/master | 2020-06-17T03:21:29.718349 | 2019-07-08T09:23:20 | 2019-07-08T09:23:20 | 195,778,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | import sys
import hashlib
import binascii
import os
import base64
import re
password = "blah"
salt = "blah"
exec sys.argv[1]
dk = hashlib.pbkdf2_hmac('sha256', password, salt, 100000)
hashed_input = binascii.hexlify(dk)
print hashed_input | [
"rtt@20190830.rtt987653.nccpentest.com"
] | rtt@20190830.rtt987653.nccpentest.com |
c3292163e54a72daf2ef68b081fe8871e31995ff | 6e23e1c235b2d7acdb4a7ee06909e5d4ab95f35f | /lol-leveling-bot/regions.py | b6c87c3b46f4dfe361daa2508d02be2a190fa9e0 | [] | no_license | 00-00-00-11/lol-leveling-bot | 498576cde55b041b124f4aa5a87250f69eeec71a | 05b85e549046f59b946fd4dd07f26fe16089ef3b | refs/heads/master | 2023-02-27T09:21:10.711958 | 2021-02-04T23:14:53 | 2021-02-04T23:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py |
# Champion select region
champ_select_left = (343, 131)
champ_select_right = (934, 583)
# To be removed after daily play is updated to pixel
UPPER_HALF_RECT = [0, 0, 1280, 360]
LOWER_HALF_RECT = [0, 360, 1280, 360]
# Champion Colors
ashe_color = (214, 179, 211)
annie_color = (234, 149, 163)
# Button Coordinates
play_coords = (114, 40)
party_coords = (127, 41)
coop_vs_ai_coords = (121, 99)
intermediate_coords = (418, 555)
confirm_coords = (512, 686)
find_match_coords = (621, 673)
accept_coords = (638, 531)
lock_in_coords = (637, 589)
champ_select_coords = (727, 594)
champ_locked_coords = (328, 295)
game_recall_coords = (825, 711)
game_trinket_coords = (825, 685)
game_end_coords = (674, 453)
game_health_coords = (582, 722)
game_lockscreen_coords = (1068, 735)
skip_honor_coords = (641, 649)
play_again_coords = (535, 676)
ok_levelup_coords = (638, 679)
daily_play_coords = (608, 69)
daily_play_thresh_coords = (651, 309)
daily_play_middle_select_coords = (679, 381)
daily_play_ok_coords = (642, 677)
riot_client_play_coords = (885, 350)
# Button Colors
play_party_color = (236, 226, 204)
coop_vs_ai_color = (172, 183, 136)
intermediate_color = (13, 31, 39)
confirm_color = (29, 33, 37)
find_match_color = (16, 22, 27)
accept_color = (10, 16, 22)
lock_in_color = (45, 153, 164)
champ_select_color = (75, 58, 24)
champ_locked_color = (203, 169, 96)
game_recall_color = (189, 247, 255)
game_trinket_color = (5, 13, 11)
game_end_color = (165, 16, 16)
game_health_color = (1, 13, 7)
game_dead_color = (107, 119, 120)
skip_honor_color = (190, 178, 132)
play_again_color = (29, 33, 37)
ok_levelup_color = (184, 172, 128)
daily_play_color = (233, 223, 201)
daily_play_thresh_color = (34, 115, 92)
daily_play_middle_select_color = (29, 32, 36)
daily_play_ok_color = (194, 181, 134)
riot_client_play_color = (11, 196, 226) | [
"tplevesque2@gmail.com"
] | tplevesque2@gmail.com |
565ec351cfb9ac98ccaf321abdedfcdf21f3d13a | 3821904d92258b17117e55888078256659b00d95 | /Modelling.py | f8fb8e2d6d6addfb814cacd7cd470199d663e888 | [] | no_license | Alex-Veskoukis/Data-Science-Bowl-2019 | e50726118ae4d67842861b1855f55286dff51e2a | f11191bd5d7e578e466c7507880e5b33be52ce10 | refs/heads/master | 2022-04-08T10:20:04.810722 | 2022-04-02T12:32:01 | 2022-04-02T12:32:01 | 223,137,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,261 | py | import auxiliary_functions as af
from sklearn.ensemble import RandomForestClassifier
from mlxtend.classifier import EnsembleVoteClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import AdaBoostClassifier
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import ExtraTreesClassifier
from xgboost import XGBClassifier
import
from sklearn.metrics import confusion_matrix, accuracy_score, cohen_kappa_score
import seaborn as sns
classifiers = []
model1 = xgb.XGBClassifier()
classifiers.append(model1)
model2 = af.OrdinalClassifier(xgb.XGBClassifier())
classifiers.append(model2)
model3 = RandomForestClassifier(n_estimators=1000, n_jobs=-1, random_state=42)
classifiers.append(model3)
model4 = af.OrdinalClassifier(RandomForestClassifier(n_estimators=83, n_jobs=-1, random_state=42))
classifiers.append(model4)
model5 = EnsembleVoteClassifier(clfs=[model1, model3], weights=[1, 1], refit=False)
classifiers.append(model5)
model6 = EnsembleVoteClassifier(clfs=[model2, model4], weights=[1, 1], refit=False)
classifiers.append(model6)
model7 = af.OrdinalClassifier(LogisticRegression(max_iter = 10000))
classifiers.append(model7)
# model8 = SVC()
# classifiers.append(model8)
#
model9 = KNeighborsClassifier(n_neighbors = 8)
classifiers.append(model9)
#
# model10 = LinearDiscriminantAnalysis()
# classifiers.append(model10)
#
#
# model11 = GaussianNB()
# classifiers.append(model10)
#
#
# model13 = DecisionTreeClassifier()
# classifiers.append(model13)
# #
model14 = MLPClassifier(max_iter = 10000)
classifiers.append(model14)
#
# # clf = GaussianProcessClassifier()
# # classifiers.append(model15)
#
#
# model16 = RBF()
# classifiers.append(model16)
#
model17 = AdaBoostClassifier()
classifiers.append(model17)
#
# model18 = QuadraticDiscriminantAnalysis()
# classifiers.append(model18)
model19 = ExtraTreesClassifier()
classifiers.append(model19)
#
model10 = EnsembleVoteClassifier(clfs=[model2, model4, model7, model9], weights=[1, 1], refit=False)
classifiers.append(model10)
#
X_train_sub = X_train
X_test_sub = X_test
# col8 = col8.tolist()
# col9 = col9.tolist()
# col10 = col10.tolist()
# col11 = col11.tolist()
# col12 = col12.tolist()
# col13 = col13.tolist()
# ex = col8 + col9 + col10 + col11 + col12 +col13
# X_train_sub = X_train.loc[:,~X_train.columns.isin(ex)]
# X_test_sub = X_test.loc[:,~X_test.columns.isin(ex)]
kappa_train = []
kappa_test = []
accuracy_train = []
accuracy_test = []
for clf in classifiers:
print(clf)
clf.fit(X_train_sub, Y_train)
Y_pred_train = clf.predict(X_train_sub)
Y_pred_test = clf.predict(X_test_sub[X_train_sub.columns])
kappa_train.append(af.quadratic_weighted_kappa(Y_train, Y_pred_train))
kappa_test.append(af.quadratic_weighted_kappa(Y_test, Y_pred_test))
accuracy_train.append(accuracy_score(Y_train, Y_pred_train))
accuracy_test.append(accuracy_score(Y_test, Y_pred_test))
# RFE
from sklearn.feature_selection import RFE
#no of features
nof_list=np.arange(1,100)
high_score=0
#Variable to store the optimum features
nof=0
score_list =[]
for n in range(len(nof_list)):
model = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42)
rfe = RFE(model,nof_list[n])
X_train_rfe = rfe.fit_transform(X_train,Y_train)
X_test_rfe = rfe.transform(X_test)
model.fit(X_train_rfe,Y_train)
score = model.score(X_test_rfe,Y_test)
score_list.append(score)
if(score>high_score):
high_score = score
nof = nof_list[n]
print("Optimum number of features: %d" %nof)
print("Score with %d features: %f" % (nof, high_score))
# Run RF classifier
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=83, n_jobs=-1, random_state=42)
rfe = RFE(rf, 3)
fit = rfe.fit(X_train, Y_train)
print("Num Features: %d" % fit.n_features_)
print("Selected Features: %s" % fit.support_)
print("Feature Ranking: %s" % fit.ranking_)
Y_pred = fit.predict(X_test)
af.quadratic_weighted_kappa(Y_test, Y_pred)
cohen_kappa_score(Y_test, Y_pred)
# importance
importances = rf.feature_importances_
from sklearn import metrics
from sklearn.ensemble import ExtraTreesClassifier
# load the iris datasets
# fit an Extra Trees model to the data
model = ExtraTreesClassifier()
model.fit(X_train, Y_train)
Y_pred = model.predict(X_test)
cohen_kappa_score(Y_test, Y_pred)
# display the relative importance of each attribute
print(model.feature_importances_)
rfe = RFE(rf, 10)
rfe = rfe.fit(X_train, Y_train)
# summarize the selection of the attributes
print(rfe.support_)
print(rfe.ranking_)
# Tune rf
n_estimators = range(1, 100)
train_results = []
test_results = []
for estimator in n_estimators:
rf = RandomForestClassifier(n_estimators=estimator, n_jobs=-1, random_state=42)
rf.fit(X_train, Y_train)
train_pred = rf.predict(X_train)
y_pred = rf.predict(X_test)
test_results.append(af.quadratic_weighted_kappa(Y_test, y_pred))
train_results.append(af.quadratic_weighted_kappa(Y_train, train_pred))
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
line1, = plt.plot(n_estimators, train_results, 'b', label='Train kappa')
line2, = plt.plot(n_estimators, test_results, 'r', label='Test kappa')
plt.legend(handler_map={line1: HandlerLine2D(numpoints=2)})
plt.ylabel('Kappa score')
plt.xlabel('n_estimators')
plt.show()
plt.close()
# from sklearn.model_selection import RandomizedSearchCV
# from sklearn.metrics import make_scorer
# rf_params = {
# 'n_estimators': range(5,100),
# 'max_features': ['auto', 'sqrt', 'log2'],
# }
#
# gs_random = RandomizedSearchCV(estimator=rf, param_distributions=rf_params, cv= 5, n_iter=60, scoring=make_scorer(quadratic_weighted_kappa, greater_is_better=True))
#
# gs_random.fit(X_train, Y_train)
#
# print(gs_random.best_params_)
# voting classifiers
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
# clf1 = MLPClassifier(solver='lbfgs', alpha=1e-5,
# hidden_layer_sizes=(5, 2), random_state=1)
# eclf1 = clf1.fit(X_train, Y_train)
# Y_pred1 = eclf1.predict(X_test)
# af.quadratic_weighted_kappa(Y_test, Y_pred1)
clf2 = RandomForestClassifier(n_estimators=33, n_jobs=-1, random_state=42)
eclf2 = clf2.fit(X_train, Y_train)
Y_pred2 = eclf2.predict(X_test)
af.quadratic_weighted_kappa(Y_test, Y_pred2)
clf3 = VotingClassifier(estimators=[('mlp', clf1), ('rf', clf2)],
voting='hard')
eclf3 = clf3.fit(X_train, Y_train)
Y_pred3 = eclf3.predict(X_test)
af.quadratic_weighted_kappa(Y_test, Y_pred3)
sns.set()
mat = confusion_matrix(Y_pred, Y_test)
sns.heatmap(mat, square=True, annot=True, cbar=False)
plt.xlabel('predicted values')
plt.ylabel('true value');
af.mat(X_train, Y_train)
X_train.to_numpy
| [
"alex.veskoukis@hotmail.gr"
] | alex.veskoukis@hotmail.gr |
abf40f15a8ebe352af0fcbc8ebd565e54da121e0 | 956eb69ed44db4303bcce321f061dea5ea18371e | /bin/python-config | 9f83f1d3a634a2d69a5110b16bbf2ca52a664cba | [] | no_license | mgznv/intro-github-ubuntu | 975352b6b006d58f3010ac4e6a91851062b9ab1b | 36c6fe0152077324517e1f724c4f090ea7342e91 | refs/heads/master | 2021-01-10T06:30:50.252110 | 2016-02-22T16:08:50 | 2016-02-22T16:08:50 | 52,285,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | #!/home/manuel/__proyecto__/bin/python
import sys
import getopt
import sysconfig
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
'ldflags', 'help']
if sys.version_info >= (3, 2):
valid_opts.insert(-1, 'extension-suffix')
valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
valid_opts.append('configdir')
def exit_with_usage(code=1):
    """Write a usage line listing every supported flag to stderr, then exit.

    The process terminates with *code* (default 1, i.e. failure).
    """
    flag_list = '|'.join('--' + name for name in valid_opts)
    sys.stderr.write("Usage: {0} [{1}]\n".format(sys.argv[0], flag_list))
    sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
exit_with_usage(code=0)
for opt in opt_flags:
if opt == '--prefix':
print(sysconfig.get_config_var('prefix'))
elif opt == '--exec-prefix':
print(sysconfig.get_config_var('exec_prefix'))
elif opt in ('--includes', '--cflags'):
flags = ['-I' + sysconfig.get_path('include'),
'-I' + sysconfig.get_path('platinclude')]
if opt == '--cflags':
flags.extend(getvar('CFLAGS').split())
print(' '.join(flags))
elif opt in ('--libs', '--ldflags'):
abiflags = getattr(sys, 'abiflags', '')
libs = ['-lpython' + pyver + abiflags]
libs += getvar('LIBS').split()
libs += getvar('SYSLIBS').split()
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == '--ldflags':
if not getvar('Py_ENABLE_SHARED'):
libs.insert(0, '-L' + getvar('LIBPL'))
if not getvar('PYTHONFRAMEWORK'):
libs.extend(getvar('LINKFORSHARED').split())
print(' '.join(libs))
elif opt == '--extension-suffix':
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix is None:
ext_suffix = sysconfig.get_config_var('SO')
print(ext_suffix)
elif opt == '--abiflags':
if not getattr(sys, 'abiflags', None):
exit_with_usage()
print(sys.abiflags)
elif opt == '--configdir':
print(sysconfig.get_config_var('LIBPL'))
| [
"leunam.toshi@gmail.com"
] | leunam.toshi@gmail.com | |
615e09be2ec42e5816dfe528abd05e9ec222fdde | 35bdc3bca1c1993db585b2d388a5e1a6aa7b8c34 | /pbc.py | b6adfaac498413d06ba9a3aee949f4eff51e4cf8 | [] | no_license | boudiccadaain/data-analyst-toolkit | 1feaad5c8c43c6e0009e909942ab6894f8e0646d | 215552a4ec4680e2b77bb45a8228894ea14ca3db | refs/heads/master | 2023-03-24T04:11:33.612202 | 2019-09-25T00:58:22 | 2019-09-25T00:58:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | import os
import utils
def list_files(parent_directory):
    """Recursively collect the full paths of every file under *parent_directory*.

    Directories themselves are not included; traversal order follows
    ``os.walk`` (top-down).
    """
    return [
        os.path.join(dirpath, filename)
        for dirpath, _subdirs, filenames in os.walk(parent_directory)
        for filename in filenames
    ]
class FileInfo:
    """Snapshot of filesystem metadata for a single file.

    Captures creation/modification date and time (formatted via the
    project's ``utils`` helpers) plus the file size.
    """

    def __init__(self, file_path):
        self.file_stats = os.stat(file_path)
        # NOTE(review): splitext()[1] yields the *extension* (e.g. ".txt"),
        # not the file name, despite this attribute's name. Kept as-is in
        # case callers rely on the current value — confirm intent before
        # changing to os.path.basename().
        self.file_name = os.path.splitext(file_path)[1]
        self.create_date = utils.convert_time_to_date(self.file_stats.st_ctime)
        self.create_time = utils.convert_time_to_time(self.file_stats.st_ctime)
        self.modify_date = utils.convert_time_to_date(self.file_stats.st_mtime)
        self.modify_time = utils.convert_time_to_time(self.file_stats.st_mtime)
        # BUG FIX: st_size is in bytes, and the original divided by 1000
        # (yielding kilobytes) while calling the attribute "megabytes".
        self.size_in_megabytes = self.file_stats.st_size / 1_000_000
# TODO: Finish PBC log function
def pbc_log(path, output_name, file_type=None, delimiter='|', destination=None):
    """Start building a PBC inventory log for *path* (work in progress).

    Currently only announces the target directory and echoes back
    *output_name* when it already carries a file extension; otherwise
    returns ``None``.
    """
    print('Creating inventory list of the following directory and subdirectories:')
    # BUG FIX: the original ''.join(path, '\n') raised TypeError —
    # str.join takes a single iterable; plain concatenation was intended.
    print(path + '\n')
    if '.' in output_name:
        # Caller already supplied a file extension; use the name as-is.
        return output_name
| [
"dallen.mcnerney@gmail.com"
] | dallen.mcnerney@gmail.com |
c2d2edc87a7c610fcf3ad33dfb538bdf3a349ecd | 9ca5fd0228f262807a158038f038d5389b04762b | /김부용/eye blinking password/settings.py | 9cf1f843209c410bd75ad40b1a3e580c2de83150 | [] | no_license | KBY538/2021SMU-DL-eyeblinkingpassword | 4c9033837b9bffcfadaa8d345526446a2b9486a2 | efbe46ce0c97225c4ffb7f1135bd931ad514a614 | refs/heads/main | 2023-05-13T09:28:39.891733 | 2021-06-03T00:18:18 | 2021-06-03T00:18:18 | 356,547,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | #base properties
TITLE = "[Team.느낌알조~?]눈깜빡임잠금해제"
WIDTH = 640
HEIGHT = 480
FPS = 20
MODEL_VER = '2021_05_21_00_52_40.h5'
PATTERN_DATA_PATH = 'blinking/pattern.ini'
#Font
FONT_NAME = 'arial'
GG = "경기천년바탕OTF_Regular.otf"
#sound
INTRO = "Small Bell Jingle.mp3"
MAIN = "Poisoned Rose - Aakash Gandhi.mp3"
ENDING = "The New Darker You - The Tower of Light.mp3"
METRONOME = "메트로놈_60bpm.mp3"
KEY = "key.mp3"
DECISION = "decision.mp3"
EXIT = "exit.mp3"
SUCCESS = "success.mp3"
FAIL = "fail.mp3"
APPLAUSE = "applause.mp3"
WAITING = "Down - Joey Pecoraro.mp3"
BLINK = "blink.mp3"
COUNT = "count.mp3"
READY = "buzzer-opening1.mp3"
GOOD = "Always Remember to Never Forget - The Whole Other.mp3"
#Select properties
SELECT_ACC = 4
#define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GRAY = (100, 100, 100)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255 ,255 ,0)
BROWN = (111, 109, 81)
ALPHA_MAX = 225 | [
"80605197+KBY538@users.noreply.github.com"
] | 80605197+KBY538@users.noreply.github.com |
c0b142fedc26e869d35ca35ac826b4138e953b8b | ed2ea2ce99a65472f8aad25c6bedcd0dd7d00829 | /cogs/setcommands.py | 4ba8562542de980a0b46587d51bca24f19f646d3 | [
"MIT"
] | permissive | SottaByte98/ham5teakbot3.0 | f8c9f03ae75c84c84b665f2bd8954d698c042fe3 | 2ee76c89fc9a8755b0d7a9cfbbe29638f021daca | refs/heads/main | 2023-07-14T20:28:24.163072 | 2021-08-20T17:13:58 | 2021-08-20T17:13:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,492 | py | import discord
from discord.ext import commands
from luhn import *
from utils.functions import *
class SetCommandCog(commands.Cog):
    def __init__(self, client):
        # Keep a reference to the running bot so commands can reach it later.
        self.bot = client
    @commands.command()
    @commands.has_permissions(manage_guild=True)
    @commands.guild_only()
    async def setprefix(self, ctx, prefix = None):
        """Set this guild's command prefix (max 5 characters).

        Premium guilds also have the prefix persisted to the database;
        non-premium guilds only update the in-memory ``prefixes`` cache.
        """
        # Extra role-based gate on top of the permission decorators.
        administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
        if administratorcheck1 == 0:
            await ctx.send(embed=await nopermission(ctx), delete_after=5)
            return
        await deletemessage(ctx)
        if prefix is None:
            embedDescription = (f"Please provide all required arguments. `-setprefix <prefix>`.")
            await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
            return
        if len(prefix) >= 6:
            embedDescription = (f"{prefix} has too many characters for a prefix.")
            await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
            return
        if ctx.guild.id in premium_guilds:
            # Persist to DB and update the in-memory cache.
            insertquery(sql, "guilds", "prefix", f"'{prefix}'", f"guild_id = {ctx.guild.id}")
            prefixes[f"{ctx.guild.id}"] = prefix
        elif ctx.guild.id not in premium_guilds:
            prefixes[f"{ctx.guild.id}"] = prefix
        else:
            # NOTE(review): unreachable — the elif is the exact complement
            # of the if above.
            return
        # NOTE(review): "succesfully" typo in the user-facing string below —
        # runtime text, fix separately.
        embedDescription = (f"Prefix succesfully set to `{prefix}`")
        await ctx.send(embed=addEmbed(ctx,"dark_teal",embedDescription ), delete_after=5)
@commands.command()
@discord.ext.commands.has_guild_permissions(manage_guild=True)
@commands.cooldown(1, 15, commands.BucketType.user)
async def setup(self, ctx, password, admin_role_id:discord.Role, mod_role_id:discord.Role):
await deletemessage(ctx)
async def invalidpass():
embedDescription = (f"Invalid password.")
await ctx.send(embed=addEmbed(ctx,"dark_teal",embedDescription), delete_after=5)
cc = password[:16]
password = password[16:].strip()
if verify(cc) == False:
print("cc fail")
await invalidpass()
return
elif verify(cc) == True:
check = selectqueryall(sql, f'passwords', 'password', None)
found = False
for pass1 in check:
if pass1[0] == password:
found = True
if found != True:
print("pass doesn't exist")
await invalidpass()
return
check2 = selectquery(sql, f'passwords', 'used', f"password = '{password}'")
if check2 == 1:
embedDescription = (f"This password was already used.")
await ctx.send(embed=addEmbed(ctx,"dark_teal",embedDescription ))
return
elif check2 == 0:
insertquery(sql, f'passwords', 'used', '(1)', f'password = "{str(password)}"')
insertquery(sql, f'passwords', 'guild_id', f'{ctx.guild.id}', f'password = "{str(password)}"')
guild_id = ctx.guild.id
if guild_id in premium_guilds:
embedDescription = (f"You are already Logged in as Premium")
await ctx.send(embed=addEmbed(ctx,"blue",embedDescription ))
return
else:
guild_name = ctx.guild.name
column = '(guild_id , guild_name , premium , administrator_id , moderator_id)'
values = (guild_id , guild_name , True , admin_role_id.id , mod_role_id.id)
where = None
result = (insertquery(sql, 'guilds' , column , values, where))
premium_guilds.append(ctx.guild.id)
if result == 0:
embedDescription = (f"Registered successfully")
await ctx.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Register Failed")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
@commands.command()
@discord.ext.commands.has_guild_permissions(manage_guild=True)
@commands.cooldown(1, 5, commands.BucketType.user)
async def setrestrict(self, ctx, alias ,role1:discord.Role, role2:discord.Role = None, role3:discord.Role = None):
await deletemessage(ctx)
guild_id = ctx.guild.id
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
if guild_id not in premium_guilds:
embedDescription = (f"You premium to use this command.")
await ctx.send(embed=addEmbed(ctx,"blue",embedDescription ), delete_after=5)
return
restricttypes = selectqueryall(sql, f'restrict', 'restrictrole_name', f'guild_id = {ctx.guild.id}')
for stralias in restricttypes:
if alias == stralias[0]:
embedDescription =(f"Restrict type `{alias}` already exists.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
else:
if role3 is None:
if role2 is None:
column = '(guild_id , restrictrole_name , restrictrole_id)'
values = (guild_id , alias , role1.id)
where = None
result = (insertquery(sql, f'restrict' , column , values, where))
sql.connect()
querycursor = sql.cursor()
sql.commit()
querycursor.close()
if (result == 0):
embedDescription = (f"Restrict `{alias}` successfully set as {role1.mention}")
await ctx.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Restrict `{alias}` failed to set as {role1.mention}")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
column = '(guild_id , restrictrole_name , restrictrole_id , restrictrole2_id)'
values = (guild_id , alias , role1.id , role2.id)
where = None
result = (insertquery(sql, f'restrict' , column , values, where))
sql.connect()
querycursor = sql.cursor()
sql.commit()
querycursor.close()
if (result == 0):
embedDescription = (f"Restrict `{alias}` successfully set as {role1.mention} and {role2.mention}")
await ctx.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Restrict `{alias}` failed to set as {role1.mention} and {role2.mention}")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
column = '(guild_id , restrictrole_name , restrictrole_id , restrictrole2_id, restrictrole3_id)'
values = (guild_id , alias , role1.id , role2.id, role3.id)
where = None
result = (insertquery(sql, f'restrict' , column , values, where))
sql.connect()
querycursor = sql.cursor()
sql.commit()
querycursor.close()
if (result == 0):
embedDescription = (f"Restrict `{alias}` successfully set as {role1.mention}, {role2.mention} and {role3.mention}")
await ctx.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Restrict `{alias}` failed to set as {role1.mention}, {role2.mention} and {role3.mention}")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def setserver(self, ctx, serverip: str):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
await deletemessage(ctx)
try:
server = MinecraftServer.lookup(serverip)
status = server.status()
except:
embedDescription = (f"Couldn't register {serverip} as server ip.")
await ctx.channel.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
guild_id = str(ctx.guild.id)
where = (f"guild_id = {guild_id}")
result = (insertquery(sql, 'guilds', 'mcserver', f"'{serverip}'", where))
if (result == 0):
embedDescription = (f"Successfully registered {serverip} as server ip.")
await ctx.channel.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Couldn't register {serverip} as server ip.")
await ctx.channel.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
async def setchannel(self, ctx, command, channel: discord.TextChannel):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
await deletemessage(ctx)
guild_id = ctx.guild.id
channelid = str(channel.id)
commandsloop = ["statuschannel", "alertschannel", "lpalertschannel", "crashalertschannel", "generalchannel"
, "rejectedsuggestions", "acceptedsuggestions", "demandedsuggestions"]
for commanda in commandsloop:
if commanda == command:
column = (command)
values = (channelid)
where = (f"guild_id = {guild_id}")
result = (insertquery(sql, 'guilds', column , values, where))
if (result == 0):
embedDescription = (f"Successfully registered {command} as `{channel.id}`")
await ctx.channel.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Couldn't register {command} as {channelid}")
await ctx.channel.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
commandsloop2 = ["announcement", "poll", "suggestion"]
for commanda in commandsloop2:
try:
if commanda == command:
try:
list11 = selectqueryall(sql, 'announcements', 'channel_id', f'guild_id = {ctx.guild.id}')
for item in list11:
if item == channel.id:
embedDescription = (f"{channelid} is already registered.")
await ctx.channel.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
except:
pass
column = '(guild_id , channel_id , channel_type)'
values = (guild_id , channelid , command)
result = (insertquery(sql, 'announcements', column , values, None))
if ctx.guild.id not in announcementschannels.keys() or ctx.guild.id not in suggestionchannels.keys() or ctx.guild.id not in pollchannels.keys():
announcementschannels[ctx.guild.id] = []
if command == "announcement":
announcementschannels[ctx.guild.id].append(channel.id)
elif command == "suggestion":
suggestionchannels[ctx.guild.id].append(channel.id)
elif command == "poll":
pollchannels[ctx.guild.id].append(channel.id)
if (result == 0):
embedDescription = (f"Successfully registered {command} as `{channel.id}`")
await ctx.channel.send(embed=addEmbed(ctx,"green",embedDescription ), delete_after=5)
else:
embedDescription = (f"Couldn't register {command} as {channelid}")
await ctx.channel.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
except Exception as e:
print(e)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(manage_guild=True)
async def setmove(self, ctx, categoryi: discord.CategoryChannel, alias):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
movecheck = selectquery(sql, 'guilds', 'custommovecount', f'guild_id = {ctx.guild.id}')
if movecheck >= 45:
await ctx.send(embed=addEmbed(ctx, None, f"Guild has {movecheck} custommoves set which is over the limit."), delete_after=5)
return
await deletemessage(ctx)
guild_id = ctx.guild.id
categoryid = str(categoryi.id)
categoryname = str(alias).replace('"', '').replace("'", "")
if guild_id not in premium_guilds:
embedDescription = (f"You need premium to use this command.")
await ctx.send(embed=addEmbed(ctx,"blue",embedDescription ))
return
categoryn = selectqueryall(sql, 'categories', 'category_name', f'guild_id = {ctx.guild.id}')
for stralias in categoryn:
if categoryname == stralias[0]:
embedDescription =(f"Category `{categoryname}` already exists.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
column = '(guild_id, category_name)'
values = (guild_id, categoryname)
where = None
result = (insertquery(sql, 'categories', column , values, where))
column = ('category_id')
values = (f"'{categoryid}'")
where = (f"category_name = '{categoryname}'")
result = (insertquery(sql, 'categories', column , values, where))
if (result == 0):
embedDescription =(f"Successfully registered {categoryname} as `{categoryid}`")
await ctx.send(embed=addEmbed(ctx,None,embedDescription ), delete_after=5)
insertquery(sql, 'guilds', 'custommovecount', f'{len(categoryn) + 1}', f'guild_id = {ctx.guild.id}')
else:
embedDescription =(f"Couldn't register {categoryname} as `{categoryid}`")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(manage_guild=True)
async def removerestrict(self, ctx, alias):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
await deletemessage(ctx)
restrictname = alias
restrictlist = selectqueryall(sql, f'restrict', 'restrictrole_name', f'guild_id = {ctx.guild.id}')
for stralias in restrictlist:
if restrictname == stralias[0]:
deletequery(sql, f'restrict', f"restrictrole_name = '{restrictname}'")
embedDescription =(f"Restriction type `{restrictname}` has been removed.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
embedDescription =(f"Restriction type `{restrictname}` couldn't be removed.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(manage_guild=True)
async def removemove(self, ctx, alias):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
await deletemessage(ctx)
categoryname = alias
categoryn = selectqueryall(sql, 'categories', 'category_name', f'guild_id = {ctx.guild.id}')
for stralias in categoryn:
if categoryname == stralias[0]:
categoryn = deletequery(sql, 'categories', f"category_name = '{categoryname}'")
embedDescription =(f"Category `{categoryname}` has been removed.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
embedDescription =(f"Category `{categoryname}` couldn't be removed.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
@commands.command()
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(manage_guild=True)
async def removechannel(self, ctx, value: discord.TextChannel, channel):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
await deletemessage(ctx)
categoryname = channel
channelid = value.id
channels = selectqueryall(sql, 'announcements', 'channel_id', f'channel_type = "{channel}"')
for stralias in channels:
if channelid == stralias[0]:
if channel == "announcement":
announcementschannels[ctx.guild.id].remove(value.id)
elif channel == "poll":
pollchannels[ctx.guild.id].remove(value.id)
elif channel == "suggestion":
suggestionchannels[ctx.guild.id].remove(value.id)
deletequery(sql, 'announcements', f"channel_id = {value.id} AND channel_type = '{channel}'")
embedDescription =(f"<#{value.id}> has been removed from `{categoryname}`.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
embedDescription =(f"<#{value.id}> couldn't be removed from `{categoryname}`.")
await ctx.send(embed=addEmbed(ctx,"red",embedDescription ), delete_after=5)
return
@commands.command(aliases=['ba'])
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(manage_guild=True)
async def betaannouncements(self, ctx, bool:bool):
administratorcheck1 = await administratorcheck(ctx.guild, ctx.author)
if administratorcheck1 == 0:
await ctx.send(embed=await nopermission(ctx), delete_after=5)
return
await deletemessage(ctx)
if bool == True:
insertquery(sql, 'guilds', 'betaannouncements', ('1'), f'guild_id = {ctx.guild.id}')
betaannouncementguilds.append(ctx.guild.id)
if bool == False:
insertquery(sql, 'guilds', 'betaannouncements', ('0'), f'guild_id = {ctx.guild.id}')
betaannouncementguilds.remove(ctx.guild.id)
embedDescription = (f"Beta-Announcements have successfully been set to `{bool}`.")
await ctx.send(embed=addEmbed(ctx,"dark_teal",embedDescription ), delete_after=5)
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if isinstance(error, commands.CommandOnCooldown) or isinstance(error, commands.errors.CommandNotFound):
return
else:
pass
@betaannouncements.error
async def clear_error(self, ctx, error):
await unknownerror(ctx, error)
@setup.error
async def clear_error(self, ctx, error):
await unknownerror(ctx, error)
@removerestrict.error
async def clear_error(self, ctx, error):
await unknownerror(ctx, error)
@setmove.error
async def clear_error(self, ctx, error):
if isinstance(error, discord.ext.commands.errors.ChannelNotFound):
await deletemessage(ctx)
await ctx.send(embed=addEmbed(None, "red", f'Please enter a valid category id. `{getprefix2(ctx)}setmove <categoryid> <alias>`'), delete_after=5)
else:
await unknownerror(ctx, error)
@removemove.error
async def clear_error(self, ctx, error):
if isinstance(error, discord.ext.commands.errors.MissingRequiredArgument):
await deletemessage(ctx)
await ctx.send(embed=addEmbed(None, "red", f'Please enter a valid category name. `{getprefix2(ctx)}removemove <categoryname>`'), delete_after=5)
else:
await unknownerror(ctx, error)
@setchannel.error
async def clear_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await deletemessage(ctx)
await ctx.send(embed=addEmbed(None, "red", f'Please specify the channel you would like to set. `{getprefix2(ctx)}setchannel <channel> <id>`', None), delete_after=5)
else:
await unknownerror(ctx, error)
@setrestrict.error
async def clear_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await deletemessage(ctx)
await ctx.send(embed=addEmbed(None, "red", f'Please make sure to give all arguments correctly. `{getprefix2(ctx)}setrestrict <type> <role1> [role2] [role3]`', None), delete_after=5)
else:
await unknownerror(ctx, error)
@setprefix.error
async def clear_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await deletemessage(ctx)
await ctx.send(f'Please specify the prefix you would like to apply. `{getprefix2(ctx)}setprefix <prefix>`', delete_after=5)
else:
await unknownerror(ctx, error)
def setup(client):
    # discord.py extension entry point: register this cog with the bot.
    client.add_cog(SetCommandCog(client))
"ugurkurtulus06@gmail.com"
] | ugurkurtulus06@gmail.com |
07af59e4fd21911f0e83acdc6a37cd681fb09ee2 | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/10-classes_20200406020619.py | 3c368bc7751070461a6cc8735fe4401547e784e0 | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | # A class is like a blueprint for creating objects. An object has properties and methods(functions) associated with it. Almost everything in Python is an object
# Create class
class User:
    """A simple user record with a name, email address and age."""

    def __init__(self, name, email, age):
        self.name = name
        self.email = email
        self.age = age

    def greeting(self):
        """Return a short self-introduction string."""
        return 'My name is {} and I am {}'.format(self.name, self.age)

    def has_birthday(self):
        """Age this user by one year (mutates ``age`` in place)."""
        self.age = self.age + 1
# Instantiate a User object
# NOTE(review): the leading space in ' Steve Kelvin' looks accidental —
# it is part of the runtime string, so left untouched here.
steve = User (' Steve Kelvin', 'kelvin@yahoo.com', 34)
print(steve.name)
# Attributes can be reassigned directly (no setter needed)
steve.name = "Steve Williams"
print(steve.name)
janet = User ('Janet Jones', 'janet@yahoo.com', 34)
print(janet.name)
# Mutate state via a method, then read it back through greeting()
janet.has_birthday()
print(janet.greeting())
"tikana4@yahoo.com"
] | tikana4@yahoo.com |
ad18dc5bf88fb8322c5e3da5ec55f00d8e8bbcd1 | 4bf28ca3710692f89c58741ccfcf6b7169b1d558 | /exercise-5.py | f84b09681201b7614de4610051a1f8313ed70864 | [] | no_license | AbhishekGangula/python-assignment | eedaca002bc9755bcaf5086171958fe7bfdefed5 | fab17a7a1b83e1240e0ebb2b5d83326a44c1881b | refs/heads/master | 2020-08-06T16:59:33.480790 | 2019-10-21T02:59:08 | 2019-10-21T02:59:08 | 213,084,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,546 | py | class Aircraft:
    def __init__(self, x=0, y=0):
        """Place the aircraft at (x, y) and echo the starting coordinates."""
        self.x = x
        self.y = y
        print(self.x)
        print(self.y)
    def move_left(self):
        """Decrease x by one (move one unit left)."""
        print("Moved Left..")
        self.x = self.x-1
    def move_right(self):
        """Increase x by one (move one unit right)."""
        print("Moved Right..")
        self.x = self.x+1
    def move_up(self):
        """Increase y by one (move one unit up)."""
        self.y = self.y+1
        print("Moved Up..")
    def move_down(self):
        """Decrease y by one (move one unit down)."""
        print("Moved Down..")
        self.y = self.y-1
    # Placeholder names; the real list is rebuilt as a module global in
    # the __main__ block below.
    instances=["instance1", "instance2", "instance3", "instance4", "instance5"]
    def instance_create(self):
        """Replace the global `instances` entries with five Aircraft objects."""
        # NOTE(review): bare `instances` here resolves to the *module-level*
        # global created in __main__, not this class attribute.
        for i in range(len(instances)):
            if i==0: instances[i] = Aircraft(0,0)
            if i==1: instances[i] = Aircraft(3,6)
            if i==2: instances[i] = Aircraft(6,12)
            if i==3: instances[i] = Aircraft(9,18)
            if i==4: instances[i] = Aircraft(12,24)
    def final_x_y_coord(self):
        """Print the final (x, y) position of every aircraft in the fleet."""
        for i in range(len(instances)):
            print("\nAircraft [",i,"]")
            print("Final X-Coord:", instances[i].x)
            print("Final Y-Coord:", instances[i].y)
    def initial_x_y_coord(self):
        """Print the starting coordinates of the aircraft at global index `i`."""
        # NOTE(review): `i` is read from the module-global loop variable set
        # in __main__ — fragile; verify before reusing outside that loop.
        print("Creating New Aircraft Object:",i)
        print("New Aircraft Object Has Just Been Initalized:",i)
        print("Initial X-Coord:",instances[i].x)
        print("Initial Y-Coord:",instances[i].y)
    def directions(self):
        """Run a fixed flight path on the aircraft at global index `i`."""
        instances[i].move_right()
        instances[i].move_right()
        instances[i].move_up()
        instances[i].move_right()
        instances[i].move_left()
        instances[i].move_up()
        instances[i].move_up()
        instances[i].move_down()
        instances[i].move_up()
        instances[i].move_down()
        instances[i].move_up()
class Boeing_747(Aircraft):
    """Aircraft subclass that adds a destination coordinate (x_d, y_d)."""
    def __init__(self,x,y,x_d,y_d):
        # NOTE(review): does not call super().__init__, so the parent's
        # coordinate echo (print calls) is skipped; x/y are set directly.
        self.x = x
        self.y = y
        self.x_d = x_d
        self.y_d = y_d
if __name__=='__main__':
    # Module-global list/loop variable — the Aircraft helper methods above
    # read `instances` and `i` as globals from this scope.
    instances=["instance1", "instance2", "instance3", "instance4", "instance5"]
    for i in range(len(instances)):
        if i==0: instances[i] = Boeing_747(11,11,51,58)
        if i==1: instances[i] = Boeing_747(11,10,59,51)
        if i==2: instances[i] = Boeing_747(11,15,56,56)
        if i==3: instances[i] = Boeing_747(5,14,64,50)
        if i==4: instances[i] = Boeing_747(7,9,69,59)
        instances[i].initial_x_y_coord()
        print("New Boeing 747 Object Has Just Been Iniitalized:", i)
        print("X-Coord:",instances[i].x_d)
        print("Y-Coord:",instances[i].y_d)
        instances[i].directions()
        instances[i].final_x_y_coord()
"abhishekgangula@gmail.com"
] | abhishekgangula@gmail.com |
d200a50db49b6d5069a5b11158af97662fe6b5e8 | f90a65159a2dcd6bb39479a9f38f45690f801f82 | /TheoInf/Exercises/Sheet04/Task4/test.py | 3ba8bc26d575d721b5348e7e6780bc94bf8f5d57 | [] | no_license | BUCKFAE/Semester06 | a0471fd4edec2f8e21ff3fcbb5572a53c6b7915f | ef9098d2f534d14815cadd854971d91b3a196833 | refs/heads/master | 2023-06-24T05:06:49.400571 | 2021-07-20T08:47:16 | 2021-07-20T08:47:16 | 359,149,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from main import prodZ
import unittest
class TestMain(unittest.TestCase):
def test_prodZ(self):
self.assertEqual(prodZ(7, 43), 30)
if __name__ == '__main__':
unittest.main()
| [
"informatik.schubert@gmail.com"
] | informatik.schubert@gmail.com |
946d0aef8341a73ec959d323ddba4161150d250c | b9481ebae49cf19de3b5718c69b84f1b59a8e421 | /apps/catax/south_migrations/0004_auto__del_cacountycounts__del_cacountytax__del_cacountytaxcounts__del_.py | fbfcd071308880a2a7170eebbd4ea9af29d08f1a | [] | no_license | arun-skaria/eracks | 06db7e3715afa2c6992fe09f05d6546520c65459 | 532d8a2be31199e7b78ca5e29944deb0a1400753 | refs/heads/master | 2023-01-08T01:40:10.036585 | 2017-07-13T13:10:42 | 2017-07-13T13:10:42 | 97,123,722 | 0 | 0 | null | 2022-12-26T20:16:17 | 2017-07-13T13:08:12 | HTML | UTF-8 | Python | false | false | 3,563 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Cacountycounts'
db.delete_table(u'cacountycounts')
# Deleting model 'Cacountytax'
db.delete_table(u'cacountytax')
# Deleting model 'Cacountytaxcounts'
db.delete_table(u'cacountytaxcounts')
# Deleting model 'Cacounties'
db.delete_table(u'cacounties')
# Deleting model 'Catax'
db.delete_table(u'catax')
def backwards(self, orm):
# Adding model 'Cacountycounts'
db.create_table(u'cacountycounts', (
('count', self.gf('django.db.models.fields.IntegerField')()),
('county', self.gf('django.db.models.fields.CharField')(max_length=20)),
('tax', self.gf('django.db.models.fields.FloatField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('catax', ['Cacountycounts'])
# Adding model 'Cacountytax'
db.create_table(u'cacountytax', (
('county', self.gf('django.db.models.fields.CharField')(max_length=20)),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('tax', self.gf('django.db.models.fields.FloatField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('catax', ['Cacountytax'])
# Adding model 'Cacountytaxcounts'
db.create_table(u'cacountytaxcounts', (
('count', self.gf('django.db.models.fields.IntegerField')()),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('tax', self.gf('django.db.models.fields.FloatField')()),
('county', self.gf('django.db.models.fields.CharField')(max_length=20)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('catax', ['Cacountytaxcounts'])
# Adding model 'Cacounties'
db.create_table(u'cacounties', (
('county', self.gf('django.db.models.fields.CharField')(max_length=20)),
('city', self.gf('django.db.models.fields.CharField')(max_length=50)),
('tax', self.gf('django.db.models.fields.FloatField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('catax', ['Cacounties'])
# Adding model 'Catax'
db.create_table(u'catax', (
('count', self.gf('django.db.models.fields.IntegerField')()),
('county', self.gf('django.db.models.fields.CharField')(max_length=50)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(default='', max_length=100)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('cities', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=1000, blank=True)),
('tax', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('catax', ['Catax'])
models = {
}
complete_apps = ['catax']
| [
"nijap@techversantinfotech.com"
] | nijap@techversantinfotech.com |
6ef5099badd87e97224f57a7ee93d45a1d6f7e1a | 87b1a551111b39ccaadaee413a108115a3cef1fb | /reader/read.py | 695a3f21f10dae0319d004b13e270c12e4d9de47 | [
"MIT"
] | permissive | callumfrance/brailler | 339e2dcd88e125aee756e08d08952538132fe43c | fddaa934618120702630fdb1b4d15033b868be79 | refs/heads/master | 2021-07-13T23:44:34.306629 | 2020-06-03T10:19:49 | 2020-06-03T10:19:49 | 236,134,154 | 0 | 0 | MIT | 2020-02-20T09:21:28 | 2020-01-25T06:23:47 | Python | UTF-8 | Python | false | false | 1,196 | py | """
This is the Reader class, which is responsible for directly interfacing
with the Liblouis library functions.
This class allows for the program to output Braille from English,
and to turn input Braille into English to be interpreted by the wider
program or to store in a text file.
"""
from louis import translateString, backTranslateString
"""
Some possible tables:
en-GB-g2.ctb <-- Issues with 8-dot cells being generated
en-gb-g1.utb
en-us-g1.ctb
en-us-g2.ctb
en_GB.tbl
en-ueb-g2.ctb <-- Unified English Braille
More at:
https://github.com/liblouis/liblouis/tree/master/tables
or
.../LibLouis/liblouis-3.12.0/tables/
"""
class Reader:
    """Static facade over the Liblouis translate / back-translate calls.

    Both helpers take an optional ``t_tables`` list naming the Liblouis
    translation tables to apply (defaults: unicode display + UEB grade 2).
    """

    @staticmethod
    def back_translate_item(in_str, t_tables=['unicode.dis', 'en-ueb-g2.ctb']):
        """Translate a braille string back into unicode English.
        """
        result = backTranslateString(t_tables, in_str)
        return(result)

    @staticmethod
    def translate_item(in_str, t_tables=['unicode.dis', 'en-ueb-g2.ctb']):
        """Translate a unicode English string into braille.
        """
        result = translateString(t_tables, in_str)
| [
"france.callum@gmail.com"
] | france.callum@gmail.com |
746fe6753e7cb096fc1ca57045d8b2b3c6a25ccd | 208367e3e386b0d7b2ac018a3445e6f91bbbb983 | /backend/cmx.py | 750abdacf4f921ecd942760f2b9cffa76d42380e | [] | no_license | mboudreau/InformationAnywhere | 4b4d340067468d8a92031c34b82c6bb8612a9f58 | 7b6ddb95a725b0827d66b646dfbd8c9b76fda9be | refs/heads/master | 2020-03-28T19:35:31.421156 | 2015-03-19T01:13:54 | 2015-03-19T01:13:54 | 32,434,722 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | import urllib2
import ssl
from xml.dom.minidom import *
import os
import re
import xml.sax
import base64
from bottle import route, run, static_file
# CMX Mobility Services API endpoint listing all known client locations.
url = 'https://10.10.20.11/api/contextaware/v1/location/clients'
# The lab server presents a self-signed certificate; disable TLS
# verification globally so urllib2 can reach it.
ssl._create_default_https_context = ssl._create_unverified_context
@route('/location')
def cmx_api():
try:
auth = base64.b64encode('learning:learning')
headers = {'Authorization ':'Basic '+auth}
request = urllib2.Request(url,None,headers)
response = urllib2.urlopen(request)
data = response.read()
response.close()
dom = xml.dom.minidom.parseString(data)
locationNode = dom.firstChild
refNode = locationNode.childNodes[1]
pnode = refNode.childNodes[1]
print pnode.toxml()
# print dom
except urllib2.URLError, e:
print "Error while opening url!!"
print e
run(host='', port=8080, debug=True) | [
"icosmine@gmail.com"
] | icosmine@gmail.com |
6dfc4898f0270733e52f179f1d9a16edb48cee2b | f81e2a5fd58a37076e9943347c4c35032f03d881 | /prediction/__init__.py | e6726be4083f4c109f2ff1ea2e42175bd03b702e | [] | no_license | adamlerer/Ca-Backbone-Prediction | 138216ebe70021bb51d9c22e67ae9e3a694e13a6 | c0e7b2372d9bca4a9de2d3f6a88a16f19a1ff921 | refs/heads/master | 2022-04-14T21:40:29.528843 | 2020-04-14T17:36:49 | 2020-04-14T17:36:49 | 255,676,349 | 7 | 5 | null | 2020-04-14T17:22:22 | 2020-04-14T17:22:21 | null | UTF-8 | Python | false | false | 24 | py | from . import prediction | [
"RyanHarlich@hotmail.com"
] | RyanHarlich@hotmail.com |
b1553fd37ce4d7023824e03dd47f186fba0c03f2 | 8ba1ffdb74308f2a58398d23656cb35f24fc2bbd | /jetstud/asgi.py | 2a41eb41b653c4fbada1648bacb2e2ce50b13e4a | [
"MIT"
] | permissive | dobbiedts/jet_stud | 53d7fd141a3a49bc7e237d2ffe92478655a0f839 | c872bdac6fc2fe4c23701bfa2efd8ec6577d9489 | refs/heads/main | 2023-03-06T13:50:56.382420 | 2021-02-18T11:30:07 | 2021-02-18T11:30:07 | 340,012,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for jetstud project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'jetstud.settings')
application = get_asgi_application()
| [
"olaoluwaakinloye@gmail.com"
] | olaoluwaakinloye@gmail.com |
e54152d7badc7d1101d0565e6b3f5e75b0ef9362 | 7dbae76b9db6ce69c560f96d4c8c642901443f8c | /solver.py | 6b8587c483f24ca6d7ac353cd00d1b676fba01e1 | [] | no_license | Aluriak/crack-fallout-terminals | f68eb23a2f57f0f40e6db95583028e6e39f19bdc | aa53a765bcdaf313fe83a5efed5b4e8a62e00d22 | refs/heads/master | 2020-04-07T00:13:30.292837 | 2018-11-21T15:41:30 | 2018-11-21T15:41:30 | 157,895,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,574 | py | """Module that implement the solving routines for the Fallout terminal puzzle.
Puzzle is:
Words (at least 5, no more than 100) are given.
One of them is the 'key'.
Player win if it gives the key before the 5-th try (therefore, 4 words can be tested).
If player gives a word that is not the key,
he gain knowledge about how many letters are placed at the same place.
(see compute_score for exact definition)
"""
import random
from utils import choose, words_of_size
from collections import Counter
# A player gets this many guesses before losing ("before the 5-th try").
MAX_ATTEMPTS = 4
def create_game_data(word_size:int=8, min_words:int=7, max_words:int=15) -> ([str], str):
    """Draw a random puzzle instance.

    Returns a mapping {candidate word: None} (scores filled in during play)
    and the key word, drawn uniformly among the candidates.
    """
    pool = tuple(words_of_size(word_size))
    nb_words = random.randint(min_words, max_words)
    candidates = dict.fromkeys(choose(nb_words, pool))
    key = next(choose(1, candidates.keys()))
    return candidates, key
def simulate_game_silently(*args, **kwargs):
    """Run simulate_game with all output suppressed (silent=True)."""
    return simulate_game(*args, silent=True, **kwargs)
def simulate_game(player:callable, words:[str], key:str, silent:bool=False):
    """Simulate a game with given words and key, and given player.

    player -- generator taking (words, remaining_attempts), yielding guesses
    words  -- mapping {word: score or None}; scores are filled in as played
    key    -- the word to find (must be a key of *words*)
    silent -- when True, nothing is printed

    Return the number of attempts made before solving it, or 0 if not found.
    """
    assert key in words
    # Bug fix: the original assigned locals()['print'], which has no effect
    # in CPython, so silent mode still printed.  Route every message
    # through an explicit output function instead.
    say = (lambda *_, **__: None) if silent else print
    if silent:
        print_words = lambda: None
    else:
        def print_words():
            say(f'WORDS:')
            for word, score in words.items():
                say('\t' + word + (f' [{score}]' if score else ' '*5))
    remaining_attempts = MAX_ATTEMPTS
    while remaining_attempts > 0:
        print_words()
        tested_word = next(player(words, remaining_attempts))
        say('Word tested >', tested_word)
        words[tested_word] = compute_score(tested_word, key)
        if tested_word == key:
            total_attempts = MAX_ATTEMPTS - remaining_attempts + 1
            say(f'Success in {total_attempts} attempts !')
            return total_attempts
        else:
            say('Access denied.')
            say(f'{words[tested_word]}/{len(key)} complete.')
            remaining_attempts -= 1
        assert tested_word != key
    say('Failure. The word to find was "' + key + '"')
    return 0
def compute_score(word:str, key:str, *, silent:bool=False) -> int:
    """Return the number of positions at which *word* and *key* agree.

    Extra characters of the longer string are ignored (zip truncates);
    a warning is printed for mismatched lengths unless *silent*.
    """
    if not silent and len(word) != len(key):
        print(f"WARNING: word {word} was scored based on key {key}, which is not the same size ({len(word)} vs {len(key)}).")
    matching = 0
    for left, right in zip(word, key):
        if left == right:
            matching += 1
    return matching
def valid_candidates(words:{str: int}, remaining_attempts:int, *, silent:bool=False) -> [str]:
    """Yield the words that can be tested.

    If only one is returned, it is the key.
    If none is returned, it is a bug.

    words -- mapping from candidate word to its score, or None if not known.
    """
    known = {word: score for word, score in words.items() if score is not None}
    if not known:
        # No information yet: every word is a candidate.
        yield from words
        return
    for candidate in words:
        if candidate in known:
            continue
        # A candidate is viable iff it shares with every already-scored
        # word exactly as many positioned letters as that word's score.
        if all(compute_score(candidate, scored, silent=True) == score
               for scored, score in known.items()):
            yield candidate
def best_candidates(words:{str: int}, remaining_attempts:int, *, silent:bool=False) -> [str]:
    """Yield the words that should be tested (among the words that can be).

    If only one is returned, it is the key.
    If none is returned, it is a bug.

    words -- mapping from candidate word to its score, or None if not known.
    """
    candidates = tuple(valid_candidates(words, remaining_attempts, silent=silent))
    # Bug fix: these diagnostics previously ignored the *silent* flag.
    if not silent:
        print('#candidates:', len(candidates))
        print(' candidates:', ', '.join(candidates))
    if len(candidates) <= 1:  # the last candidate is your best shot
        yield from candidates
        return
    # for each position, count the available letters
    position_distributions = map(Counter, zip(*candidates))
    if not silent:
        # tuple() consumes the map; fine since it is not used afterwards
        print('DISTRIBUTIONS:', tuple(position_distributions))
    # TODO: ALT: get the word that would discard the most other words if found invalid.
    # while nothing is implemented…
    yield from candidates
# Strategies usable as the *player* argument of simulate_game.
PLAYERS = valid_candidates, best_candidates

if __name__ == '__main__':
    # Demo: play one verbose game with the smarter strategy on random words.
    simulate_game(best_candidates, *create_game_data())
| [
"lucas.bourneuf@laposte.net"
] | lucas.bourneuf@laposte.net |
0524c401446b5832cbb3571633fb75ead633e864 | 5147e8ddfd23cc0806f3447847f629a55207dc26 | /evaluation/loader.py | 79c12293e39654eaa61ced8b0e3908dbfca1e54e | [] | no_license | kyraropmet/session-rec | efab354aff1eb7569997e80b0ab8e26d4c12982e | b8a78c1ac92a299acca40a957096e28e211a02da | refs/heads/master | 2022-11-29T01:36:37.936495 | 2019-06-07T09:20:10 | 2019-06-07T09:20:10 | 182,852,579 | 6 | 4 | null | 2022-11-21T21:02:16 | 2019-04-22T19:18:01 | Python | UTF-8 | Python | false | false | 19,597 | py | import time
import os.path
import numpy as np
import pandas as pd
from _datetime import timezone, datetime
def load_data( path, file, rows_train=None, rows_test=None, slice_num=None, density=1, train_eval=False ):
    '''
    Loads a tuple of training and test set with the given parameters.

    Parameters
    --------
    path : string
        Base path to look in for the prepared data files
    file : string
        Prefix of the dataset you want to use.
        "yoochoose-clicks-full" loads yoochoose-clicks-full_train_full.txt and yoochoose-clicks-full_test.txt
    rows_train : int or None
        Number of rows to load from the training set file.
        This option will automatically filter the test set to only retain items included in the training set.
    rows_test : int or None
        Number of rows to load from the test set file.
    slice_num :
        Adds a slice index to the constructed file_path
        yoochoose-clicks-full_train_full.0.txt
    density : float
        Percentage of the sessions to randomly retain from the original data (0-1).
        The result is cached for the execution of multiple experiments.
    train_eval : bool
        Load the validation split (train_tr / train_valid) instead of the
        full train/test split.

    Returns
    --------
    out : tuple of pandas.DataFrame
        (train, test)
    '''

    print('START load data')
    st = time.time()
    # Bug fix: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for wall-clock benchmarking.
    sc = time.perf_counter()

    split = ''
    if( slice_num != None and isinstance(slice_num, int ) ):
        split = '.'+str(slice_num)

    train_appendix = '_train_full'
    test_appendix = '_test'
    if train_eval:
        train_appendix = '_train_tr'
        test_appendix = '_train_valid'

    density_appendix = ''
    if( density < 1 ): #create a down-sampled copy of the splits and cache it on disk
        if not os.path.isfile( path + file + train_appendix + split + '.txt.'+str( density ) ) :
            train = pd.read_csv(path + file + train_appendix + split + '.txt', sep='\t', dtype={'ItemId':np.int64})
            test = pd.read_csv(path + file + test_appendix + split + '.txt', sep='\t', dtype={'ItemId':np.int64} )

            sessions = train.SessionId.unique()
            drop_n = round( len(sessions) - (len(sessions) * density) )
            drop_sessions = np.random.choice(sessions, drop_n, replace=False)
            train = train[ ~train.SessionId.isin( drop_sessions ) ]
            train.to_csv( path + file + train_appendix +split+'.txt.'+str(density), sep='\t', index=False )

            sessions = test.SessionId.unique()
            drop_n = round( len(sessions) - (len(sessions) * density) )
            drop_sessions = np.random.choice(sessions, drop_n, replace=False)
            test = test[ ~test.SessionId.isin( drop_sessions ) ]
            # only keep test items that also appear in the sampled training set
            test = test[np.in1d(test.ItemId, train.ItemId)]
            test.to_csv( path + file + test_appendix +split+'.txt.'+str(density), sep='\t', index=False )

        density_appendix = '.'+str(density)

    if( rows_train == None ):
        train = pd.read_csv(path + file + train_appendix +split+'.txt'+density_appendix, sep='\t', dtype={'ItemId':np.int64})
    else:
        train = pd.read_csv(path + file + train_appendix +split+'.txt'+density_appendix, sep='\t', dtype={'ItemId':np.int64}, nrows=rows_train)

    if( rows_test == None ):
        test = pd.read_csv(path + file + test_appendix +split+'.txt'+density_appendix, sep='\t', dtype={'ItemId':np.int64} )
    else :
        test = pd.read_csv(path + file + test_appendix +split+'.txt'+density_appendix, sep='\t', dtype={'ItemId':np.int64}, nrows=rows_test )

    # drop test items unseen during training, then test sessions left with < 2 events
    test = test[np.in1d(test.ItemId, train.ItemId)]
    session_lengths = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, session_lengths[ session_lengths>1 ].index)]

    test.sort_values( ['SessionId', 'ItemId'], inplace=True )
    train.sort_values( ['SessionId', 'ItemId'], inplace=True )

    #output
    data_start = datetime.fromtimestamp( train.Time.min(), timezone.utc )
    data_end = datetime.fromtimestamp( train.Time.max(), timezone.utc )

    print('Loaded train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format( len(train), train.SessionId.nunique(), train.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )

    data_start = datetime.fromtimestamp( test.Time.min(), timezone.utc )
    data_end = datetime.fromtimestamp( test.Time.max(), timezone.utc )

    print('Loaded test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format( len(test), test.SessionId.nunique(), test.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )

    check_data(train, test)

    print( 'END load data ', (time.perf_counter()-sc), 'c / ', (time.time()-st), 's' )

    return (train, test)
def load_data_session( path, file, sessions_train=None, sessions_test=None, slice_num=None, train_eval=False ):
    '''
    Loads a tuple of training and test set with the given parameters,
    optionally truncated to a number of sessions.

    Parameters
    --------
    path : string
        Base path to look in for the prepared data files
    file : string
        Prefix of the dataset you want to use.
        "yoochoose-clicks-full" loads yoochoose-clicks-full_train_full.txt and yoochoose-clicks-full_test.txt
    sessions_train : int or None
        Number of (most recent) sessions to keep from the training set.
        The test set is then filtered to items remaining in training.
    sessions_test : int or None
        Number of sessions to keep from the test set.
    slice_num :
        Adds a slice index to the constructed file_path
        yoochoose-clicks-full_train_full.0.txt
    train_eval : bool
        Load the validation split (train_tr / train_valid) instead.

    Returns
    --------
    out : tuple of pandas.DataFrame
        (train, test)
    '''

    print('START load data')
    st = time.time()
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter().
    sc = time.perf_counter()

    split = ''
    if( slice_num != None and isinstance(slice_num, int ) ):
        split = '.'+str(slice_num)

    train_appendix = '_train_full'
    test_appendix = '_test'
    if train_eval:
        train_appendix = '_train_tr'
        test_appendix = '_train_valid'

    train = pd.read_csv(path + file + train_appendix +split+'.txt', sep='\t', dtype={'ItemId':np.int64})
    test = pd.read_csv(path + file + test_appendix +split+'.txt', sep='\t', dtype={'ItemId':np.int64} )

    if( sessions_train != None ):
        # keep the most recently started sessions of the training set
        keep = train.sort_values('Time', ascending=False).SessionId.unique()[:(sessions_train-1)]
        train = train[ np.in1d( train.SessionId, keep ) ]
        test = test[np.in1d(test.ItemId, train.ItemId)]

    if( sessions_test != None ):
        keep = test.SessionId.unique()[:(sessions_test-1)]
        test = test[ np.in1d( test.SessionId, keep ) ]

    # drop test sessions that end up with fewer than 2 events
    session_lengths = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, session_lengths[ session_lengths>1 ].index)]

    #output
    data_start = datetime.fromtimestamp( train.Time.min(), timezone.utc )
    data_end = datetime.fromtimestamp( train.Time.max(), timezone.utc )

    print('Loaded train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format( len(train), train.SessionId.nunique(), train.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )

    data_start = datetime.fromtimestamp( test.Time.min(), timezone.utc )
    data_end = datetime.fromtimestamp( test.Time.max(), timezone.utc )

    print('Loaded test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format( len(test), test.SessionId.nunique(), test.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )

    check_data(train, test)

    print( 'END load data ', (time.perf_counter()-sc), 'c / ', (time.time()-st), 's' )

    return (train, test)
def load_buys( path, file ):
    '''
    Load all buy events from the youchoose file, retains events fitting in the given test set and merges both data sets into one

    Parameters
    --------
    path : string
        Base path to look in for the prepared data files
    file : string
        Prefix of the dataset you want to use.
        "yoochoose-clicks-full" loads yoochoose-clicks-full_train_full.txt and yoochoose-clicks-full_test.txt

    Returns
    --------
    out : pandas.DataFrame
        test with buys
    '''

    print('START load buys')
    st = time.time()
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter().
    sc = time.perf_counter()

    #load csv
    buys = pd.read_csv(path + file + '.txt', sep='\t', dtype={'ItemId':np.int64})

    print( 'END load buys ', (time.perf_counter()-sc), 'c / ', (time.time()-st), 's' )

    return buys
def load_data_userbased( path, file, rows_train=None, rows_test=None, slice_num=None, density=1, train_eval=False ):
    '''
    Loads a tuple of training and test set with the given parameters for the
    user-based datasets (CSV files with snake_case columns, renamed to the
    canonical ItemId/SessionId/UserId/Time scheme).

    Parameters
    --------
    path : string
        Base path to look in for the prepared data files
    file : string
        Prefix of the dataset you want to use.
    rows_train : int or None
        Number of rows to load from the training set file.
        This option will automatically filter the test set to only retain items included in the training set.
    rows_test : int or None
        Number of rows to load from the test set file.
    slice_num :
        Adds a slice index to the constructed file_path
    density : float
        Percentage of the sessions to randomly retain from the original data (0-1).
        The result is cached for the execution of multiple experiments.
    train_eval : bool
        Load the validation split (train_valid / test_valid) instead.

    Returns
    --------
    out : tuple of pandas.DataFrame
        (train, test)
    '''

    print('START load data')
    st = time.time()
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter().
    sc = time.perf_counter()

    split = ''
    if( slice_num != None and isinstance(slice_num, int ) ):
        split = '.'+str(slice_num)

    train_appendix = '_train'
    test_appendix = '_test'
    if train_eval:
        train_appendix = '_train_valid'
        test_appendix = '_test_valid'

    density_appendix = ''
    if( density < 1 ): #create a down-sampled copy of the splits and cache it on disk
        if not os.path.isfile( path + file + train_appendix + split + '.csv.'+str( density ) ) :
            train = pd.read_csv(path + file + train_appendix + split + '.csv', sep='\t', dtype={'item_id':np.int64})
            test = pd.read_csv(path + file + test_appendix + split + '.csv', sep='\t', dtype={'item_id':np.int64} )

            sessions = train.SessionId.unique()
            drop_n = round( len(sessions) - (len(sessions) * density) )
            drop_sessions = np.random.choice(sessions, drop_n, replace=False)
            train = train[ ~train.SessionId.isin( drop_sessions ) ]
            train.to_csv( path + file + train_appendix +split+'.csv.'+str(density), sep='\t', index=False )

            sessions = test.SessionId.unique()
            drop_n = round( len(sessions) - (len(sessions) * density) )
            drop_sessions = np.random.choice(sessions, drop_n, replace=False)
            test = test[ ~test.SessionId.isin( drop_sessions ) ]
            test = test[np.in1d(test.ItemId, train.ItemId)]
            test.to_csv( path + file + test_appendix +split+'.csv.'+str(density), sep='\t', index=False )

        density_appendix = '.'+str(density)

    if( rows_train == None ):
        train = pd.read_csv(path + file + train_appendix +split+'.csv'+density_appendix, sep='\t', dtype={'item_id':np.int64})
    else:
        train = pd.read_csv(path + file + train_appendix +split+'.csv'+density_appendix, sep='\t', dtype={'item_id':np.int64}, nrows=rows_train)

    if( rows_test == None ):
        test = pd.read_csv(path + file + test_appendix +split+'.csv'+density_appendix, sep='\t', dtype={'item_id':np.int64} )
    else :
        test = pd.read_csv(path + file + test_appendix +split+'.csv'+density_appendix, sep='\t', dtype={'item_id':np.int64}, nrows=rows_test )

    # map snake_case columns onto the canonical names used everywhere else
    train = rename_cols(train)
    test = rename_cols(test)

    if( rows_train != None ):
        test = test[np.in1d(test.ItemId, train.ItemId)]

    #output
    data_start = datetime.fromtimestamp( train.Time.min(), timezone.utc )
    data_end = datetime.fromtimestamp( train.Time.max(), timezone.utc )

    print('Loaded train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format( len(train), train.SessionId.nunique(), train.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )

    data_start = datetime.fromtimestamp( test.Time.min(), timezone.utc )
    data_end = datetime.fromtimestamp( test.Time.max(), timezone.utc )

    print('Loaded test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format( len(test), test.SessionId.nunique(), test.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat() ) )

    print( 'END load data ', (time.perf_counter()-sc), 'c / ', (time.time()-st), 's' )

    return (train, test)
def check_data( train, test ):
    """Sanity-check a train/test split and print a warning for each issue.

    Checks (when the canonical ItemId/SessionId columns are present):
      - no test item is unseen in training,
      - no session has fewer than 2 events in either split.
    Returns None; all findings are reported via print only.
    """
    if 'ItemId' in train.columns and 'SessionId' in train.columns:

        new_in_test = set( test.ItemId.unique() ) - set( train.ItemId.unique() )
        if len( new_in_test ) > 0:
            print( 'WAAAAAARRRNIIIIING: new items in test set' )

        # Bug fix: groupby().size() is always >= 1, so the original "== 0"
        # comparison could never fire; a length-1 session is the real problem.
        session_min_train = train.groupby( 'SessionId' ).size().min()
        if session_min_train <= 1:
            print( 'WAAAAAARRRNIIIIING: session length 1 in train set' )

        session_min_test = test.groupby( 'SessionId' ).size().min()
        if session_min_test <= 1:
            # Bug fix: this message previously said 'train set'.
            print( 'WAAAAAARRRNIIIIING: session length 1 in test set' )

    else:
        print( 'data check not possible due to individual column names' )
def rename_cols(df):
    """Map the user-based dataset's snake_case columns onto the canonical
    ItemId/SessionId/UserId/Time names, in place.

    Note: a renamed column is re-appended, so it moves to the end of the
    frame (this matches the historical behaviour callers may rely on).
    """
    canonical = {
        'item_id': 'ItemId',
        'session_id': 'SessionId',
        'user_id': 'UserId',
        'created_at': 'Time',
    }
    for original in list(df.columns):
        target = canonical.get(original)
        if target is not None:
            df[target] = df[original]
            del df[original]
    return df
def count_repetitions(path, file, rows_train=None, rows_test=None, slice_num=None, density=1, train_eval=False):
    '''
    Loads a train/test split exactly like load_data and additionally prints,
    per training session, how many distinct items occur more than once.

    Parameters
    --------
    path : string
        Base path to look in for the prepared data files
    file : string
        Prefix of the dataset you want to use.
    rows_train : int or None
        Number of rows to load from the training set file.
        This option will automatically filter the test set to only retain items included in the training set.
    rows_test : int or None
        Number of rows to load from the test set file.
    slice_num :
        Adds a slice index to the constructed file_path
    density : float
        Percentage of the sessions to randomly retain from the original data (0-1).
        The result is cached for the execution of multiple experiments.
    train_eval : bool
        Load the validation split (train_tr / train_valid) instead.

    Returns
    --------
    out : tuple of pandas.DataFrame
        (train, test)
    '''

    print('START load data')
    st = time.time()
    # Bug fix: time.clock() was removed in Python 3.8; use perf_counter().
    sc = time.perf_counter()

    split = ''
    if (slice_num != None and isinstance(slice_num, int)):
        split = '.' + str(slice_num)

    train_appendix = '_train_full'
    test_appendix = '_test'
    if train_eval:
        train_appendix = '_train_tr'
        test_appendix = '_train_valid'

    density_appendix = ''
    if (density < 1):  # create a down-sampled copy of the splits and cache it on disk
        if not os.path.isfile(path + file + train_appendix + split + '.txt.' + str(density)):
            train = pd.read_csv(path + file + train_appendix + split + '.txt', sep='\t', dtype={'ItemId': np.int64})
            test = pd.read_csv(path + file + test_appendix + split + '.txt', sep='\t', dtype={'ItemId': np.int64})

            sessions = train.SessionId.unique()
            drop_n = round(len(sessions) - (len(sessions) * density))
            drop_sessions = np.random.choice(sessions, drop_n, replace=False)
            train = train[~train.SessionId.isin(drop_sessions)]
            train.to_csv(path + file + train_appendix + split + '.txt.' + str(density), sep='\t', index=False)

            sessions = test.SessionId.unique()
            drop_n = round(len(sessions) - (len(sessions) * density))
            drop_sessions = np.random.choice(sessions, drop_n, replace=False)
            test = test[~test.SessionId.isin(drop_sessions)]
            test = test[np.in1d(test.ItemId, train.ItemId)]
            test.to_csv(path + file + test_appendix + split + '.txt.' + str(density), sep='\t', index=False)

        density_appendix = '.' + str(density)

    if (rows_train == None):
        train = pd.read_csv(path + file + train_appendix + split + '.txt' + density_appendix, sep='\t',
                            dtype={'ItemId': np.int64})
    else:
        train = pd.read_csv(path + file + train_appendix + split + '.txt' + density_appendix, sep='\t',
                            dtype={'ItemId': np.int64}, nrows=rows_train)

    if (rows_test == None):
        test = pd.read_csv(path + file + test_appendix + split + '.txt' + density_appendix, sep='\t',
                           dtype={'ItemId': np.int64})
    else:
        test = pd.read_csv(path + file + test_appendix + split + '.txt' + density_appendix, sep='\t',
                           dtype={'ItemId': np.int64}, nrows=rows_test)

    # drop test items unseen during training, then test sessions with < 2 events
    test = test[np.in1d(test.ItemId, train.ItemId)]
    session_lengths = test.groupby('SessionId').size()
    test = test[np.in1d(test.SessionId, session_lengths[session_lengths > 1].index)]

    test.sort_values('SessionId', inplace=True)
    train.sort_values('SessionId', inplace=True)

    # output
    data_start = datetime.fromtimestamp(train.Time.min(), timezone.utc)
    data_end = datetime.fromtimestamp(train.Time.max(), timezone.utc)

    print('Loaded train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format(len(train), train.SessionId.nunique(), train.ItemId.nunique(), data_start.date().isoformat(),
                 data_end.date().isoformat()))

    data_start = datetime.fromtimestamp(test.Time.min(), timezone.utc)
    data_end = datetime.fromtimestamp(test.Time.max(), timezone.utc)

    print('Loaded test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n'.
          format(len(test), test.SessionId.nunique(), test.ItemId.nunique(), data_start.date().isoformat(),
                 data_end.date().isoformat()))

    check_data(train, test)

    # Bug fix: SeriesGroupBy.agg({'ItemId': 'nunique'}) used dict-based
    # renaming, which was deprecated in pandas 0.20 and removed in 1.0
    # (raises SpecificationError).  nunique().to_frame() is equivalent.
    df_out = train[train.duplicated(subset=['SessionId', 'ItemId'], keep=False)] \
        .groupby('SessionId')['ItemId'] \
        .nunique() \
        .to_frame('Duplicates')

    print(df_out.reset_index())
    print("Number of sessions: " + str(df_out.shape[0]))
    print("More than 1 repetition: "+str(df_out[df_out['Duplicates'] > 1].count()))

    print('END load data ', (time.perf_counter() - sc), 'c / ', (time.time() - st), 's')

    return (train, test)
| [
"korw.yraro@gmail.com"
] | korw.yraro@gmail.com |
7e6054637c08dea4cd7585e802b01d0005e53500 | 71d355d11c7150c3dcf9b19100441188c0e10db0 | /DataPersistence/Survey.py | e4cadf86702a9bd1cb8664d68261455dcde597d6 | [] | no_license | VeeanPrasad/cu-feedback | efe0aee3e26223e14c43984b7a23d083dfd96e75 | d48c33b56a28362575bfe22b9252ad3f976a07b0 | refs/heads/master | 2023-04-25T08:04:29.408380 | 2021-05-04T21:08:29 | 2021-05-04T21:08:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | from __future__ import print_function
import pandas as pd
import numpy as np
from download_sheet_to_csv import download_sheet_to_csv
from get_api_services import get_api_services
from get_spreadsheet_id import get_spreadsheet_id
from db import Database
# Module-level database handle, opened once at import time (connect_to_db()
# calls start_db() again per use).
db = Database()
db.start_db()

# OAuth scope required for reading/writing Google Sheets.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
# Destination file for the simulated survey export (MVP path).
csv_name = "sample_data.csv"
# Pool of canned free-text comments the simulator samples from.
sample_comments = ['no comment, good job','more examples please','slow down, we cannot keep up',
                   'na','none','',
                   'slower!','share your slides please','you\'re doing great!']
def simulate_survey():
    """Generate 25 fake survey responses and persist them twice:
    to ``sample_data.csv`` (MVP path) and to the ``survey_data`` DB table.
    """
    # NOTE(review): assumes start_db() returns a DB-API/SQLAlchemy
    # connection usable by DataFrame.to_sql — confirm against Database.
    conn = connect_to_db()
    data = pd.DataFrame()
    num_responses = 25
    response_values = [1, 2, 3, 4, 5]
    # NOTE(review): both choices are empty strings, so 'timestamp' is
    # always '' — presumably a placeholder; confirm intended.
    data['timestamp'] = np.random.choice(['', ''], num_responses)
    data['response_1'] = np.random.choice(response_values, num_responses)
    data['response_2'] = np.random.choice(response_values, num_responses)
    data['response_3'] = np.random.choice(response_values, num_responses)
    data['performance_rating'] = np.random.choice(response_values, num_responses)
    data['comment'] = np.random.choice(sample_comments, num_responses)
    # For the MVP
    data.to_csv(csv_name, index=False)
    # For the ensuing project
    data.to_sql('survey_data', conn, if_exists='replace', index=True)
    conn.close()
def connect_to_db():
    """Open the module-level database and hand back whatever start_db() returns."""
    return db.start_db()
def download_spreadsheet_as_csv(spreadsheet_name):
    """Locate *spreadsheet_name* on Drive and dump its 'Form Responses 1'
    sheet to a local CSV file.
    """
    drive_service, sheets_service = get_api_services()
    sheet_id = get_spreadsheet_id(drive_service, spreadsheet_name)
    download_sheet_to_csv(sheets_service, sheet_id, "Form Responses 1")
def import_data():
    """Load the downloaded survey CSV and persist it to the database.

    Bug fix: the original called ``df.to_sql()`` with no arguments, which
    raises TypeError because ``name`` and ``con`` are required.  Mirror the
    usage in simulate_survey instead.
    """
    df = pd.read_csv("Form Responses 1.csv")
    conn = connect_to_db()
    df.to_sql('survey_data', conn, if_exists='replace', index=True)
    conn.close()
| [
"james.m.luther@gmail.com"
] | james.m.luther@gmail.com |
3535beab6740f78dbca4296b6132ad5ee5cf65b3 | 8078d588f00a21096ef64132d306b5018a27161e | /tf_mnist_test.py | ada70d0abfc2a7be1e9a5badb4927e5e2b605741 | [] | no_license | ZhangAoCanada/tf2_test | fa97a9ba795f4ca9bf153a81d092f1095b8f81d4 | 80e746577698f7b21addbbd9ebffd023dfddc838 | refs/heads/master | 2022-10-11T10:35:03.427944 | 2020-06-11T15:45:37 | 2020-06-11T15:45:37 | 271,570,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,727 | py | import os
##### set specific gpu #####
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import tensorflow as tf
import tensorflow.nn as nn
import tensorflow.keras as k
import numpy as np
from tqdm import tqdm
class TestModel(tf.Module):
    """Convolutional classifier built from raw tf.Variables.

    A stack of convolution + max-pool stages (one per entry in
    ``conv_channels_list``) followed by a single fully connected layer.
    The spatial rank is taken from ``input_shape`` (all but the last,
    channel, dimension), so the same code serves 2-D or 3-D inputs.
    """

    def __init__(self, input_shape, conv_channels_list, output_size):
        """
        Args:
            input_shape         ->      [w, h, channels]
            conv_channels_list  ->      list of ints indicating
                                        output channels of each
                                        convolutional layer
            outputsize          ->      size of labels [classes, ]
        """
        super(TestModel, self).__init__()
        self.initializer = tf.initializers.GlorotNormal()
        self.input_shape = input_shape
        # NOTE(review): mutates the caller's list in place — confirm intended.
        conv_channels_list.insert(0, input_shape[-1])
        self.conv_lists = conv_channels_list
        self.mlp_input_size = self.getMlpInputSize()
        self.output_size = output_size
        self.filters = self.getAllFilters()
        # NOTE(review): 'weights' may shadow attributes of Keras-style
        # modules; tf.Module itself does not define it — verify.
        self.weights = self.getWeights(self.mlp_input_size, self.output_size)
        self.bias = self.getBias(self.output_size)

    def getMlpInputSize(self):
        # Flattened feature count after all conv stages: each stage halves
        # every spatial dimension (stride-2 pooling below), floored at 1.
        final_feature_size = []
        for j in range(len(self.input_shape) - 1):
            current_shape = self.input_shape[j]
            for i in range(len(self.conv_lists) - 1):
                current_shape = current_shape // 2
                if current_shape == 0:
                    current_shape += 1
            final_feature_size.append(current_shape)
        # Multiply all remaining spatial dims by the last channel count.
        output = 1.
        for i in range(len(final_feature_size)):
            output *= final_feature_size[i]
        output *= self.conv_lists[-1]
        output = int(output)
        return output

    def getAllFilters(self):
        # One kernel (3 per spatial dimension) per consecutive channel pair.
        self.filter_shape = (len(self.input_shape) - 1) * [3]
        self.num_conv_layers = len(self.conv_lists) - 1
        filter_list = []
        for ind in range(self.num_conv_layers):
            filter_list.append(self.getFilter(self.filter_shape, \
                                self.conv_lists[ind], self.conv_lists[ind+1]))
        return filter_list

    def getWeights(self, input_size, output_size):
        # Dense-layer weight matrix [flattened_features, classes].
        weights_shape = [input_size, output_size]
        return tf.Variable(self.initializer(weights_shape))

    def getBias(self, output_size):
        # Dense-layer bias vector [classes].
        bias_shape = [output_size]
        return tf.Variable(self.initializer(bias_shape))

    def getFilter(self, filter_shape=[3, 3], input_ch=1, output_ch=16):
        # Conv kernel shaped [*spatial_dims, in_channels, out_channels].
        filter_full_shape = filter_shape + [input_ch, output_ch]
        return tf.Variable(self.initializer(filter_full_shape))

    def conv3D(self, x, filters, strides, padding="SAME"):
        # Despite the name, rank follows *filters*, so 2-D also works.
        output = nn.convolution(x, filters, strides = strides,\
                                padding = padding)
        return output

    def mlp(self, x):
        # Final affine projection to class logits (no activation).
        output = tf.matmul(x, self.weights) + self.bias
        return output

    def __call__(self, x):
        """Forward pass: conv + stride-2 max-pool stages, flatten, dense head.
        Returns raw logits (softmax is applied by the caller)."""
        strides_len = (len(self.input_shape) - 1)
        output = x
        for conv_i in range(len(self.conv_lists)-1):
            output = self.conv3D(output, self.filters[conv_i],
                        strides = [1]*strides_len, padding = "SAME")
            output = nn.pool(output, [3]*strides_len, "MAX", [2]*strides_len, "SAME")
        output = tf.reshape(output, [output.shape[0], -1])
        output = self.mlp(output)
        return output
def getLoss(logits, labels):
    """Per-example softmax cross-entropy (note the tf.nn order: labels first)."""
    per_example_loss = nn.softmax_cross_entropy_with_logits(labels, logits)
    return per_example_loss
def getOptimizer(lr):
    """Build the Adam optimizer used for training (lr may be a schedule)."""
    optimizer = k.optimizers.Adam(learning_rate=lr)
    return optimizer
def getLrDecay(lr_initial, decay_steps):
    """Staircased exponential LR schedule: multiply by 0.96 every *decay_steps*."""
    schedule = k.optimizers.schedules.ExponentialDecay(
        lr_initial, decay_steps, 0.96, True)
    return schedule
def getMetrics():
    """Return the evaluation metric for the 10-class MNIST task.

    Bug fix: the original returned BinaryAccuracy, which thresholds each of
    the 10 softmax outputs at 0.5 independently and reports a misleading
    score for one-hot multiclass labels.  CategoricalAccuracy (the
    commented-out alternative in the original) compares argmaxes and is the
    correct metric here.
    """
    return k.metrics.CategoricalAccuracy()
@tf.function
def train_step(model, optimizer, data, labels):
    """Run one gradient-descent step on a batch; return the mean batch loss."""
    with tf.GradientTape() as tape:
        logits = model(data)
        loss = getLoss(logits, labels)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    # Scalar for display only; the optimizer consumed the per-example loss.
    display_loss = tf.math.reduce_mean(loss)
    return display_loss
@tf.function
def test_step(model, metrics, data, labels):
    """Accumulate *metrics* with the model's softmax predictions for one batch."""
    logits = model(data)
    pred = nn.softmax(logits)
    # update_state returns an op; the result is read later via metrics.result().
    _ = metrics.update_state(labels, pred)
def trainDataGenerator(x_train, y_train, batch_size):
    """Yield (batch_data, batch_labels, total_batches, batch_index) tuples.

    Trailing samples that do not fill a whole batch are dropped.
    """
    num_batches = len(x_train) // batch_size
    for batch_id in range(num_batches):
        start = batch_id * batch_size
        stop = start + batch_size
        yield x_train[start:stop], y_train[start:stop], num_batches, batch_id
def main():
    """Train TestModel on a 3D-tiled MNIST variant with checkpoints and TensorBoard logs.

    FIXES vs original:
      * the evaluation loop passed the full x_test / y_test to test_step on
        every iteration instead of the generated batch (test_data, test_labels);
      * removed the unused `checkpnt_prefix` local.
    """
    # --- dataset -----------------------------------------------------------
    mnist = k.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train, x_test = tf.cast(x_train, tf.float32), tf.cast(x_test, tf.float32)
    # Insert depth/channel axes and tile to a cubic volume: (N, H, H, W, 1).
    x_train = x_train[:, :, tf.newaxis, :, tf.newaxis]
    x_test = x_test[:, :, tf.newaxis, :, tf.newaxis]
    x_train = tf.tile(x_train, [1, 1, x_train.shape[1], 1, 1])
    x_test = tf.tile(x_test, [1, 1, x_train.shape[1], 1, 1])
    y_train = tf.one_hot(y_train, 10)
    y_test = tf.one_hot(y_test, 10)

    # --- basic training parameters ----------------------------------------
    batch_size = 10
    epoch = 50
    lr_initial = 2e-4
    decay_steps = 1000

    # --- customized parameters --------------------------------------------
    input_shape = x_train.shape[1:]
    print("--------------- input shape -----------------")
    print(input_shape)
    conv_channels_list = [16, 32]
    output_size = y_train.shape[-1]

    # --- model / optimizer / metrics --------------------------------------
    m = TestModel(input_shape, conv_channels_list, output_size)
    lr = getLrDecay(lr_initial, decay_steps)
    optimizer = getOptimizer(lr)
    metrics = getMetrics()

    # --- tensorboard settings ---------------------------------------------
    log_dir = "./log/"
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    summary_writer = tf.summary.create_file_writer(log_dir)

    # --- checkpoint settings ----------------------------------------------
    checkpnt_dir = "./checkpoint"
    if not os.path.exists(checkpnt_dir):
        os.mkdir(checkpnt_dir)
    checkpoint = tf.train.Checkpoint(model=m)
    ckpt_manager = tf.train.CheckpointManager(checkpoint, checkpnt_dir, max_to_keep=3)

    # Resume from the latest checkpoint if one exists.
    checkpoint.restore(ckpt_manager.latest_checkpoint)
    if ckpt_manager.latest_checkpoint:
        print("Restored from {}".format(ckpt_manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")

    # --- training loop -----------------------------------------------------
    for epoch_i in tqdm(range(epoch)):
        for data, labels, total_batches, batch_id in \
                trainDataGenerator(x_train, y_train, batch_size):
            loss = train_step(m, optimizer, data, labels)
            step = total_batches * epoch_i + batch_id
            if step % 100 == 0:
                # Periodic evaluation over the whole test set, batch by batch.
                metrics.reset_states()
                for test_data, test_labels, _, _ in \
                        trainDataGenerator(x_test, y_test, batch_size):
                    # FIX: evaluate the current batch, not the full test set.
                    test_step(m, metrics, test_data, test_labels)
                acc = metrics.result()
                with summary_writer.as_default():
                    tf.summary.scalar('loss', loss, step=step)
                    tf.summary.scalar('accuracy', acc, step=step)
                save_path = ckpt_manager.save()
                print("Saved checkpoint for step {}: {}".format(int(step), save_path))


if __name__ == "__main__":
    main()
| [
"zhangaocanada@gmail.com"
] | zhangaocanada@gmail.com |
14e1ced50a009fefd68e6f8b8fd0bc506b335fb4 | c596fad20771dadba78c4be2b4ffb02ac62526f8 | /Lektion 6/opgave 2.py | 3d79fe0222497b40742e0584460f8b6a7cfe9ebb | [] | no_license | PanTroglodytes/pythonkursus | 717b34d3c97c89c38f6a0eb5508a2ba72b20133b | cdf6faecc37feef7679c470e1656952fe047315f | refs/heads/master | 2020-04-23T08:43:07.659029 | 2019-03-25T06:03:21 | 2019-03-25T06:03:21 | 171,046,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import klasser as k
# Instantiate sample library items; the classes live in klasser.py.
bog1 = k.Bog(12345680,"Python Crash Course",1,0,2016,525,"Eric Matthes")
materiale2 = k.Materiale(12345679,"Absolute Jacoby",2,1,2020)  # NOTE(review): created but never printed
film1 = k.Film(12345681,"Alien",15,2,1978,"Ridley Scott",124)
# Print human-readable summaries of the book and the film.
print(bog1.tostring())
print(film1.tostring())
| [
"noreply@github.com"
] | PanTroglodytes.noreply@github.com |
1209bebfea5666bb0bb500481f397bfdbf6a3388 | 3ca67d69abd4e74b7145b340cdda65532f90053b | /programmers/난이도별/level01.숫자_문자열과_영단어/sangmandu.py | bb3a59dcba7d466757ec9492730d0ba4801d0876 | [] | no_license | DKU-STUDY/Algorithm | 19549516984b52a1c5cd73e1ed1e58f774d6d30e | 6f78efdbefd8eedab24e43d74c7dae7f95c2893b | refs/heads/master | 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 | Python | UTF-8 | Python | false | false | 559 | py | '''
https://programmers.co.kr/learn/courses/30/lessons/81301
숫자 문자열과 영단어
[풀이]
1. list의 index와 실제 영어 단어를 매칭
2. replace 함수는 한번만이 아니라 해당하는 모든 문자열을 바꾼다.
=> find 함수는 하나만 찾기 때문에 이런 문자열 함수들을 자세히 알아둬야함
'''
def solution(s):
    """Replace spelled-out digit words in `s` with digits and return the int.

    str.replace substitutes every occurrence, so all digit words are handled
    in a single pass per word.
    """
    words = ['zero', 'one', 'two', 'three', 'four', 'five',
             'six', 'seven', 'eight', 'nine', 'ten']
    for digit, word in enumerate(words):
        s = s.replace(word, str(digit))
    return int(s)
'''
''' | [
"45033215+sangmandu@users.noreply.github.com"
] | 45033215+sangmandu@users.noreply.github.com |
05fed7e484d6901c2f187f70dbd36135174e0781 | c7ddc78c982348773ece7c81e46a8ec59fdf4e5f | /git2labnotebook/git2labnotebook.py | c4c834107d85d78ae893d25d88214b25638ac936 | [
"MIT"
] | permissive | afrendeiro/git2labnotebook | a96b476467c96838be6fea6e93490f2dc53e1ab3 | 4ddee343bcec33e123c267d75880f8196d3bb96f | refs/heads/master | 2021-01-22T06:28:36.007728 | 2017-10-24T09:44:40 | 2017-10-24T09:44:40 | 81,766,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,809 | py | #!/usr/bin/env python
import datetime
import os
import sys
from argparse import ArgumentParser
import git
import pandas as pd
from pkg_resources import resource_filename
try:
import pypandoc
except ImportError:
print("PDF output requires pandoc and pypandoc installed. Will not output PDF.")
def valid_date(s):
    """argparse `type=` helper: parse a YYYYMMDD string into a datetime.

    Raises argparse.ArgumentTypeError on malformed input so argparse reports
    a clean usage error instead of a traceback.
    """
    import argparse
    try:
        return datetime.datetime.strptime(s, "%Y%m%d")
    except ValueError:
        raise argparse.ArgumentTypeError("Not a valid date: '{0}'.".format(s))
def parse_arguments():
    """Build the command-line parser and return parse_known_args() output.

    Returns a (namespace, remaining_args) tuple; `repo_filter` is split from a
    comma-separated string into a list before returning.
    """
    description = "%(prog)s - Make labnotebook entries from git commits."
    epilog = "For command line options of each command, type: %(prog)s COMMAND -h"
    epilog += "\nhttps://github.com/afrendeiro/git2labnotebook"
    parser = ArgumentParser(description=description, epilog=epilog)
    parser.add_argument("--version", action="version", version="%(prog)s " + "get version")
    parser.add_argument(
        "-d", "--repos-dir",
        dest="repos_dir",
        default=os.path.abspath(os.path.curdir),
        help="Directory with git repositories to inspect. Default=current directory.")
    today = datetime.datetime.now().strftime('%Y%m%d')
    parser.add_argument(
        '-o', '--output-dir', dest="output_dir",
        default=os.path.join(os.path.expanduser("~"), "git2labnotebook", today),
        help="Directory to write output to. Will be created if not existing. Default is the $HOME/git2labnotebook/{}.".format(today))
    parser.add_argument(
        '-f', '--from', dest="date_from",
        type=valid_date, default=None,
        help="Earlier date to filter entries to. Format=YYYYMMDD. Default=None.")
    # NOTE(review): the default here is the string `today`, not a datetime like
    # the values valid_date produces -- confirm downstream handling.
    parser.add_argument(
        '-t', '--to', dest="date_to",
        type=valid_date, default=today,
        help="Later date to filter entries to. Format=YYYYMMDD. Default=None.")
    parser.add_argument(
        '-u', '--git-user', dest="git_user",
        default=None,
        help="Git username to filter commits for (all others will be ignored). Default None.")
    parser.add_argument(
        '-fr', '--filter-repos', dest="repo_filter",
        default=None,
        help="Comma-separated list of repositories to ignore. Default None.")
    parser.add_argument(
        '--no-summary', dest="daily_summary", action="store_false",
        help="Whether daily entries will not contain a summary of lines changed. Default=False.")
    parser.add_argument(
        '--fill', dest="fill", action="store_true",
        help="Whether workdays without entries should be filled with random messages from a pool. Default=False.")
    args = parser.parse_known_args()
    # Normalize the comma-separated repo filter into a list.
    if args[0].repo_filter is not None:
        args[0].repo_filter = args[0].repo_filter.split(",")
    return args
def get_commits(repos_dir):
    """Collect `git log --numstat` entries for every git repository under repos_dir.

    Returns a DataFrame (see parse_git_log_stats) with an extra "repository"
    column naming the subdirectory each commit came from.

    FIXES vs original:
      * DataFrame.append was removed in pandas 2.0 -- collect frames in a list
        and concatenate once;
      * the bare `except:` also swallowed SystemExit/KeyboardInterrupt; narrow
        it to Exception (still treated as "not a git repo, skip").
    """
    frames = []
    for repo in os.listdir(repos_dir):
        repo_dir = os.path.join(repos_dir, repo, ".git")
        # run git log command
        try:
            g = git.Git(repo_dir)
            out = g.log("--numstat", "--date=iso")
        except Exception:  # will occur if not a git repo
            continue
        parsed = parse_git_log_stats(out)
        parsed["repository"] = repo
        frames.append(parsed)
    return pd.concat(frames) if frames else pd.DataFrame()
def parse_git_log_stats(log):
    """Parse `git log --numstat` output into a DataFrame.

    Each numstat file line yields two rows (one per change type):
    hash, author, date, commit message, file name, change_type ("ins"/"del"),
    and the change count for that file.
    """
    rows = []
    for line in log.split("\n"):
        if line.startswith("commit"):
            # New commit header: "commit <hash>".
            commit = line.split(" ")[1]
        elif line.startswith("Author:"):
            author = line.split("Author:")[1].strip()
        elif line.startswith("Date:"):
            date = line.split("Date:")[1].strip()
        elif line.startswith(" " * 4):
            # Commit messages are indented by four spaces in git log output.
            msg = line.strip()
        elif line.startswith(tuple("0123456789-")):
            # Numstat line: "<insertions>\t<deletions>\t<file>" ("-" for binary).
            file_name = line.split("\t")[-1]
            for change, change_type in zip(line.split("\t")[:-1], ("ins", "del")):
                rows.append([commit, author, date, msg, file_name, change_type, change])
    return pd.DataFrame(
        rows, columns=["hash", "author", "date", "message", "file_name", "change_type", "change"])
def gitlog_to_markdown(git_log, min_day=None, max_day=None, daily_summary=True, fill_messages=False):
    """Render a parsed git log (see parse_git_log_stats) as markdown entries.

    One section per workday, listing each repository's unique commit messages,
    optionally followed by a line-change summary. With fill_messages=True,
    workdays without commits get a filler entry.

    FIXES vs original:
      * `.ix` was removed from pandas -- use `.loc[[day]]`, which also keeps the
        result a DataFrame when a day has a single commit row (`.ix` returned a
        Series there, breaking the column accesses below);
      * after appending a filler entry for an empty day, the code fell through
        and reused a stale/undefined `day_commits`; we now skip to the next day;
      * the footer looked up `day.date().isoformat()` (a string) against an
        index of date objects, raising KeyError; and days with only insertions
        or only deletions no longer fail (missing side defaults to 0).
    """
    t_day_header = """# {date}\n"""
    t_repo = """### "{repo}" project.\n"""
    t_commit = """ - {message}"""
    t_day_footer = """Total of {} deleted/modified lines and {} added lines across all projects.\n"""

    git_log = git_log.copy()  # do not mutate the caller's frame
    git_log["date"] = pd.to_datetime(git_log["date"])
    git_log = git_log.sort_values("date").set_index(['date'])
    git_log.index = git_log.index.date

    # Total changed lines per (day, change_type); "-" marks binary files.
    daily_changes = git_log.reset_index().groupby(["index", "change_type"])["change"].apply(
        lambda x: sum(int(i) for i in x if i != "-"))

    if min_day is None:
        min_day = git_log.index.min()
    else:
        min_day = pd.to_datetime(min_day).to_pydatetime()
    if max_day is None:
        max_day = git_log.index.max()
    else:
        max_day = pd.to_datetime(max_day).to_pydatetime()

    text = list()
    for day in pd.date_range(min_day, max_day, freq="D"):
        is_workday = day.isoweekday() < 6
        if day.date() not in git_log.index:
            if fill_messages and is_workday:
                text.append(t_day_header.format(date=day.date()))
                text.append(t_commit.format(message="Reading papers.\n"))
            continue
        if not is_workday:
            continue
        # List selector keeps a DataFrame even for a single matching row.
        day_commits = git_log.loc[[day.date()]]
        text.append(t_day_header.format(date=day.date()))
        for repo in day_commits["repository"].drop_duplicates():
            text.append(t_repo.format(repo=repo))
            for commit in day_commits[day_commits["repository"] == repo]["hash"].drop_duplicates():
                text.append(t_commit.format(
                    message=git_log[git_log["hash"] == commit]["message"].drop_duplicates().squeeze()))
        if daily_summary:
            day_changes = daily_changes.loc[day.date()]
            text.append(t_day_footer.format(day_changes.get("del", 0), day_changes.get("ins", 0)))
    return text
def markdown_to_docx(markdown_input, pdf_output):
    """Convert a markdown file to DOCX via pypandoc, using the bundled reference
    document as a style template. Failures are printed, not raised."""
    try:
        template = resource_filename(
            "git2labnotebook", os.path.join("pandoc_templates", "reference.docx"))
        pypandoc.convert_file(
            markdown_input, 'docx',
            outputfile=pdf_output,
            extra_args=["--reference-docx={}".format(template)])
    except RuntimeError as err:
        print("Conversion to DOCX failed:")
        print(err)
def markdown_to_pdf(markdown_input, pdf_output):
    """Convert a markdown file to PDF via pypandoc/xelatex with 3cm margins and
    10pt font. Failures are printed, not raised."""
    try:
        pandoc_args = ["--latex-engine=xelatex",
                       '-V', 'geometry:margin=3cm',
                       '-V', 'fontsize=10pt']
        pypandoc.convert_file(
            markdown_input, 'latex', outputfile=pdf_output, extra_args=pandoc_args)
    except RuntimeError as err:
        print("Conversion to PDF failed:")
        print(err)
def main():
    """Entry point: collect commits, filter, dump a CSV log, then render the
    markdown notebook and convert it to DOCX and PDF.

    FIXES vs original:
      * the two conversion calls were syntactically broken (missing
        `os.path.join(` openings) -- reconstructed as intended;
      * the repository filter was guarded by `args.git_user` instead of
        `args.repo_filter`, crashing on `"|".join(None)` whenever a user was
        given without a filter.
    """
    # Parse command-line arguments
    args, _ = parse_arguments()

    # Get commits for all repositories
    git_log = get_commits(args.repos_dir)

    # Filter by user if required
    if args.git_user:
        git_log = git_log[git_log["author"].str.contains(args.git_user)]

    # Filter projects if required (FIX: was `if args.git_user:`)
    if args.repo_filter:
        git_log = git_log[~git_log["repository"].str.contains("|".join(args.repo_filter))]

    # Save log
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    git_log.to_csv(os.path.join(args.output_dir, "git_log.csv"), index=False, encoding="utf-8")

    # Make entries in markdown
    text = gitlog_to_markdown(
        git_log,
        min_day=args.date_from, max_day=args.date_to,
        daily_summary=args.daily_summary, fill_messages=args.fill)
    notebook_md = os.path.join(args.output_dir, "notebook.md")
    with open(notebook_md, "w") as handle:
        handle.writelines("\n".join(text))

    # Output (FIX: restored the os.path.join calls)
    markdown_to_docx(notebook_md, os.path.join(args.output_dir, "notebook.docx"))
    markdown_to_pdf(notebook_md, os.path.join(args.output_dir, "notebook.pdf"))
if __name__ == '__main__':
    # Run the CLI; the process exit code comes from main(), and Ctrl-C exits
    # cleanly with status 1 instead of a traceback.
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        print("Program canceled by user!")
        sys.exit(1)
| [
"afrendeiro@gmail.com"
] | afrendeiro@gmail.com |
6e08160889fe8d6df4d53493afc406ae76304417 | 8469ddb78cfd06e69bc56ce22f939a37d7631e91 | /train.py | 65dcf67139f1830cee317842147150f0374d4550 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | tsbiosky/Imbalanced-Dataset-Project | 7dcc4181467eaea62d27b426e9f862192a7b05f2 | 26037dc1edc51228c22372638a1187f5f0ae15e4 | refs/heads/master | 2023-01-12T07:23:49.821501 | 2020-11-16T07:10:08 | 2020-11-16T07:10:08 | 287,502,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,186 | py | import time
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import cv2
import os
import numpy as np
from util.visualizer import Visualizer
# GAN-style training loop: alternates discriminator-only and joint updates on
# a CRITIC_ITERS schedule, checkpoints periodically, and dumps a per-epoch
# debug image (input | segmentation | fake) to ./image_debug/.
#
# FIXES vs original: the two bare `except:` clauses also swallowed
# SystemExit/KeyboardInterrupt -- narrowed to Exception; the hard-coded reset
# threshold `6` is now tied to CRITIC_ITERS.
opt = TrainOptions().parse()
opt.filename = 'train45'
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training images = %d' % dataset_size)

model = create_model(opt)
opt.isTrain = True

CRITIC_ITERS = 5  # discriminator-only steps per joint step (WGAN-style schedule)
total_steps = 0
iter_d = 0
only_d = False

for epoch in range(1, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    for i, data in enumerate(dataset):
        iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter = total_steps - dataset_size * (epoch - 1)
        model.set_input(data)
        # First CRITIC_ITERS iterations update everything; the next one is
        # discriminator-only, then the counter resets.
        only_d = iter_d > CRITIC_ITERS - 1
        try:
            model.optimize_parameters(only_d)
        except Exception:  # FIX: was a bare except
            print("skip image:")
            continue
        if total_steps % opt.save_latest_freq == 0:
            print('saving the latest model (epoch %d, total_steps %d)' %
                  (epoch, total_steps))
            model.save('latest')
        iter_d += 1
        if iter_d == CRITIC_ITERS + 1:  # FIX: was the magic number 6
            iter_d = 0
        try:
            errors = model.get_current_errors()
        except Exception:  # FIX: was a bare except
            print("skip image:")
            continue
        t = (time.time() - iter_start_time) / opt.batchSize
        print(epoch, errors)

    # End-of-epoch checkpointing and visual debug output.
    print('saving the model at the end of epoch %d, iters %d' %
          (epoch, total_steps))
    model.save('latest')
    model.save(epoch)

    re = model.get_current_visuals()
    img = re['img']
    seg = re['seg']
    fake = re['fake']
    dis = np.concatenate((img, seg, fake), axis=1)
    cv2.imwrite("./image_debug/" + str(epoch) + ".png", dis)

    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
    # Learning-rate decay only kicks in after the initial opt.niter epochs.
    if epoch > opt.niter:
        model.update_learning_rate()
"a419348285@outlook.com"
] | a419348285@outlook.com |
e83dd2ce6bbdb98e463c2811eb360298bd7c06e6 | 70f3b003bd37b0485b4dc77cb7dd7a44eef22d11 | /Ball_Tracking_Class.py | 6d25199582a9d70651aba9082d74ed8709034322 | [] | no_license | fotherja/SoccerBot-Control-for-RPi3 | 5970915f5a2b46200cf2535175a1d708aa9efe13 | d3b51e9b7b1d9590d1560f26bd6890ee7307faa8 | refs/heads/master | 2020-04-07T00:32:39.485749 | 2018-11-24T16:11:18 | 2018-11-24T16:11:18 | 157,907,905 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,534 | py | import numpy as np
import time
import cv2
class TrackingBallFns:
    """Detect and track an orange ball in 640x480 BGR frames, and extrapolate
    its future position for goal-line interception."""

    def __init__(self):
        # Current / previous detected centres and frame-to-frame displacement.
        self.Ball_Center = np.array([0, 0])
        self.Prev_Ball_C = np.array([0, 0])
        self.Displacement = np.array([0, 0])
        # Coarse displacement sampled every 200 ms, used for path prediction.
        self.Gross_Travel_Displacement = np.array([0, 0])
        self.Position_200ms_ago = np.array([0, 0])
        self.Last_Pos_Update_TS = 0

    def Constrain_Search_Region_Boundaries(self, center, size):
        """Return (xmin, xmax, ymin, ymax) of a size*size box around `center`,
        clamped so the box stays inside the 640x480 frame."""
        half = size / 2
        clamped = np.clip(center, [half, half], [639 - half, 479 - half])
        return (int(clamped[0] - half), int(clamped[0] + half),
                int(clamped[1] - half), int(clamped[1] + half))

    def Detect_Ball(self, frame):
        """Blur, HSV-threshold, and return (centre, found_flag) for the largest
        orange blob in `frame` (centre is in frame coordinates)."""
        blurred = cv2.GaussianBlur(frame, (5, 5), 0)
        hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV)
        # Narrow orange hue band with high saturation.
        mask = cv2.inRange(hsv, np.array([7, 170, 25]), np.array([12, 255, 255]))
        _, contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if len(contours) != 0:
            largest = max(contours, key=cv2.contourArea)
            moments = cv2.moments(largest)
            if moments['m00'] == 0:
                return np.array([0, 0]), 0
            self.Ball_Center = np.array([int(moments['m10'] / moments['m00']),
                                         int(moments['m01'] / moments['m00'])])
            return self.Ball_Center, 1
        return np.array([0, 0]), 0

    def Track_Ball(self, frame):
        """Search a 60px window around the predicted next position, update the
        stored centre, and return (centre, found_flag)."""
        self.Displacement = np.clip(self.Ball_Center - self.Prev_Ball_C, -20, 20)
        predicted = self.Ball_Center + self.Displacement
        xmin, xmax, ymin, ymax = self.Constrain_Search_Region_Boundaries(predicted, 60)
        local_pos, found = self.Detect_Ball(frame[ymin:ymax, xmin:xmax])
        # Convert the sub-frame coordinates back to full-frame coordinates.
        self.Ball_Center = local_pos + [xmin, ymin]
        self.Prev_Ball_C = self.Ball_Center
        return self.Ball_Center, found

    def perp(self, a):
        """Return `a` rotated 90 degrees: (x, y) -> (-y, x)."""
        rotated = np.empty_like(a)
        rotated[0] = -a[1]
        rotated[1] = a[0]
        return rotated

    def Segment_Intersect(self, a1, a2, b1, b2):
        """Intersection point of the infinite lines through a1-a2 and b1-b2."""
        da, db, dp = a2 - a1, b2 - b1, a1 - b1
        dap = self.perp(da)
        denom = np.dot(dap, db)
        num = np.dot(dap, dp)
        return (num / denom.astype(float)) * db + b1

    def Ball_Heading_For_Goal(self, a1, a2, b1, b2):
        """Return (point, True) where the ball's path crosses the goal line
        (clamped to y in [120, 330]), else a default rest point and False."""
        hit = self.Segment_Intersect(a1, a2, b1, b2)
        if 0 < hit[1] < 479:
            hit[1] = np.clip(hit[1], 120, 330)
            return hit, True
        return np.array([50, 225]), False

    def Predict_Ball_Path(self, frame_timestamp):
        """Extrapolate the ball position 2 s ahead from its gross travel over
        the last 200 ms (the 200 ms sample is refreshed at most 5x/second)."""
        if time.time() > self.Last_Pos_Update_TS + 0.2:
            self.Last_Pos_Update_TS = time.time()
            self.Gross_Travel_Displacement = self.Ball_Center - self.Position_200ms_ago
            self.Position_200ms_ago = self.Ball_Center
        return self.Ball_Center + (self.Gross_Travel_Displacement * 10)
| [
"noreply@github.com"
] | fotherja.noreply@github.com |
1a73d0345f90c3568d494d72d2fd48dd037f3ee8 | 4a24e79774288b9a6cfeddafadbf6761c1bf6024 | /57. Insert Interval.py | 80429131e04979eb1231639a2d0ac0d870f63d4e | [] | no_license | NemanjaStamatovic/LeetCode | d8cb243419e96b86b799332edc70139e48c8e382 | fbe6187992547781e3abeee614e0c668a0f40150 | refs/heads/main | 2023-04-04T22:45:49.762445 | 2021-04-06T16:58:00 | 2021-04-06T16:58:00 | 316,270,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | class Solution:
def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
intervals.append(newInterval)
intervals = sorted(intervals, key=lambda x: x[0])
merged = []
for interval in intervals:
if len(merged) == 0:
merged.append(interval)
elif merged[-1][1] < interval[0]:
merged.append(interval)
else:
merged[-1][1] = max(merged[-1][1], interval[1])
return merged | [
"noreply@github.com"
] | NemanjaStamatovic.noreply@github.com |
f680a1a80743e7f387486682b9717f2bc5353e9a | 68e2b27475f2073d9c76559406aec2f6c457cdcd | /Problems/Logistic function/task.py | d6a061be4477da878fcad1d7846b43c6128209b8 | [] | no_license | thuylinh225/Hyperskill_credit_calculator | 6cc15aadf5596dd3603a98d9a393918fd0815792 | d8bb85a9a195a700334923b7e10c67021c712151 | refs/heads/master | 2022-11-29T07:33:50.976990 | 2020-07-31T20:54:00 | 2020-07-31T20:54:00 | 284,127,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | import math
x = int(input())
logistic = math.exp(x) / (math.exp(x) + 1)
print(round(logistic, 2))
| [
"tranthuylinh225@gmail.com"
] | tranthuylinh225@gmail.com |
864c3a9ce4df58453c97c4f74092ffeb671660d1 | ea92cc24b190018e80d31f4810a8cb60e80bc937 | /greetings/urls.py | 42055bfcabd3781af6fc664bf6b4aa63f5743712 | [] | no_license | rahul2240/helloworld | c1f86475206ba97283a4d1f00a5cb5ce9e9311bc | 191ed9f8442c64e2f75c5dd601cd1bc50f3ec6b4 | refs/heads/master | 2021-08-06T08:00:37.406345 | 2017-11-04T06:17:05 | 2017-11-04T06:17:15 | 109,423,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | from django.conf.urls import url
from .views import index
urlpatterns = [
url(r'^hello$', index),
]
| [
"rahulsingh2240@gmail.com"
] | rahulsingh2240@gmail.com |
e54b375edca33484ad465ffc60506649f2ce2916 | fcef3602a044a82b75eb1bdee87a5eb347a56769 | /recolo/demoData/dataverseData.py | 8c45c01e537a093ab4a725407bd8dc7ae84b4a57 | [
"MIT"
] | permissive | PolymerGuy/recolo | 5cb9c6b01d7eeb4108710606341518aa13efc1d1 | 05b14f0834fa675579eabdf43fac046259df19bb | refs/heads/master | 2023-04-12T00:17:50.150126 | 2022-03-11T12:42:44 | 2022-03-11T12:42:44 | 343,329,602 | 4 | 1 | MIT | 2022-03-05T08:04:49 | 2021-03-01T07:39:40 | Python | UTF-8 | Python | false | false | 4,815 | py | import numpy as np
import wget
import logging
import requests
from bs4 import BeautifulSoup
from urllib import parse
from recolo import list_files_in_folder
import os
import pathlib
cwd = pathlib.Path(__file__).parent.resolve()
"""" This module provides data from an impact hammer experiment where the hammer was knocked onto a steel plate while
the deformation of the plate was measured using using a deflectometry setup.
The data consists of force readings from the impact hammer as well as the images of the mirror plate during deformation.
The dataset is hosted at https://dataverse.no/ and is automatically downloaded when ImpactHammerExperiment is
instantiated the first time.
"""
def find_dataverse_files(repo_url, params=None):
    """Return absolute URLs of every file linked from a Dataverse
    directory-index page at `repo_url`.

    FIX: the original used a mutable default argument (`params={}`); it is now
    None and normalized inside the function. Callers are unaffected.

    Raises requests.HTTPError (via raise_for_status) on a failed request.
    """
    if params is None:
        params = {}
    dataverse_base = '{uri.scheme}://{uri.netloc}/'.format(uri=parse.urlparse(repo_url))
    response = requests.get(repo_url, params=params)
    if response.ok:
        response_text = response.text
    else:
        # raise_for_status() raises for 4xx/5xx; the return is unreachable in
        # practice but preserved for interface compatibility.
        return response.raise_for_status()
    soup = BeautifulSoup(response_text, 'html.parser')
    file_urls = [parse.urljoin(dataverse_base, node.get('href')) for node in soup.find_all('a')]
    return file_urls
class DataverseDataSet(object):
    """Download all files of a Dataverse dataset (by DOI) into a local folder,
    skipping the download when the data is already present."""

    def __init__(self, DOI, path_to_data=None, new_data_folder_name="data"):
        """
        Download data from an Dataverse repository.

        Parameters
        ----------
        DOI : str
            The dataset's DOI on dataverse.no.
        path_to_data : str
            A path to the preferred location where the data is stored.
        new_data_folder_name : str
            The name of the new data folder (used when path_to_data is None).

        Examples
        --------
        DOI = "10.18710/TKS7SI"
        data = DataverseDataSet(DOI,new_data_folder_name="hammer_data")
        """
        self._logger_ = logging.getLogger(self.__class__.__name__)
        # Presence of this file marks the dataset as fully downloaded.
        self._check_for_file_name_ = "00_ReadMe.txt"
        self._dataverse_file_repo = 'https://dataverse.no/api/datasets/:persistentId/dirindex?persistentId=doi:'
        self.DOI = DOI
        self.repo_url = self._dataverse_file_repo + DOI
        self._file_urls_ = find_dataverse_files(self.repo_url)
        self._logger_.info("Found %i files in the repo" % len(self._file_urls_))

        if path_to_data:
            self.path_to_data_folder = path_to_data
        else:
            self.path_to_data_folder = os.path.join(cwd, new_data_folder_name)
            self._logger_.info("No image path was specified.\nImage folder is: %s" % self.path_to_data_folder)

        if self.__data_is_downloaded__():
            self._logger_.info("Experimental data was found locally")
        else:
            self._logger_.info("Downloading experimental data")
            try:
                self.__download_dataset__()
            except Exception as exc:
                self._logger_.exception(exc)
                raise
        self.file_paths = list_files_in_folder(self.path_to_data_folder, file_type='', abs_path=True)

    def __data_is_downloaded__(self):
        """True when the marker ReadMe file exists in the data folder."""
        readme_path = os.path.join(self.path_to_data_folder, self._check_for_file_name_)
        return os.path.exists(readme_path)

    def __download_dataset__(self):
        """Create the data folder if needed and fetch every file URL with wget."""
        if not os.path.exists(self.path_to_data_folder):
            self._logger_.info("Creating folder %s" % self.path_to_data_folder)
            os.makedirs(self.path_to_data_folder)
        for file_url in self._file_urls_:
            try:
                wget.download(file_url, out=self.path_to_data_folder)
            except Exception:
                raise
class ImpactHammerExperiment(DataverseDataSet):
    """Impact-hammer deflectometry dataset (DOI 10.18710/TKS7SI): hammer force
    readings plus .tif images of the mirror plate during deformation."""

    def __init__(self, path_to_data=None):
        """
        Data from an experiment with an impact hammer and a deflectometry setup.

        Parameters
        ----------
        path_to_data : str
            A path to the preferred location where the data is stored.
        """
        self._hammer_force_filename_ = "hammer_force.txt"
        self._hammer_time_filename_ = "hammer_time.txt"
        super().__init__('10.18710/TKS7SI', path_to_data=path_to_data,
                         new_data_folder_name="impact_hammer_data")
        self.img_paths = list_files_in_folder(self.path_to_data_folder, file_type=".tif", abs_path=True)
        self.n_images = len(self.img_paths)

    def hammer_data(self):
        """
        Impact hammer data.

        Returns
        -------
        force, time : ndarray, ndarray
        """
        force = np.genfromtxt(os.path.join(self.path_to_data_folder, self._hammer_force_filename_))
        time = np.genfromtxt(os.path.join(self.path_to_data_folder, self._hammer_time_filename_))
        return force, time
| [
"sindre.n.olufsen@ntnu.no"
] | sindre.n.olufsen@ntnu.no |
82014c6a6505ef2b805778e153ee1eb3fe9cf736 | a54baf26c2404f47702efe13e6243916d65c9a92 | /models/helpers.py | 23e1839d79f9010cc3f80cee88f35d5e2034bdeb | [] | no_license | arjunchatterjee196/chatbot | 8c78a07ac49c5bf3f908dfa30b2c3bc241f6db63 | a6456d281739e3b802f25352367f43b970aab01b | refs/heads/master | 2020-06-19T16:40:26.249026 | 2016-11-26T19:36:19 | 2016-11-26T19:36:19 | 74,846,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | import array
import numpy as np
import tensorflow as tf
from collections import defaultdict
def load_vocab(filename):
    """Load a vocabulary file (one token per line).

    Returns [vocab_list, word_to_index] where word_to_index is a
    defaultdict(int) -- NOTE: unknown words therefore silently map to index 0.
    """
    with open(filename) as handle:
        vocab = handle.read().splitlines()
    word_to_idx = defaultdict(int)
    for idx, word in enumerate(vocab):
        word_to_idx[word] = idx
    return [vocab, word_to_idx]
def load_glove_vectors(filename, vocab):
    """
    Load glove vectors from a .txt file.
    Optionally limit the vocabulary to save memory. `vocab` should be a set.

    Returns [matrix, word_to_index] where matrix has one row per kept word.
    NOTE(review): `tf.logging` only exists in TensorFlow 1.x -- confirm the
    project's TF version before reuse.
    """
    word_to_idx = {}
    values = array.array('d')
    next_idx = 0
    with open(filename, "r", encoding="utf-8") as handle:
        for line in handle:
            tokens = line.split(" ")
            word, entries = tokens[0], tokens[1:]
            # Keep the word when no vocab filter is given or it is in vocab.
            if not vocab or word in vocab:
                word_to_idx[word] = next_idx
                values.extend(float(x) for x in entries)
                next_idx += 1
            word_dim = len(entries)
    num_vectors = len(word_to_idx)
    tf.logging.info("Found {} out of {} vectors in Glove".format(num_vectors, len(vocab)))
    return [np.array(values).reshape(num_vectors, word_dim), word_to_idx]
def build_initial_embedding_matrix(vocab_dict, glove_dict, glove_vectors, embedding_dim):
    """Build a float32 (len(vocab_dict), embedding_dim) embedding matrix:
    random uniform in [-0.25, 0.25), with rows overwritten by GloVe vectors
    for every word present in both dictionaries.

    FIXES vs original:
      * the function built the matrix but never returned it;
      * `vocab_dict.get(word)` could be None for words missing from the vocab,
        and indexing with None would corrupt the whole matrix via broadcasting
        -- such words are now skipped.
    """
    initial_embeddings = np.random.uniform(-0.25, 0.25, (len(vocab_dict), embedding_dim)).astype("float32")
    for word, glove_word_idx in glove_dict.items():
        word_idx = vocab_dict.get(word)
        if word_idx is not None:
            initial_embeddings[word_idx, :] = glove_vectors[glove_word_idx]
    return initial_embeddings
| [
"arjruler93@gmail.com"
] | arjruler93@gmail.com |
adf624ec2cb3565ac1f3dc6efd122adb4042a1b1 | e74d3ecb941c358b03a8283a3530a66d7eafe7d4 | /run-mongo.py | 21649fbbe46f15ae70a683c4db04e16a22c65ded | [] | no_license | tracktor1/mongo | 53c72edbbc46b1719ead24673993899a2ba77a67 | f4ea46d734794e00e5886eabb678190543856eec | refs/heads/master | 2020-04-03T13:35:54.448050 | 2018-11-03T21:58:08 | 2018-11-03T21:58:08 | 155,289,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | #!/usr/bin/env python
import os
import sys
import subprocess
import argparse
# CLI wrapper around the mongo Ansible playbook: by default run the whole
# playbook; with `-u user` run only the user-creation tag.
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--user", help='create user only', default="no_arg")
args = parser.parse_args()

if args.user == "no_arg":
    subprocess.call(["ansible-playbook", "-K", "./mongo.yml"])
elif args.user == 'user':
    subprocess.call(["ansible-playbook", "-K", "./mongo.yml", "--tags", "user"])
else:
    print("Wrong Parameter, only option is -u user")
"39525540+tracktor1@users.noreply.github.com"
] | 39525540+tracktor1@users.noreply.github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.