blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bdd19fb17c474f5eeaa628fb70529da272f562b7
|
e4bb81b29b21b29443a8138c7a64562b981275e8
|
/data/dataloader.py
|
5e8054ed2df8dbf103a4775ead7329b51eddd697
|
[] |
no_license
|
DaKup/HandPoseShapeVAE
|
afc8a8fe400e5d030cc821cb371282237cf85530
|
6ca6a173810ad42a99ead355e719606e17d8c832
|
refs/heads/master
| 2022-12-03T22:47:55.400480
| 2020-08-09T16:54:14
| 2020-08-09T16:54:14
| 284,481,182
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,545
|
py
|
import sys
import os
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
from data.importers import MSRA15Importer
from data.dataset import MSRA15Dataset, MSRA15ImporterWrapper
from util.handdetector import HandDetector
def create_dataloader(data_dir: Path, batch_size: int, normalize_input=True, max_persons=-1, train_mode="mixed", pose_dict_path=None, shuffle=False):
if os.path.isfile(data_dir):
data = np.load(data_dir)
if 'train_gt3DCrop_norm' in data:
dataset = torch.utils.data.TensorDataset(torch.from_numpy(data['train_data']), torch.from_numpy(data['train_gt3DCrop_norm']))
else:
dataset = torch.utils.data.TensorDataset(torch.from_numpy(data['train_data']))
dataloader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=None,
batch_sampler=None,
num_workers=0,
#collate_fn=default_collate,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None
)
return dataloader
importer = MSRA15ImporterWrapper(MSRA15Importer(data_dir), normalize_input=normalize_input, max_persons=max_persons)
    if pose_dict_path is not None:
dataset = MSRA15Dataset(importer, train_mode=train_mode, pose_dict_path=pose_dict_path, batch_size=batch_size)
batch_size = 1
else:
dataset = MSRA15Dataset(importer)
dataloader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
sampler=None,
batch_sampler=None,
num_workers=0,
#collate_fn=default_collate,
pin_memory=False,
drop_last=False,
timeout=0,
worker_init_fn=None
)
return dataloader
def load_frame(filename: Path, com=None, size=(250, 250, 250), dsize=(128, 128), docom=False, cube=None):
# No ImporterWrapper here, because we don't want to load all sequences, we just need a way to load frames from single files:
importer = MSRA15Importer(basepath=None)
dpt = importer.loadDepthMap(filename)
hand_detector = HandDetector(dpt, importer.fx, importer.fy, refineNet=importer.refineNet, importer=importer)
if not hand_detector.checkImage(1.):
sys.exit("No hand detected")
#try:
cropped_hand_depth, joint_transf_mat, com = hand_detector.cropArea3D(com=None, size=size, dsize=dsize, docom=docom) # size=config['cube']
# except UserWarning:
# #sys.exit("Skipping file {}, no hand detected".format(filename))
# print("Skipping file {], no hand detected".format(filename))
# return None
    if cube is None:
cube = [] # Min/Max
else:
# normalize input [-1, 1]
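        # depth value 0 marks background pixels; they are pushed to the far face of the
        # crop cube (com depth + half the cube size), presumably so they normalise to +1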
cropped_hand_depth[cropped_hand_depth == 0] = com[2] + (cube[2] / 2.)
cropped_hand_depth = cropped_hand_depth - com[2]
cropped_hand_depth = cropped_hand_depth / (cube[2] / 2.)
return cropped_hand_depth, joint_transf_mat, com
# input = torch.from_numpy(cropped_hand_depth)
# batch_input = input.unsqueeze(0) # add 1 for the batch dimension
# batch_input = batch_input.to(args.device)
def save_wavefront(filename: Path, dpt):
with open(filename, "w") as obj:
for xyz in dpt:
x = xyz[0]
y = xyz[1]
z = xyz[2]
obj.write("v {} {} {}\n".format(x, y, z))
|
[
"daniel.kup@student.tugraz.at"
] |
daniel.kup@student.tugraz.at
|
41b62148741f8eeacc4cd75923e3221f352c447a
|
a164516eedade96dfa6b7ddf9f58c07bc34f0d7d
|
/portfolio/settings.py
|
680bcc918481899585ad347cb93a3ca5c178fc5c
|
[] |
no_license
|
Kumar-Roopak/portfolio-project
|
d728632fb926a7085569da91de0311cac4e5fd14
|
dee575fe007da49751cff5ec9f5088461323017f
|
refs/heads/master
| 2020-04-02T01:04:41.694807
| 2018-10-19T22:21:03
| 2018-10-19T22:21:03
| 152,787,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,513
|
py
|
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'uqw2u&q1+*ykw^7wrh11!mnjxs11#m!zm4+#8#*-uz_(z&d7b!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'jobs.apps.JobsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'portfoliodb',
'USER': 'postgres',
'PASSWORD': 'Brooke@9811463488',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'portfolio/static/')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
try:
from .local_settings import *
except ImportError:
pass
|
[
"kum17003@byui.edu"
] |
kum17003@byui.edu
|
ae132d5fb1641981f5167a9e88f4f1b5237558fb
|
8682252f94375ab0300499ba1e1b412790369519
|
/cryptoblotter/trades/utils.py
|
9b7554ee31239a4b70934ddf2330d9701890b376
|
[
"MIT"
] |
permissive
|
PAV-Laboratory/cryptoblotter
|
713e245f8343980b9de3c7290c9a29b4baa768de
|
f573592a3638fbc6cae24d76305de36b932949c6
|
refs/heads/main
| 2023-04-21T01:03:24.934536
| 2021-05-04T00:15:57
| 2021-05-04T03:23:43
| 391,418,996
| 1
| 0
|
MIT
| 2021-07-31T17:14:37
| 2021-07-31T17:14:37
| null |
UTF-8
|
Python
| false
| false
| 762
|
py
|
from decimal import Decimal
def decimal_to_str(data: dict):
for key, value in data.items():
if isinstance(value, dict):
data[key] = decimal_to_str(value)
elif isinstance(value, list):
for index, v in enumerate(value):
if isinstance(v, dict):
data[key][index] = decimal_to_str(v)
elif isinstance(v, Decimal):
data[key][index] = str(v)
elif isinstance(value, Decimal):
data[key] = str(value)
return data
def normalize_symbol(feed: str, symbol: str):
for char in ("-", "/", "_"):
symbol = symbol.replace(char, "")
if feed == "upbit":
symbol = symbol[3:] + symbol[:3] # Reversed
return symbol
|
[
"globophobe@gmail.com"
] |
globophobe@gmail.com
|
c48dfebf64c01e9992f83ddc243026953d873e31
|
312fae7e287682d1eba7bf43e65bdfeff2e33e6a
|
/APIServer/commons/form_api.py
|
6ae70295e081e88832049c63d9db7968e25a43df
|
[
"MIT"
] |
permissive
|
gcallah/socnet
|
3ae9582a7d8d3871806c2820efb1b13a2e4f747a
|
dd8f0894e8ed071df7f78f3b61dddc7d5cf7be61
|
refs/heads/master
| 2023-02-26T07:02:34.517851
| 2020-07-27T14:21:22
| 2020-07-27T14:21:22
| 208,861,159
| 1
| 1
|
MIT
| 2023-02-16T08:31:29
| 2019-09-16T17:44:56
|
Python
|
UTF-8
|
Python
| false
| false
| 627
|
py
|
from APIServer.commons.api_utils import read_json
def get_alert_form(path):
return read_json(path)
def create_alert_json(alert_tuple):
'''
Create alert_json from an alert tuple
'''
alert_json = {}
alert_json['datetime'] = alert_tuple[1]
alert_json['zipcode'] = alert_tuple[2]
alert_json['city'] = alert_tuple[3]
alert_json['state'] = alert_tuple[4]
alert_json['country'] = alert_tuple[5]
alert_json['type'] = alert_tuple[6]
alert_json['description'] = alert_tuple[7]
alert_json['severity'] = alert_tuple[8]
alert_json['sender'] = alert_tuple[9]
return alert_json
|
[
"wm1065@nyu.edu"
] |
wm1065@nyu.edu
|
cb5d8e1a4128394cd188b847c0d6f216a6f30120
|
d3ae46a2d5277574ce14539f7adbcb961d05e7ca
|
/version/bin/wheel
|
e1d092ecd632e75cdc68f8c00693ebb066f6b5c3
|
[] |
no_license
|
muw2/CS1XA3
|
b8f65c4a6569fb5db42902812193b4be945964e4
|
934afe2361f868e331d666353d32a129f1056e64
|
refs/heads/master
| 2020-04-20T03:30:41.073572
| 2019-04-25T04:32:52
| 2019-04-25T04:32:52
| 168,251,805
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
#!/home/muw2/CS1XA3/version/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"muw2@mcmaster.ca"
] |
muw2@mcmaster.ca
|
|
5a48ce3402299b837458ddeb22dfd7fda978291b
|
2102d034f9a53835b33434c5270302f3c047d7b3
|
/backend-scv/api/scripts/installtools.py
|
2da79645ccf30cb3c00f01fcb3de6d8a61847a38
|
[] |
no_license
|
fdemian/scv-react-test
|
acd9614f52f142066034594a5b48386dbaba2831
|
66550fa75c88e1be5dd70678e13fb3a9acd3179e
|
refs/heads/master
| 2023-08-28T17:32:52.455364
| 2021-10-28T21:53:14
| 2021-10-28T21:53:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,015
|
py
|
import sys
from subprocess import call
script_path = "api/scripts"
def install_packages(requirements_file):
return_code = call([sys.executable, '-m', 'pip', 'install', '-r', requirements_file])
    if return_code != 0:
raise Exception("unable to install one or more packages")
def call_scripts(scripts, script_path):
for script in scripts:
return_code = call([sys.executable, script_path + "/" + script])
        if return_code != 0:
raise Exception("script: " + script + " failed to execute")
def setup(name, requirements_file, scripts):
try:
"""
print(" ==============" + name + "============== ", end="\n\n\n")
print("Installing packages", end="\n")
install_packages(requirements_file)
"""
print("\n")
print("========= Calling scripts ============== ", end="\n\n\n")
call_scripts(scripts, script_path)
except Exception as inst:
print("The following error ocurred: " + str(inst))
|
[
"federicodamiancaminiti@gmail.com"
] |
federicodamiancaminiti@gmail.com
|
49fd48e6fbab74f7981b53f6c31fe6d3c3620d30
|
0e822323071d972c4a2a8b3c4e778a9144b2b5b4
|
/databuilder/models/table_source.py
|
d0b7156e4977073bb77abd81647e5fcfdba70777
|
[
"Apache-2.0"
] |
permissive
|
duyet/amundsendatabuilder
|
f3bed53c93d19bfda5ae7df1bd456214e442012f
|
21a763add3c00c34b4f4c2d9809f59e50fb264c8
|
refs/heads/master
| 2023-04-09T05:08:07.816951
| 2020-07-01T16:34:56
| 2020-07-01T16:34:56
| 277,763,943
| 0
| 0
|
Apache-2.0
| 2023-04-03T23:05:59
| 2020-07-07T08:43:44
| null |
UTF-8
|
Python
| false
| false
| 3,940
|
py
|
from typing import Any, Dict, List, Union # noqa: F401
from databuilder.models.neo4j_csv_serde import Neo4jCsvSerializable, NODE_KEY, \
NODE_LABEL, RELATION_START_KEY, RELATION_START_LABEL, RELATION_END_KEY, \
RELATION_END_LABEL, RELATION_TYPE, RELATION_REVERSE_TYPE
from databuilder.models.table_metadata import TableMetadata
class TableSource(Neo4jCsvSerializable):
# type: (...) -> None
"""
Hive table source model.
"""
LABEL = 'Source'
KEY_FORMAT = '{db}://{cluster}.{schema}/{tbl}/_source'
SOURCE_TABLE_RELATION_TYPE = 'SOURCE_OF'
TABLE_SOURCE_RELATION_TYPE = 'SOURCE'
def __init__(self,
db_name, # type: str
schema, # type: str
table_name, # type: str
cluster, # type: str
source, # type: str
source_type='github', # type: str
):
# type: (...) -> None
self.db = db_name.lower()
self.schema = schema.lower()
self.table = table_name.lower()
self.cluster = cluster.lower() if cluster else 'gold'
# source is the source file location
self.source = source
self.source_type = source_type
self._node_iter = iter(self.create_nodes())
self._relation_iter = iter(self.create_relation())
def create_next_node(self):
# type: (...) -> Union[Dict[str, Any], None]
# return the string representation of the data
try:
return next(self._node_iter)
except StopIteration:
return None
def create_next_relation(self):
# type: (...) -> Union[Dict[str, Any], None]
try:
return next(self._relation_iter)
except StopIteration:
return None
def get_source_model_key(self):
# type: (...) -> str
return TableSource.KEY_FORMAT.format(db=self.db,
cluster=self.cluster,
schema=self.schema,
tbl=self.table)
def get_metadata_model_key(self):
# type: (...) -> str
return '{db}://{cluster}.{schema}/{table}'.format(db=self.db,
cluster=self.cluster,
schema=self.schema,
table=self.table)
def create_nodes(self):
# type: () -> List[Dict[str, Any]]
"""
Create a list of Neo4j node records
:return:
"""
results = [{
NODE_KEY: self.get_source_model_key(),
NODE_LABEL: TableSource.LABEL,
'source': self.source,
'source_type': self.source_type
}]
return results
def create_relation(self):
# type: () -> List[Dict[str, Any]]
"""
Create a list of relation map between owner record with original hive table
:return:
"""
results = [{
RELATION_START_KEY: self.get_source_model_key(),
RELATION_START_LABEL: TableSource.LABEL,
RELATION_END_KEY: self.get_metadata_model_key(),
RELATION_END_LABEL: TableMetadata.TABLE_NODE_LABEL,
RELATION_TYPE: TableSource.SOURCE_TABLE_RELATION_TYPE,
RELATION_REVERSE_TYPE: TableSource.TABLE_SOURCE_RELATION_TYPE
}]
return results
def __repr__(self):
# type: () -> str
return 'TableSource({!r}, {!r}, {!r}, {!r}, {!r})'.format(self.db,
self.cluster,
self.schema,
self.table,
self.source)
|
[
"noreply@github.com"
] |
duyet.noreply@github.com
|
43f5d197c6188ed85aa8adfb2c0b9986d4d53085
|
d73d0268aa090cdd08497145fa48619ffb0ef01a
|
/APIdashboard/models.py
|
709e54101f394f2d223f5ca936d8b595712f3594
|
[] |
no_license
|
kademanec/apimanager
|
de94e65b61b79ae53f39ed50823b20db8b484c13
|
84bc2b0e492f7fb0ae722aff1eef2ffa4998d13b
|
refs/heads/master
| 2020-08-07T00:22:48.938687
| 2019-10-07T08:09:20
| 2019-10-07T08:09:20
| 213,217,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class APIs(models.Model):
name = models.CharField(max_length=200)
link = models.CharField(max_length=200)
requesting = models.TextField(null = True)
def __str__(self):
return self.name
|
[
"kademanec@gmail.com"
] |
kademanec@gmail.com
|
4007333f9e30962bcb94d603eb1809a924de0442
|
e3d969e2c9e4b57f4f7d58af5e44a00aa8fb15d3
|
/0463 Island Perimeter.py
|
86247af9f1d30d1779eefb1f33573fa65d92750a
|
[
"MIT"
] |
permissive
|
kevin-fang/leetcode
|
2744ff01e791db6f60edf946ef71451fae92ef6f
|
3958f888b30bb3e29916880ecec49b3870a0bea3
|
refs/heads/master
| 2022-12-15T07:50:01.056016
| 2020-09-10T03:47:53
| 2020-09-10T03:47:53
| 294,296,037
| 3
| 0
|
MIT
| 2020-09-10T03:47:39
| 2020-09-10T03:47:38
| null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
from typing import List

class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
ans = 0
for y in range(len(grid)):
for x in range(len(grid[0])):
if grid[y][x] == 1:
ans += 4
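                    # each neighbouring land cell hides one unit of perimeter; a shared
                    # edge is found twice (once from either cell), removing 2 in total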
for dy, dx in [[1,0], [0,1], [-1,0], [0,-1]]:
ny, nx = y+dy, x+dx
if ny >= 0 and ny < len(grid) and nx >= 0 and nx < len(grid[0]) and grid[ny][nx] == 1:
ans -= 1
return ans
|
[
"mdabedincs@gmail.com"
] |
mdabedincs@gmail.com
|
21b207fd26fd58a3a0685aa18e3709c83c8aa764
|
598ab0b2211db7c808c14c2b83293c84fb9d546d
|
/comma_code.py
|
4116427975bd0499532d8f03a2c0b6cb2b5d089d
|
[] |
no_license
|
mojeedkusimo/comma-code
|
2d39e6736432213d64ab6145f73a75203349da50
|
7869cb0c0be472bce8c389ded50ca6ce71b70fb5
|
refs/heads/master
| 2021-09-23T23:36:06.789418
| 2018-09-29T08:47:17
| 2018-09-29T08:47:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
print ("This program takes in any food item you input and displays it in an organised manner")
print ("Hello there! Can you please enter your name?")
user = input ()
print ("Welcome!",user)
while True:
def food (items): # a function is defined with "item" as the single parameter
no_of_items = 0 # a counter for the number of items in the list is declared
items_string = "" # a string for the list is also formed
while no_of_items < len (items) - 1: # condition for compiling all the items except the last one
items_string += items [no_of_items] + ", " # each item and ", " is added to the string via concatenation
no_of_items += 1 # counter is increased accordingly
return items_string + "and " + items [no_of_items] # the last item and "and" is brought in
meal = [] # program creates an empty list
print ("Enter any food of your choice and press enter when done")
usermeal = input () # asks user to enter a food item
while usermeal != "": # the program continues to run until nothing is entered and user press enter
meal = meal + [usermeal] #each item entered by the user is added to the list
usermeal = input () # asks for more entry
print (food (meal))
close = input ("\nWould you like to exit?\n 1.Yes\t 2. No\n Choice:")
if close == "2": # conditions for quitting the program
continue
elif close == "1":
print ("Thank you for your time,",user,", do have a nice day!")
break
|
[
"noreply@github.com"
] |
mojeedkusimo.noreply@github.com
|
8358005976800dd1cebd8e9f73b7c7a21511b505
|
4e879e994720100a9354895af2bb9be33b38a42b
|
/xUdemy_xlwings/S4_MontecarloSimulation_Part_1/S4_32_Simple_MonteCarlo_Simulation_01.py
|
7e59f365bbe892740b7a699368b2e2c7ab95f061
|
[] |
no_license
|
pepitogrilho/learning_python
|
80314ec97091238ed5cc3ed47422d2e6073a3280
|
bbdc78a9a0513c13d991701859bcfe7a8e614a49
|
refs/heads/master
| 2023-04-09T15:07:08.866721
| 2023-04-02T18:45:47
| 2023-04-02T18:45:47
| 230,527,165
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
# -*- coding: utf-8 -*-
"""
"""
import xlwings as xw
import numpy as np
import matplotlib.pyplot as plt
app = xw.apps.active
#Open workbook
wb = xw.Book("real_estate.xlsx")
inp = wb.sheets[0]
#Define ranges
inp["D20"].name = "cpi"
inp["D25"].name = "ppf"
inp["D40"].name = "cost"
#inp["G24:G25"].name = "performance"
inp["G24"].name = "performance_multiple"
inp["G25"].name = "performance_IRR"
#Number of simulations
sims = 100
#CPI: Probability Distribution (normal)
cpi_exp = 0.02
cpi_std = 0.01
cpi_pd = np.random.normal(cpi_exp, cpi_std, sims)
plt.hist(cpi_pd, bins = 100)
plt.show()
#PPF: Probability Distribution (normal)
ppf_exp = 23
ppf_std = 3
ppf_pd = np.random.normal(ppf_exp, ppf_std, sims)
plt.hist(ppf_pd, bins = 100)
plt.show()
#COST: Probability Distribution (normal)
cost_exp = 250000
cost_std = 50000
cost_pd = np.random.normal(cost_exp, cost_std, sims)
plt.hist(cost_pd, bins = 100)
plt.show()
#PERFORMANCE
results=[]
for i in range(sims):
inp["cpi"].value = np.random.normal(cpi_exp, cpi_std)
inp["ppf"].value = np.random.normal(ppf_exp, ppf_std)
inp["cost"].value = np.random.normal(cost_exp, cost_std)
results.append(inp["performance_multiple"].value)
plt.hist(results, bins = 100)
plt.show()
############## 2:09/14:33
wb.close()
|
[
"pepitogrilho@gmail.com"
] |
pepitogrilho@gmail.com
|
2033c29c81fe3f673e5a257fd45f89870796a944
|
1a87d286396a2c6f6b6ac7c53495f80690836c7b
|
/CTCI/CTCI_4_4_buildListOfNodeAtSameDepth_method2.py
|
4fe9d49e79cb791b7a53cb0e8efdca5d0ca95c4d
|
[] |
no_license
|
kickbean/LeetCode
|
14d33eea9dd70821114ca6d7e1a32111d4d64bf0
|
92e4de152e2aae297ef0e93c9eea61d7ad718f4e
|
refs/heads/master
| 2016-09-10T14:38:33.692759
| 2014-04-08T00:26:51
| 2014-04-08T00:26:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
'''
CTCI 4-4
Given a binary search tree, design an algorithm to create a linked list of all the nodes at each depth
Algorithm: breadth-first search and record the level of each node
Created on Nov 28, 2013
@author: Songfan
'''
from queue import Queue
from buildListOfNodeAtSameDepth_4_4 import LinkedList, BST
''' BFS: using a queue '''
class BST2(BST):
def buildNodeList(self):
result = {}
if self.root:
q = Queue()
visitedNode = []
q.enqueue(self.root)
q.enqueue('EndLevelFlag')
level = 0
while(not q.isEmpty()):
tmp = q.dequeue()
if tmp == 'EndLevelFlag':
# finish building the current level
level += 1
else:
# add node to Linked List
visitedNode.append(tmp)
if level in result.keys():
                        result[level].append(tmp.value)  # store the value, matching the new-list branch below
else:
aList = LinkedList()
aList.append(tmp.value)
result[level] = aList
if tmp.left: q.enqueue(tmp.left)
if tmp.right: q.enqueue(tmp.right)
# add dummy item to represent the end of level
q.enqueue('EndLevelFlag')
return result
t = BST()
print t
t.insert(5)
t.insert(1)
t.insert(7)
t.insert(2)
t.insert(9)
t.insert(8)
print t
print t.root.getDepth()
print t.root.left.getDepth()
h = t.buildNodeList()
for k in h.keys():
print h[k]
|
[
"songfan.yang@gmail.com"
] |
songfan.yang@gmail.com
|
35d2a69ebef931e4686710e2468cd9fc63e8d523
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03485/s456539182.py
|
60831196a61282637ddaf30b97852a9b0b19c178
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
import sys
def input(): return sys.stdin.readline().strip()
def resolve():
a,b=map(int, input().split())
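    # -(-(a+b) // 2) is ceiling division: floor-dividing the negated sum rounds (a+b)/2 up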
print(-(-(a+b) // 2))
resolve()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9d27dc2a1cb0b24902ffe100874c06eea38b133f
|
408520088af1a66631bca4f7340256e28f3a4f7e
|
/pset6/credit/credit.py
|
ce030d32bb1eea9831e68f24795780949f6174fe
|
[] |
no_license
|
pidus/cs50
|
a00d903ef6a33d710e2109fc61bfe566f65a396d
|
55e7e9053a7a4a339ae3d5cd2b2830e1a3e6630c
|
refs/heads/master
| 2023-02-08T19:54:56.569014
| 2020-12-31T17:09:03
| 2020-12-31T17:09:03
| 315,669,764
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 975
|
py
|
from cs50 import get_int
#get card number from user
def get_valid_input():
uin = get_int("Number: ")
while uin < 0 or uin > 9999999999999999:
uin = get_int("Number: ")
return uin
card = str(get_valid_input())
length = len(card)
#Luhn’s Algorithm
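# double every second digit starting from the second-to-last; a doubled digit above 9
# loses 9 (same as summing its digits); add the remaining digits as-is; the number
# passes the check when the total is divisible by 10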
luhn = 0
#every other digits starting w/ 2nd to last
for a in range(length-2, -1, -2):
tmp = int(card[a])*2
if tmp < 10:
luhn += tmp
else:
luhn += tmp-9
# rest of the digits
for b in range(length-1, -1, -2):
luhn += int(card[b])
remainder = luhn%10
if remainder == 0:
    if length == 15 and int(card[0]) == 3 and (int(card[1]) == 4 or int(card[1]) == 7):
print("AMEX")
elif length == 16 and int(card[0]) == 5 and (int(card[1]) == 1 or int(card[1]) == 2 or int(card[1]) == 3 or int(card[1]) == 4 or int(card[1]) == 5):
print("MASTERCARD")
elif (length == 13 or length == 16) and int(card[0]) == 4:
print("VISA")
else:
print("INVALID")
|
[
"8981437939@pm.me"
] |
8981437939@pm.me
|
b5e976203b2ada9312a2989842387bba512a0df5
|
7bb4ad7008304ed53e41c405e23a7374e5183490
|
/svr.py
|
2994a37b0ffa43f046deb81752fbd8b1a16c580e
|
[] |
no_license
|
naveen700/Support-Vector-Regression
|
a514148940c126549a2c67c1ca8e524e20017ffd
|
bdad0b7b6cae29fdf237c57db844b4860f70bd64
|
refs/heads/master
| 2020-05-18T05:26:38.793692
| 2019-05-02T07:15:08
| 2019-05-02T07:15:08
| 184,207,261
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,837
|
py
|
# SVR
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2:3].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
# Fitting SVR to the dataset
from sklearn.svm import SVR
# svr does not include feature scaling.
# we need to define kernel ,for non-linear we use most commonly gaussian kernel which is rbf kernel ,this is already default choice for kernel.
regressor = SVR(kernel='rbf')
regressor.fit(X,y)
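# a single position level (6.5) must be given to predict() as a 2-D array,
# so it is wrapped in an array and reshaped to shape (1, 1) below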
data = [6.5]
data = np.array(data)
data = data.reshape(1,-1)
# Predicting a new result
y_pred = regressor.predict(sc_X.transform(data))
# y_pred is scaled prediction we need to move it back to orgincal scale. so we need to inverse the scale tranformation.
y_pred = sc_y.inverse_transform(y_pred)
# Visualising the SVR results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the SVR results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.01) # choice of 0.01 instead of 0.1 step because the data is feature scaled
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
[
"naveen.rana@deeplogictech.com"
] |
naveen.rana@deeplogictech.com
|
1fef2f313e198ebc6f4223b0473b33d66de7a432
|
4c839dfe650b992b4d4927d8b47c5a1745c6e904
|
/app_hello.py
|
6c84696806cd3421bab959a173152e0d4449664c
|
[] |
no_license
|
JThanat/femto-mesos
|
98c205aa3d1dbd6f8bba6a2bd0299214da838b06
|
e7b989b0fb7ae4ff4a66ec92a4d97e29fcec4918
|
refs/heads/master
| 2021-05-23T05:29:50.842680
| 2017-11-29T01:06:16
| 2017-11-29T01:06:16
| 95,017,728
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,708
|
py
|
#!/usr/bin/env python
# This main is adapted from APACHE Mesos original test_framework.py
import json
import os
import sys
import mesos.native
from mesos.interface import mesos_pb2
from framework.framework import HelloWorldScheduler
from framework.job import Job
if __name__ == "__main__":
# Read JSON File
json_file = sys.argv[2]
jobs = []
with open(json_file) as json_data:
json_object = json.load(json_data)
jobs_array = json_object["jobs"]
print jobs_array
# Prepare job object Array
for j in jobs_array:
jobs.append(Job.fromJSON(j))
# Framework Info, Executor and Driver
framework = mesos_pb2.FrameworkInfo()
framework.user = ""
framework.name = "hello-world"
framework.checkpoint = True
implicitAcknowledgements = 1
if len(sys.argv) != 3:
        print "Usage: %s master" % sys.argv[0]
sys.exit(1)
executor = mesos_pb2.ExecutorInfo()
executor.executor_id.value = "default"
executor.command.value = os.path.abspath("./test-executor")
executor.name = "Test Executor (Python)"
executor.source = "python_test"
# STEP1: Get Framework Info
framework = mesos_pb2.FrameworkInfo()
framework.user = "" # Have Mesos fill in the current user.
framework.name = "Test Framework (Python)"
framework.checkpoint = True
driver = mesos.native.MesosSchedulerDriver(
HelloWorldScheduler(implicitAcknowledgements, executor, jobs),
framework,
sys.argv[1],
implicitAcknowledgements)
status = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1
# Ensure that the driver process terminates.
driver.stop()
sys.exit(status)
|
[
"thanat.jat@gmail.com"
] |
thanat.jat@gmail.com
|
94e887a5648716db788c45167f576b57c0e028f2
|
d0f11aa36b8c594a09aa06ff15080d508e2f294c
|
/leecode/1-500/1-100/42-接雨水.py
|
691adfc3090fd9501f149077778714d19403a479
|
[] |
no_license
|
saycmily/vtk-and-python
|
153c1fe9953fce685903f938e174d3719eada0f5
|
5045d7c44a5af5c16df5a3b72c157e9a2928a563
|
refs/heads/master
| 2023-01-28T14:02:59.970115
| 2021-04-28T09:03:32
| 2021-04-28T09:03:32
| 161,468,316
| 1
| 1
| null | 2023-01-12T05:59:39
| 2018-12-12T10:00:08
|
Python
|
UTF-8
|
Python
| false
| false
| 292
|
py
|
def trap(height):
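    # one-pass trick: the water above index i is min(prefix max, suffix max) - height[i];
    # the loop sums prefix maxima, suffix maxima and heights, and because
    # max(prefix max, suffix max) is always the overall maximum, the surplus is exactly
    # len(height) * global max (h1 after the loop), which is subtracted at the end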
ans = 0
h1 = 0
h2 = 0
for i in range(len(height)):
h1 = max(h1, height[i])
h2 = max(h2, height[-i-1])
ans = ans + h1 + h2 - height[i]
return ans - len(height)*h1
height = [0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]
print(trap(height))
|
[
"1786386686@qq.com"
] |
1786386686@qq.com
|
0904d4495354c4ddfd0df4dc2ce7126c46b98949
|
8ffc004ca05591278cb5f2dea150b74594a2f45d
|
/baseline/lib/callbacks.py
|
a8448a9c353b4fca5907ae5798381eb6dd28e0dd
|
[
"MIT"
] |
permissive
|
Ravirajadrangi/predicting-paraphrases
|
5e59e18f7006d2f68a22540aef0318adc2be0e68
|
348c99c943d9169b8d5ced043c1ae6119e390b27
|
refs/heads/master
| 2021-01-15T19:14:15.592922
| 2017-03-18T05:49:11
| 2017-03-18T05:49:11
| 99,812,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,179
|
py
|
import keras
from models.paraphrase import SiameseParaphrase
class ParaphraseCallback(keras.callbacks.Callback):
"""Run the paraphrase model to test accuracy"""
def __init__(self,train_inputs,train_labels,test_inputs,test_labels):
self.train_left = train_inputs[:,:,:,0]
self.train_right = train_inputs[:,:,:,1]
self.train_labels = train_labels
self.test_left = test_inputs[:,:,:,0]
self.test_right = test_inputs[:,:,:,1]
self.test_labels = test_labels
def on_batch_end(self, batch, logs={}):
autoencoder = self.model
print self.train_left
print self.train_right
input_shape = (51,50) # (n_training, max_sentence_length, embedding_size)
siamese = SiameseParaphrase(autoencoder,input_shape)
print "Fitting SiameseModel:"
siamese.fit(self.train_left, self.train_right ,self.train_labels)
print "Evaluating SiameseModel in training data:"
siamese.evaluate(self.train_left, self.train_right, self.train_labels)
print "Evaluating SiameseModel in testing data:"
siamese.evaluate(self.test_left, self.test_right, self.test_labels)
|
[
"maxkferg@gmail.com"
] |
maxkferg@gmail.com
|
e58374bf9aa04da228a0242c6429794fc458b7bd
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j03230-2945/sdB_GALEX_J03230-2945_coadd.py
|
2027385d6aa7d7738f0c0def398550be993ebdf8
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 453
|
py
|
from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[50.76725,-29.758464], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_GALEX_J03230-2945/sdB_GALEX_J03230-2945_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_GALEX_J03230-2945/sdB_GALEX_J03230-2945_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
1a32bd9e6b7fb5c4f46187766b732e3db8f7d753
|
da302ba0c71722591fb19ceeb24bb1f9d60b128a
|
/text.py
|
4189183f68dd294cc99a022df066d84c903427dc
|
[
"MIT"
] |
permissive
|
shagabutdinov/sublime-context
|
f4352b8126645f23a11465cc70ab6dd52588200b
|
2a088a4dd9d7d516ed49f4d1bd85696fa6ecc2d4
|
refs/heads/master
| 2020-04-05T23:29:09.439470
| 2016-10-25T10:47:27
| 2016-10-25T10:47:27
| 24,747,150
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,690
|
py
|
import sublime
import sublime_plugin
from Context.base import Base
class Selection(Base):
def on_query_context(self, *args):
callback = lambda view, sel: view.substr(sel)
return self._check_sel('selection', callback, *args)
class LineB(Base):
def on_query_context(self, *args):
callback = lambda view, sel: view.substr(view.line(sel.b))
return self._check_sel('line', callback, *args)
class FollowingTextA(Base):
def on_query_context(self, *args):
return self._check_sel('following_text_a', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(sel.a, view.line(sel.a).b))
class FollowingTextB(Base):
def on_query_context(self, *args):
return self._check_sel('following_text_b', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(sel.b, view.line(sel.b).b))
class FollowingTextBegin(Base):
def on_query_context(self, *args):
return self._check_sel('following_text_begin', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(sel.begin(), view.line(sel.begin()).b))
class FollowingTextEnd(Base):
def on_query_context(self, *args):
return self._check_sel('following_text_end', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(sel.end(), view.line(sel.end()).b))
class PrecedingTextA(Base):
def on_query_context(self, *args):
return self._check_sel('preceding_text_a', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(view.line(sel.a).a, sel.a))
class PrecedingTextB(Base):
def on_query_context(self, *args):
return self._check_sel('preceding_text_b', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(view.line(sel.b).a, sel.b))
class PrecedingTextBegin(Base):
def on_query_context(self, *args):
return self._check_sel('preceding_text_begin', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(view.line(sel.begin()).a, sel.begin()))
class PrecedingTextEnd(Base):
def on_query_context(self, *args):
return self._check_sel('preceding_text_end', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(view.line(sel.end()).a, sel.end()))
class Preceding128CharsBegin(Base):
def on_query_context(self, *args):
return self._check_sel('preceding_128_chars_begin', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(max(0, sel.begin() - 128), sel.begin()))
class Preceding512CharsBegin(Base):
def on_query_context(self, *args):
return self._check_sel('preceding_512_chars_begin', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(max(0, sel.begin() - 512), sel.begin()))
class Following128CharsEnd(Base):
def on_query_context(self, *args):
return self._check_sel('following_128_chars_end', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(sel.begin(), min(view.size(), sel.end() + 128)))
class Following512CharsEnd(Base):
def on_query_context(self, *args):
return self._check_sel('following_512_chars_end', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(sel.begin(), min(view.size(), sel.end() + 512)))
class Begin512Chars(Base):
def on_query_context(self, *args):
return self._check_sel('begin_512_chars', self._callback, *args)
def _callback(self, view, sel):
return view.substr(sublime.Region(0, min(view.size(), 512)))
|
[
"leonid@shagabutidnov.com"
] |
leonid@shagabutidnov.com
|
357c3e80a1db4ce379c01c1f50edcb5735a608aa
|
26eeab4b8c472a9b65897a30bc7c6779b4c418e8
|
/2_Data Wrangling/assignment2.py
|
b4e6f324ad9c30f1194fcc060c39605ede41309f
|
[
"MIT"
] |
permissive
|
gaurprabhakar94/Dat210x
|
3c187e779c7e77cb51499e23c79fce5f1f5efd38
|
06fe3c2a4354c7eaba3215ecddf9ed9c47cca8e3
|
refs/heads/master
| 2021-01-01T20:31:52.740756
| 2017-07-31T13:48:02
| 2017-07-31T13:48:02
| 98,882,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 332
|
py
|
import pandas as pd
import os
os.chdir('Datasets')
# Loading up the dataset
#
df = pd.read_csv("tutorial.csv", sep=',')
print(df)
print("\n")
# Printing the results of the .describe() method
#
print(df.describe())
print("\n")
# Indexing the dataframe with: [2:4,'col3']
# and then printing the value
print(df.loc[2:4,'col3'])
|
[
"noreply@github.com"
] |
gaurprabhakar94.noreply@github.com
|
f20e69a54e2028b1df280af140d788ce4bcb2e16
|
6284b810dc0d23b7735bf988bbeaa84f5d5f9f49
|
/src/models/train_model.py
|
ad436da4a89db35c3bb7136a72f815693a4037e4
|
[
"BSD-3-Clause"
] |
permissive
|
Lewuathe/TensorFlow-StyleTransfer
|
5985fa3b606b6cb8b0d6ce29c6148eb93fcbd4bb
|
8ef95003c395c7130492a6af5a982f02aa199fa3
|
refs/heads/master
| 2020-12-25T18:52:41.651940
| 2017-06-11T13:34:57
| 2017-06-11T13:34:57
| 94,006,269
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,885
|
py
|
#!/usr/bin/env python
""" An implementation of the paper "A Neural Algorithm of Artistic Style"
by Gatys et al. in TensorFlow.
Author: Chip Huyen (huyenn@stanford.edu)
Prepared for the class CS 20SI: "TensorFlow for Deep Learning Research"
For more details, please read the assignment handout:
http://web.stanford.edu/class/cs20si/assignments/a2.pdf
"""
from __future__ import print_function
import os
import time
import numpy as np
import tensorflow as tf
import vgg_model
import utils
# parameters to manage experiments
STYLE_IMAGE = 'data/styles/{}.jpg'
CONTENT_IMAGE = 'data/content/{}.jpg'
IMAGE_HEIGHT = 333
IMAGE_WIDTH = 333
IMAGE_FILENAME = '{}/{}.png'
tf.flags.DEFINE_string(
"style", 'hokusai', "Style image. Image must be put in data/styles.")
tf.flags.DEFINE_string(
"content", None, 'Content image. Image must be put in data/content.')
tf.flags.DEFINE_float(
"noise_ratio", 0.6, "Percentage of weight of the noise for intermixing with the content image")
tf.flags.DEFINE_float(
"style_loss_ratio", 0.05, "The weight of style loss with content loss. The total loss is content_loss + ratio * style_loss")
tf.flags.DEFINE_string(
"content_loss_layer", "conv4_2", "The layer used for calculating content loss. (conv1_2, conv2_2, conv3_2, conv4_2, conv5_2)")
tf.flags.DEFINE_string(
"output_dir", "data/outputs", "The output dir where generated image will be put.")
tf.flags.DEFINE_integer(
"iters", 300, "Iterations")
tf.flags.DEFINE_float(
"learning_rate", 2.0, "Learning rate")
FLAGS = tf.flags.FLAGS
# Layers used for style features. You can change this.
STYLE_LAYERS = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
W = [0.5, 1.0, 1.5, 3.0, 4.0] # give more weights to deeper layers.
MEAN_PIXELS = np.array([123.68, 116.779, 103.939]).reshape((1,1,1,3))
""" MEAN_PIXELS is defined according to description on their github:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8
'In the paper, the model is denoted as the configuration D trained with scale jittering.
The input images should be zero-centered by mean pixel (rather than mean image) subtraction.
Namely, the following BGR values should be subtracted: [103.939, 116.779, 123.68].'
"""
# VGG-19 parameters file
VGG_DOWNLOAD_LINK = 'http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat'
VGG_MODEL = 'models/imagenet-vgg-verydeep-19.mat'
EXPECTED_BYTES = 534904783
def _create_content_loss(p, f):
"""
Calculate the loss between the feature representation of the
content image and the generated image.
Inputs:
p, f are just P, F in the paper
(read the assignment handout if you're confused)
Note: we won't use the coefficient 0.5 as defined in the paper
but the coefficient as defined in the assignment handout.
Output:
the content loss
"""
return tf.reduce_mean(tf.square(f - p)) / (4 * p.size)
def _gram_matrix(F, N, M):
"""
Create and return the gram matrix for tensor F
"""
m = tf.reshape(F, shape=[M, N])
return tf.matmul(tf.transpose(m), m)
def _single_style_loss(a, g):
""" Calculate the style loss at a certain layer
Inputs:
a is the feature representation of the real image
g is the feature representation of the generated image
Output:
the style loss at a certain layer (which is E_l in the paper)
"""
N = a.shape[3]
M = a.shape[1] * a.shape[2]
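    # N = number of feature maps (channels), M = height * width of one map; the squared
    # Gram-matrix difference is scaled by 1 / (4 N^2 M^2), the per-layer style-loss
    # normalisation used by Gatys et al.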
A = _gram_matrix(a, N, M)
G = _gram_matrix(g, N, M)
return tf.reduce_mean(tf.square(G - A)) / (4 * N * N * M * M)
def _create_style_loss(A, model):
"""
Return the total style loss
"""
n_layers = len(STYLE_LAYERS)
E = [W[i] * _single_style_loss(A[i], model[STYLE_LAYERS[i]]) for i in range(n_layers)]
return tf.reduce_sum(E)
def _create_losses(model, input_image, content_image, style_image):
with tf.variable_scope('loss') as scope:
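        # the content and style activations are captured by temporarily assigning each image
        # to the shared input variable and evaluating the corresponding layers in a session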
with tf.Session() as sess:
sess.run(input_image.assign(content_image)) # assign content image to the input variable
p = sess.run(model[FLAGS.content_loss_layer])
content_loss = _create_content_loss(p, model[FLAGS.content_loss_layer])
with tf.Session() as sess:
sess.run(input_image.assign(style_image))
A = sess.run([model[layer_name] for layer_name in STYLE_LAYERS])
style_loss = _create_style_loss(A, model)
total_loss = content_loss + FLAGS.style_loss_ratio * style_loss
return content_loss, style_loss, total_loss
def _create_summary(model):
""" Create summary ops necessary
Hint: don't forget to merge them
"""
tf.summary.scalar('content_loss', model['content_loss'])
tf.summary.scalar('style_loss', model['style_loss'])
tf.summary.scalar('total_loss', model['total_loss'])
return tf.summary.merge_all()
def train(model, generated_image, initial_image):
"""
Train your model.
"""
skip_step = 1
with tf.Session() as sess:
saver = tf.train.Saver()
init = tf.global_variables_initializer()
sess.run(init)
writer = tf.summary.FileWriter('graphs/style_transfer', sess.graph)
sess.run(generated_image.assign(initial_image))
ckpt = tf.train.get_checkpoint_state(os.path.dirname('graphs/checkpoint'))
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
initial_step = model['global_step'].eval()
start_time = time.time()
for index in range(initial_step, FLAGS.iters):
if index >= 5 and index < 20:
skip_step = 10
elif index >= 20:
skip_step = 20
sess.run(model['optimizer'])
if (index + 1) % skip_step == 0:
total_loss, gen_image = sess.run([model['total_loss'], generated_image])
gen_image = gen_image + MEAN_PIXELS
summary = sess.run(model['summary_op'])
writer.add_summary(summary, global_step=index)
print('Step {}\n Sum: {:5.1f}'.format(index + 1, np.sum(gen_image)))
print(' Loss: {:5.1f}'.format(total_loss))
print(' Time: {}'.format(time.time() - start_time))
start_time = time.time()
filename = IMAGE_FILENAME.format(FLAGS.output_dir, index)
utils.save_image(filename, gen_image)
if (index + 1) % 20 == 0:
saver.save(sess, 'graphs/checkpoints/style_transfer', index)
def main(argv):
with tf.variable_scope('input') as scope:
# use variable instead of placeholder because we're training the intial image to make it
# look like both the content image and the style image
input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
model = vgg_model.load_vgg(VGG_MODEL, input_image)
model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
content_image = utils.get_resized_image(CONTENT_IMAGE.format(FLAGS.content), IMAGE_HEIGHT, IMAGE_WIDTH)
content_image = content_image - MEAN_PIXELS
style_image = utils.get_resized_image(STYLE_IMAGE.format(FLAGS.style), IMAGE_HEIGHT, IMAGE_WIDTH)
style_image = style_image - MEAN_PIXELS
model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, input_image, content_image, style_image)
model['optimizer'] = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate).minimize(model['total_loss'])
model['summary_op'] = _create_summary(model)
initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, FLAGS.noise_ratio)
train(model, input_image, initial_image)
if __name__ == '__main__':
tf.app.run()
|
[
"lewuathe@me.com"
] |
lewuathe@me.com
|
04776e04755c2c4f4f926f7ed79f83f4d9408b92
|
ff5892487c262ce845a9996a282d3a2fdb1a3b15
|
/URI_1277.py
|
fac88b7da4e486a86591fdb123b779e70dc235a8
|
[] |
no_license
|
dankoga/URIOnlineJudge--Python-3.9
|
d424a47671f106d665a4e255382fc0ec3059096a
|
f1c99521caeff59be0843af5f63a74013b63f7f0
|
refs/heads/master
| 2023-07-15T08:32:11.040426
| 2021-09-03T13:27:17
| 2021-09-03T13:27:17
| 393,991,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
tests_qty = int(input())
for t in range(tests_qty):
students_qty = input()
students_names = list(input().split())
students_attendance = list(input().split())
students_failed = []
for s, attendance in enumerate(students_attendance):
presences = attendance.count('P')
medicals = attendance.count('M')
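        # a student fails when presences fall below 75% of the classes that count
        # (medically justified absences are excluded from the total)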
if 4 * presences < 3 * (len(attendance) - medicals):
students_failed += [students_names[s]]
print(' '.join([student for student in students_failed]))
|
[
"dankoga2@gmail.com"
] |
dankoga2@gmail.com
|
610e7c734f888cd0e10fa65dc899e1ba33ad5f76
|
84d97077cf9f297faeb94d88d460c6fd486cc648
|
/sub_py/maxwellian.py
|
1f8e47b118e0d8a6eb969a25ae5ec43f86b57880
|
[] |
no_license
|
jacksontvd/optimization
|
0ac12819acb091e6322d36f8eec183df0281dca6
|
e33b42fb1ce5dcd456d4db625b52f3097ef3edc2
|
refs/heads/master
| 2021-05-14T18:10:41.640978
| 2019-08-13T17:46:20
| 2019-08-13T17:46:20
| 98,315,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
import numpy as np
from ranges import *
def Maxwellian(e,T):
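    # Maxwell-Boltzmann energy spectrum: f(E) = (2 / sqrt(pi)) * sqrt(E) * T**(-1.5) * exp(-E / T),
    # normalised so it integrates to 1 over E >= 0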
return np.sqrt(e)*np.exp(-e/T)*(2/np.sqrt(np.pi)/np.sqrt(T)/T)
vMaxwellian = np.vectorize(Maxwellian)
egrid = np.array(mannhart_bins)
degrid = egrid[1:]-egrid[:-1]
def MaxwellianSpectrum(T):
egrid = mannhart_bins
# np.logspace(-3,2,bin_number['mannhart'])
return vMaxwellian(egrid,T)
|
[
"jacksontvd@jacksons-MacBook-Pro.local"
] |
jacksontvd@jacksons-MacBook-Pro.local
|
417eb43445bf9c1c7d3bf9e002da3c3710411639
|
639c0b542b581463eac228b82707452527a151af
|
/EmotionalAIchatbot/EmotionAIChatbot/urls.py
|
4de444d482ee6905da0c987734fa5b245dd3140f
|
[] |
no_license
|
latifyahia/EmotionAIChatbotProject
|
7ccf5f7399aa3412965ef1883b47d9e39ff972cf
|
e2b5a1802cab4bf5b6f9cb62bad43d2ab4b56ed8
|
refs/heads/master
| 2023-04-29T07:21:13.488221
| 2021-05-15T03:34:48
| 2021-05-15T03:34:48
| 319,690,700
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,350
|
py
|
"""EmotionAIChatbot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from chatbot.views import *
from register import views as v
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('', home),
path('home/', home),
path('lolly/', lolly),
path('about/', about),
path('profile/', profile),
path('updateEmotions/', updateEmotions),
path('register/', v.register, name='register'), # url for registration and assigning the url to an function inside view.py
path('', include('django.contrib.auth.urls')), # url for /login , /logout
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
[
"latifyahia98@gmail.com"
] |
latifyahia98@gmail.com
|
40c2d795c844c13a65815ddcff61807f4bf6f366
|
e70c7a33b037f021460a7ba39941119b997561e5
|
/delete.py
|
0328d1a0ec5355f99357d8bacd78e1835887d835
|
[] |
no_license
|
ishikavyas18/Python_Basic
|
ad38e5fbeb6b952294e231d7319757b65212f754
|
c163a9a9b885d07aae5b665eb3807c02fb514a31
|
refs/heads/main
| 2023-04-03T10:15:45.993095
| 2021-04-08T06:56:31
| 2021-04-08T06:56:31
| 355,451,141
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 478
|
py
|
#create an array with 5 elements and delete the value at index 2 without built-in functions
from array import *
arr=array('i',[])
n=int(input("enter length :"))
for i in range(n):
x=int(input("enter next element :"))
arr.append(x)
print("array is :",arr)
arr1=array('i',[])
del_element=int(input("enter element to be deleted : "))
for element in arr:
if element==del_element:
continue
else:
arr1.append(element)
print(arr1)
|
[
"noreply@github.com"
] |
ishikavyas18.noreply@github.com
|
67ec508d63e4b2973ed2137e74fa0e57033e6343
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/homeassistant/components/litterrobot/vacuum.py
|
55f0a182959c463662df3f53afc4fe8e177d87a6
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931
| 2022-11-09T00:27:20
| 2022-11-09T00:27:20
| 123,929,062
| 5
| 4
|
Apache-2.0
| 2023-02-22T06:14:31
| 2018-03-05T14:11:09
|
Python
|
UTF-8
|
Python
| false
| false
| 4,671
|
py
|
"""Support for Litter-Robot "Vacuum"."""
from __future__ import annotations
from datetime import time
from typing import Any
from pylitterbot import LitterRobot
from pylitterbot.enums import LitterBoxStatus
import voluptuous as vol
from homeassistant.components.vacuum import (
DOMAIN as PLATFORM,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_PAUSED,
StateVacuumEntity,
StateVacuumEntityDescription,
VacuumEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.dt as dt_util
from .const import DOMAIN
from .entity import LitterRobotEntity, async_update_unique_id
from .hub import LitterRobotHub
SERVICE_SET_SLEEP_MODE = "set_sleep_mode"
LITTER_BOX_STATUS_STATE_MAP = {
LitterBoxStatus.CLEAN_CYCLE: STATE_CLEANING,
LitterBoxStatus.EMPTY_CYCLE: STATE_CLEANING,
LitterBoxStatus.CLEAN_CYCLE_COMPLETE: STATE_DOCKED,
LitterBoxStatus.CAT_SENSOR_TIMING: STATE_DOCKED,
LitterBoxStatus.DRAWER_FULL_1: STATE_DOCKED,
LitterBoxStatus.DRAWER_FULL_2: STATE_DOCKED,
LitterBoxStatus.READY: STATE_DOCKED,
LitterBoxStatus.CAT_SENSOR_INTERRUPTED: STATE_PAUSED,
LitterBoxStatus.OFF: STATE_OFF,
}
LITTER_BOX_ENTITY = StateVacuumEntityDescription("litter_box", name="Litter Box")
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Litter-Robot cleaner using config entry."""
hub: LitterRobotHub = hass.data[DOMAIN][entry.entry_id]
entities = [
LitterRobotCleaner(robot=robot, hub=hub, description=LITTER_BOX_ENTITY)
for robot in hub.litter_robots()
]
async_update_unique_id(hass, PLATFORM, entities)
async_add_entities(entities)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_SLEEP_MODE,
{
vol.Required("enabled"): cv.boolean,
vol.Optional("start_time"): cv.time,
},
"async_set_sleep_mode",
)
class LitterRobotCleaner(LitterRobotEntity[LitterRobot], StateVacuumEntity):
"""Litter-Robot "Vacuum" Cleaner."""
_attr_supported_features = (
VacuumEntityFeature.START
| VacuumEntityFeature.STATE
| VacuumEntityFeature.STATUS
| VacuumEntityFeature.TURN_OFF
| VacuumEntityFeature.TURN_ON
)
@property
def state(self) -> str:
"""Return the state of the cleaner."""
return LITTER_BOX_STATUS_STATE_MAP.get(self.robot.status, STATE_ERROR)
@property
def status(self) -> str:
"""Return the status of the cleaner."""
return (
f"{self.robot.status.text}{' (Sleeping)' if self.robot.is_sleeping else ''}"
)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the cleaner on, starting a clean cycle."""
await self.robot.set_power_status(True)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the unit off, stopping any cleaning in progress as is."""
await self.robot.set_power_status(False)
async def async_start(self) -> None:
"""Start a clean cycle."""
await self.robot.start_cleaning()
async def async_set_sleep_mode(
self, enabled: bool, start_time: str | None = None
) -> None:
"""Set the sleep mode."""
await self.robot.set_sleep_mode(
enabled, self.parse_time_at_default_timezone(start_time)
)
@staticmethod
def parse_time_at_default_timezone(time_str: str | None) -> time | None:
"""Parse a time string and add default timezone."""
if time_str is None:
return None
if (parsed_time := dt_util.parse_time(time_str)) is None: # pragma: no cover
return None
return (
dt_util.start_of_local_day()
.replace(
hour=parsed_time.hour,
minute=parsed_time.minute,
second=parsed_time.second,
)
.timetz()
)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return device specific state attributes."""
return {
"is_sleeping": self.robot.is_sleeping,
"sleep_mode_enabled": self.robot.sleep_mode_enabled,
"power_status": self.robot.power_status,
"status": self.status,
}
|
[
"noreply@github.com"
] |
Adminiuga.noreply@github.com
|
52beb2eee386d8bea800f040b68e68ef55ac4084
|
c258135c1fd1e877d7ac76b7da7b287d79ac4ccf
|
/lists/migrations/0003_auto_20210217_0938.py
|
19a2c6ba61da21944607c0b24338d250c4571c9d
|
[] |
no_license
|
soo4767/airbnb-clone
|
5cccbcb63da2b8ed7e755aa784e5b81a62e35e0a
|
beded0d68d00625153997766584032453dc330e0
|
refs/heads/main
| 2023-07-18T11:20:34.824337
| 2021-09-02T08:23:44
| 2021-09-02T08:23:44
| 342,242,760
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
# Generated by Django 2.2.5 on 2021-02-17 00:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lists', '0002_auto_20210217_0933'),
]
operations = [
migrations.AlterField(
model_name='list',
name='rooms',
field=models.ManyToManyField(blank=True, related_name='lists', to='rooms.Room'),
),
]
|
[
"soo4767@naver.com"
] |
soo4767@naver.com
|
4dceba75990c87e8d1ab82a0f615d6313bafbec9
|
5e4f9cf736b6b3cbc7c3bf300ebf15f19f5514a9
|
/openelections/settings_prod.py
|
5dbc0db30e500433e9e0866b4d425bd10ba0c5eb
|
[] |
no_license
|
rawbeans/elections
|
1cb2df081ce3a24c68f073e1778051cc431038bf
|
1be01c82e77ad4c80420cd05b6c189ee0743ba30
|
refs/heads/master
| 2016-09-08T00:40:12.086208
| 2014-04-01T00:33:37
| 2014-04-01T00:33:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,292
|
py
|
from settings import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'elections_db',
'USER': 'mysql_user',
'PASSWORD': '398hudb4s{}DSg'
}
}
MEDIA_ROOT = '/home/admin-elections/elections/media'
BALLOT_ROOT = '/home/admin-elections/elections/ballots'
LOG_ROOT = '/home/admin-elections/elections/logs'
STUDENT_CSV = '/home/admin-elections/elections/students.csv'
WEBAUTH_SHARED_SECRET = 'abcdchangeme'
WEBAUTH_URL = 'https://www.stanford.edu/~rwoodby/cgi-bin/Django-WebAuth/webauth-host/wa-authenticate.php'
BASE_URL = 'http://petitions.stanford.edu/' #173.230.149.189 #petitions.stanford.edu
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = BASE_URL + 'media/'
STATIC_URL = BASE_URL + 'static/'
STATIC_ROOT = '/home/admin-elections/elections/static'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = BASE_URL + 'media/admin/'
|
[
"root@debian"
] |
root@debian
|
22d2f2c04e720558e3006f9a711d7f1ec1fdcd0d
|
815150729f61f5909f271d78cf4f484952a09667
|
/models.py
|
606ff69f02cfe0b6d8253d6e4185ca7904213756
|
[] |
no_license
|
sanket0024/MapApp
|
7c2c092a784fbabd47c26694019402eba2a02f8c
|
1a34c004b823930d08754451bd0df5241b41c69d
|
refs/heads/master
| 2021-05-09T05:01:18.949613
| 2018-02-11T04:15:11
| 2018-02-11T04:15:11
| 119,296,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,685
|
py
|
from flask_sqlalchemy import SQLAlchemy
from werkzeug import generate_password_hash, check_password_hash
import geocoder
import urllib2
import json
db = SQLAlchemy()
class User(db.Model):
__tablename__ = 'users'
uid = db.Column(db.Integer, primary_key = True)
firstname = db.Column(db.String(20))
lastname = db.Column(db.String(20))
email = db.Column(db.String(30), unique=True)
pwdhash = db.Column(db.String(100))
def __init__(self, firstname, lastname, email, password):
self.firstname = firstname.title()
self.lastname = lastname.title()
self.email = email.lower()
self.set_password(password)
def set_password(self, password):
self.pwdhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.pwdhash, password)
class Place(object):
def wikipath(self, slug):
return urllib2.urlparse.urljoin("http://en.wikipedia.org/wiki/", slug.replace(' ', '_'))
def latlng(self, address):
g = geocoder.google(address)
return (g.lat, g.lng)
def query(self, address):
lat, lng = self.latlng(address)
query_url = 'https://en.wikipedia.org/w/api.php?action=query&list=geosearch&gsradius=5000&gscoord={0}%7C{1}&gslimit=20&format=json'.format(lat, lng)
g = urllib2.urlopen(query_url)
res = g.read()
g.close()
data = json.loads(res)
places = []
for place in data['query']['geosearch']:
name = place['title']
meters = place['dist']
lat = place['lat']
lng = place['lon']
wiki_url = self.wikipath(name)
walking_time = int(meters/80)
d = {
'name': name,
'url': wiki_url,
'time': walking_time,
'lat': lat,
'lng': lng
}
places.append(d)
return places
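# Usage sketch (illustration only; this module targets Python 2 / urllib2):
#   u = User("Ada", "Lovelace", "ADA@example.com", "s3cret")
#   u.check_password("s3cret")  -> True; only the salted hash is kept in pwdhash.
#   Place().query("Boston, MA") -> geocodes the address and returns nearby Wikipedia
#   pages as dicts with name, url, time (walking minutes at ~80 m/min), lat and lng.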
|
[
"mathur.s@husky.neu.edu"
] |
mathur.s@husky.neu.edu
|
05aabec82a097f5660414b1d9190028b0779cf8f
|
f3b66c071fb97ae17690ad0e8dc17efa8ae01637
|
/python-selenium-pytest-allure/test_example.py
|
900092c5b56a41554e2355ff5cf34d0636ff7aea
|
[] |
no_license
|
unickq/test-automation-example
|
054d8e4e68ce23abb9ef31403195e390e452db5a
|
54071fae28d00176f9adf01f9514b6b7e7fc4587
|
refs/heads/master
| 2023-03-05T21:57:33.575595
| 2019-07-12T10:50:40
| 2019-07-12T10:50:40
| 193,077,100
| 2
| 1
| null | 2023-03-03T01:30:05
| 2019-06-21T10:10:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 936
|
py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import pytest
import allure
@pytest.fixture
def chrome(request):
global driver
driver = webdriver.Chrome()
driver.maximize_window()
yield driver
if request.node.rep_call.failed:
allure.attach(driver.get_screenshot_as_png(), name=request.function.__name__, attachment_type=allure.attachment_type.PNG)
driver.quit()
@allure.step
def search_for(query):
driver.get("https://github.com")
qEl = driver.find_element_by_name("q")
qEl.send_keys(query)
qEl.send_keys(Keys.RETURN)
@allure.step
def get_results(query):
search_for(query)
return [el.text for el in driver.find_elements_by_css_selector(".repo-list li h3")]
def test_should_pass(chrome):
assert "unickq/allure-nunit" in get_results("allure-nunit")
def test_should_fail(chrome):
assert "junit" in get_results("allure-nunit")
|
[
"nicktestqa@yahoo.com"
] |
nicktestqa@yahoo.com
|
6946c2d1d8de914fdba20d32e69dd195cb172215
|
3891b3b89d6863c395608af5e5bbd0c8ba6eb02b
|
/server/src/repository/eventmongorep.py
|
51ebd28b3c5e66229a057c1fcace101f59f7d310
|
[
"MIT"
] |
permissive
|
rickie95/MTG-coordinator
|
d031f19de891af37f6d8c95aa87df71b6e244cc3
|
fe3403a8a6b24d966b20ffb8261c60aadd0abde0
|
refs/heads/master
| 2022-02-26T11:17:35.576408
| 2022-02-15T07:29:53
| 2022-02-15T07:29:53
| 230,934,786
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 292
|
py
|
class EventMongoRepository:
database_address = "localhost"
def __init__(self):
self.client = "Opens a connection with mongo"
def add_event(self, event):
pass
def remove_event(self, event):
pass
def update_event(self, event):
pass
|
[
"Riccardo Malavolti"
] |
Riccardo Malavolti
|
f829289503872b6c211b8e36cdaff49797ef8a2d
|
4a73aa4a14c207a39648e73f1f07c9f0b7348936
|
/pca.py
|
470c4338bb0e2c63581d8f462610120f6e583b3d
|
[] |
no_license
|
nicktimko/penny-project
|
6058e9d7496fb916963698a188e92dde21947a4b
|
2e0941ef02691b92ea652c7206017fa974f38416
|
refs/heads/master
| 2021-08-22T17:12:41.442809
| 2017-11-30T19:09:04
| 2017-11-30T19:09:04
| 112,648,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,418
|
py
|
#!/usr/bin/env python
# from http://stackoverflow.com/a/2629704/194586
""" a small class for Principal Component Analysis
Usage:
p = PCA( A, fraction=0.90 )
In:
A: an array of e.g. 1000 observations x 20 variables, 1000 rows x 20 columns
fraction: use principal components that account for e.g.
90 % of the total variance
Out:
p.U, p.d, p.Vt: from numpy.linalg.svd, A = U . d . Vt
p.dinv: 1/d or 0, see NR
p.eigen: the eigenvalues of A*A, in decreasing order (p.d**2).
eigen[j] / eigen.sum() is variable j's fraction of the total variance;
look at the first few eigen[] to see how many PCs get to 90 %, 95 % ...
p.npc: number of principal components,
e.g. 2 if the top 2 eigenvalues are >= `fraction` of the total.
It's ok to change this; methods use the current value.
Methods:
The methods of class PCA transform vectors or arrays of e.g.
20 variables, 2 principal components and 1000 observations,
using partial matrices U' d' Vt', parts of the full U d Vt:
A ~ U' . d' . Vt' where e.g.
U' is 1000 x 2
d' is diag([ d0, d1 ]), the 2 largest singular values
Vt' is 2 x 20. Dropping the primes,
d . Vt 2 principal vars = p.vars_pc( 20 vars )
U 1000 obs = p.pc_obs( 2 principal vars )
U . d . Vt 1000 obs, p.obs( 20 vars ) = pc_obs( vars_pc( vars ))
fast approximate A . vars, using the `npc` principal components
Ut 2 pcs = p.obs_pc( 1000 obs )
V . dinv 20 vars = p.pc_vars( 2 principal vars )
V . dinv . Ut 20 vars, p.vars( 1000 obs ) = pc_vars( obs_pc( obs )),
fast approximate Ainverse . obs: vars that give ~ those obs.
Notes:
PCA does not center or scale A; you usually want to first
        A -= A.mean(axis=0)
        A /= A.std(axis=0)
with the little class Center or the like, below.
See also:
http://en.wikipedia.org/wiki/Principal_component_analysis
http://en.wikipedia.org/wiki/Singular_value_decomposition
Press et al., Numerical Recipes (2 or 3 ed), SVD
PCA micro-tutorial
iris-pca .py .png
"""
from __future__ import division
import numpy as np
dot = np.dot
# import bz.numpyutil as nu
# dot = nu.pdot
__version__ = "2010-04-14 apr"
__author_email__ = "denis-bz-py at t-online dot de"
#...............................................................................
class PCA:
def __init__( self, A, fraction=0.90 ):
assert 0 <= fraction <= 1
# A = U . diag(d) . Vt, O( m n^2 ), lapack_lite --
self.U, self.d, self.Vt = np.linalg.svd( A, full_matrices=False )
assert np.all( self.d[:-1] >= self.d[1:] ) # sorted
self.eigen = self.d**2
self.sumvariance = np.cumsum(self.eigen)
self.sumvariance /= self.sumvariance[-1]
self.npc = np.searchsorted( self.sumvariance, fraction ) + 1
self.dinv = np.array([ 1/d if d > self.d[0] * 1e-6 else 0
for d in self.d ])
def pc( self ):
""" e.g. 1000 x 2 U[:, :npc] * d[:npc], to plot etc. """
n = self.npc
return self.U[:, :n] * self.d[:n]
# These 1-line methods may not be worth the bother;
# then use U d Vt directly --
def vars_pc( self, x ):
n = self.npc
return self.d[:n] * dot( self.Vt[:n], x.T ).T # 20 vars -> 2 principal
def pc_vars( self, p ):
n = self.npc
return dot( self.Vt[:n].T, (self.dinv[:n] * p).T ) .T # 2 PC -> 20 vars
def pc_obs( self, p ):
n = self.npc
return dot( self.U[:, :n], p.T ) # 2 principal -> 1000 obs
def obs_pc( self, obs ):
n = self.npc
return dot( self.U[:, :n].T, obs ) .T # 1000 obs -> 2 principal
def obs( self, x ):
return self.pc_obs( self.vars_pc(x) ) # 20 vars -> 2 principal -> 1000 obs
def vars( self, obs ):
return self.pc_vars( self.obs_pc(obs) ) # 1000 obs -> 2 principal -> 20 vars
class Center:
""" A -= A.mean() /= A.std(), inplace -- use A.copy() if need be
uncenter(x) == original A . x
"""
# mttiw
def __init__( self, A, axis=0, scale=True, verbose=1 ):
self.mean = A.mean(axis=axis)
if verbose:
print "Center -= A.mean:", self.mean
A -= self.mean
if scale:
std = A.std(axis=axis)
self.std = np.where( std, std, 1. )
if verbose:
print "Center /= A.std:", self.std
A /= self.std
else:
self.std = np.ones( A.shape[-1] )
self.A = A
def uncenter( self, x ):
return np.dot( self.A, x * self.std ) + np.dot( x, self.mean )
#...............................................................................
if __name__ == "__main__":
import sys
csv = "iris4.csv" # wikipedia Iris_flower_data_set
# 5.1,3.5,1.4,0.2 # ,Iris-setosa ...
N = 1000
K = 20
fraction = .90
seed = 1
exec "\n".join( sys.argv[1:] ) # N= ...
np.random.seed(seed)
np.set_printoptions( 1, threshold=100, suppress=True ) # .1f
try:
A = np.genfromtxt( csv, delimiter="," )
N, K = A.shape
except IOError:
A = np.random.normal( size=(N, K) ) # gen correlated ?
print "csv: %s N: %d K: %d fraction: %.2g" % (csv, N, K, fraction)
Center(A)
print "A:", A
print "PCA ..." ,
p = PCA( A, fraction=fraction )
print "npc:", p.npc
print "% variance:", p.sumvariance * 100
print "Vt[0], weights that give PC 0:", p.Vt[0]
print "A . Vt[0]:", dot( A, p.Vt[0] )
print "pc:", p.pc()
print "\nobs <-> pc <-> x: with fraction=1, diffs should be ~ 0"
x = np.ones(K)
# x = np.ones(( 3, K ))
print "x:", x
pc = p.vars_pc(x) # d' Vt' x
print "vars_pc(x):", pc
print "back to ~ x:", p.pc_vars(pc)
Ax = dot( A, x.T )
pcx = p.obs(x) # U' d' Vt' x
print "Ax:", Ax
print "A'x:", pcx
print "max |Ax - A'x|: %.2g" % np.linalg.norm( Ax - pcx, np.inf )
b = Ax # ~ back to original x, Ainv A x
back = p.vars(b)
print "~ back again:", back
print "max |back - x|: %.2g" % np.linalg.norm( back - x, np.inf )
# end pca.py
|
[
"prometheus235@gmail.com"
] |
prometheus235@gmail.com
|
a0e4c303efc08dfcd95ce2ebc3bdcb5f9c2dc0ff
|
31cfeca70ca2208a6a09deaaaf9198d612e826cf
|
/face lock on laptop/sms.py
|
05ff067aee7ef5a8e6c14c0045223d1732862909
|
[] |
no_license
|
anchalmehta567/facelock-on-laptop-with-sms-alert
|
78255871f06eec85e444c2bbbdb4471e792cd2dd
|
00aa6eec1e4bb4448de7181c0afbb181bb6641c7
|
refs/heads/main
| 2023-02-09T21:33:21.825801
| 2021-01-12T11:05:24
| 2021-01-12T11:05:24
| 328,958,971
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
import requests
import json
def sendsms():
url = 'https://www.fast2sms.com/dev/bulk'
params = {
'authorization': 'noyaAWs9wUgpJtGvhK50PN8r1dY3CiEqTkbFMeS7OR2ZjxQ4ufW9FG8NcOCfX35yLS1d0ta4rewVsBIz',
'sender_id': 'FSTSMS',
        'message': "Alert! An unauthenticated user tried to access your personal computer.\n\nMessage sent by Admin.\n\nThanks",
'language': 'english',
'route': 'p',
'numbers': 8290532795
}
response = requests.get(url, params=params)
dic = response.json()
print(dic)
print(dic.get('return'))
sendsms()
|
[
"noreply@github.com"
] |
anchalmehta567.noreply@github.com
|
ce0ccdaa17279ed85c5b90ef3cec04693655c84c
|
1572a5ce5acf8e67de4653308200be98a4cd1ffa
|
/user/migrations/0002_auto_20210328_1256.py
|
b58829088b647d1c4b584497f7698b11c22df36c
|
[] |
no_license
|
shourygupta28/hacko-40-team_titans
|
9481c7db1114bb355ab4699709c7153ad19442eb
|
b72294240cce6819463b4975a1fd167af65b08be
|
refs/heads/main
| 2023-03-29T07:53:47.333891
| 2021-04-01T12:05:21
| 2021-04-01T12:05:21
| 351,955,989
| 3
| 0
| null | 2021-04-01T12:05:21
| 2021-03-27T01:40:02
|
HTML
|
UTF-8
|
Python
| false
| false
| 779
|
py
|
# Generated by Django 3.1.7 on 2021-03-28 07:26
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='coins',
),
migrations.AddField(
model_name='user',
name='account_no',
field=models.CharField(default='0X00000000', max_length=80, validators=[django.core.validators.MinLengthValidator(10)]),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
[
"ngupta_be19@thapar.edu"
] |
ngupta_be19@thapar.edu
|
c97fff87f7c26440db1bdb2a815a2eaa3c4b974e
|
eb76de661f71ea8ac7fe32d3eec5c936b9ec6393
|
/day5.py
|
5924c7952a7cb3dff46222a7907f43bf994da74d
|
[] |
no_license
|
zzwerling/Advent-Of-Code-Zach-2018
|
6f3c7ad5c851410c5302ce55de54c8176881f5b5
|
fc14ead4fa1a696b902ff0b5e2f750135e6553c7
|
refs/heads/master
| 2020-04-10T00:55:21.966785
| 2018-12-24T02:09:24
| 2018-12-24T02:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
from collections import defaultdict
import time
def day5part1():
with open("day5demoinput.txt","r") as day5input:
s = list(day5input.read())
running = True
while running:
indices_to_remove = []
for i in range(0, len(s)-1):
if reaction(s, i, i+1):
if (i not in indices_to_remove) and (i+1 not in indices_to_remove):
indices_to_remove.append(i)
indices_to_remove.append(i+1)
if indices_to_remove:
indices_to_remove.reverse()
for i in indices_to_remove:
del s[i]
else:
return len(s)
def day5part2():
alphabet = "abcdefghijklmnopqrstuvwxyz"
length_string = defaultdict(lambda: 0)
for i in range(len(alphabet)):
with open("day5input.txt", "r") as day5input:
input_string = list(day5input.read())
while alphabet[i] in input_string: input_string.remove(alphabet[i])
while alphabet[i].upper() in input_string: input_string.remove(alphabet[i].upper())
length_string[alphabet[i]] = react_string(input_string)
return min(length_string.values())
def react_string(s):
running = True
while running:
indices_to_remove = []
for i in range(0, len(s)-1):
if reaction(s, i, i+1):
if (i not in indices_to_remove) and (i+1 not in indices_to_remove):
indices_to_remove.append(i)
indices_to_remove.append(i+1)
if indices_to_remove:
indices_to_remove.reverse()
for i in indices_to_remove:
del s[i]
else:
return len(s)
def reaction(s, index1, index2):
if s[index1].lower() == s[index2].lower():
if s[index1].isupper() and s[index2].islower() or s[index1].islower() and s[index2].isupper():
return True
else:
return False
#start_time = time.time()
#print(day5part2())
print(day5part1())
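# For comparison (not part of the original solution): the same pairwise reaction
# rule can be applied in one pass with a stack, which scales much better on the
# full puzzle input than repeatedly rescanning the list.
def react_string_stack(s):
    stack = []
    for unit in s:
        if stack and reaction([stack[-1], unit], 0, 1):
            stack.pop()  # the two units annihilate each other
        else:
            stack.append(unit)
    return len(stack)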
|
[
"noreply@github.com"
] |
zzwerling.noreply@github.com
|
94222f8915f338f516fd10108308b2f667e5785f
|
0b8c83d28294bbf708585b44065776ad13895abd
|
/leadmanger/djangogram/djangogram/users/urls.py
|
8e888f93481a372e530f39dcba7d2a3ee1e87923
|
[
"MIT"
] |
permissive
|
chaSJ2112/web_Django
|
2a1613f531d7d3e8e34db7e486f63ebb3ae4c5e7
|
e9fa7ad7e5a952eaa601aa420f0d8a31d774c7b8
|
refs/heads/main
| 2023-06-25T04:13:06.032679
| 2021-07-27T14:17:36
| 2021-07-27T14:17:36
| 386,592,108
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from django.urls import path
from . import views
app_name = "users"
# call the main view function from views
urlpatterns = [
path('', views.main, name='main'),
path('signup/', views.signup, name='signup'),
]
|
[
"71691834+chaSJ2112@users.noreply.github.com"
] |
71691834+chaSJ2112@users.noreply.github.com
|
4d2f22cd2a93f8461925aed411d0c93d30f4f95b
|
ae6c4d72ba0b46fb525f749adb076b009b0842f1
|
/venv/Scripts/django-admin.py
|
e9fe348a13e2142eca5e7e5e4a3b1f7b8b945f5f
|
[] |
no_license
|
daria-darina/education1
|
35fb8b1dccf8bf61d19df0923b1dd2343ccba44c
|
093f864e049e2490276dd47654fac1011d8333c8
|
refs/heads/master
| 2022-08-13T19:19:11.735704
| 2020-05-16T09:04:12
| 2020-05-16T09:04:12
| 263,017,980
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
#!c:\users\tom\education\venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"dasha.luckina2015@yandex.ru"
] |
dasha.luckina2015@yandex.ru
|
f9a56a6585ea5dd3a31979189766a5a2c642fccd
|
b6e0acd984d0b739ebf7b2f6a5b9fc39cd66b1ee
|
/escola/migrations/0012_auto_20210630_2028.py
|
8793774f81a59a078020756431253394f17ca11d
|
[] |
no_license
|
isabellebussmann/test_ies2
|
215d14b7ba75c2a67d783f88de8cb56ef6060f4a
|
aeb61addd07f43eb82f88e87bd809aaa9cc0907f
|
refs/heads/master
| 2023-06-15T09:24:00.201655
| 2021-07-10T01:46:58
| 2021-07-10T01:46:58
| 383,865,980
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
# Generated by Django 3.2.4 on 2021-06-30 23:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('escola', '0011_auto_20210630_1707'),
]
operations = [
migrations.RemoveField(
model_name='pergunta',
name='prova',
),
migrations.CreateModel(
name='ModeloProva',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prova', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='escola.prova')),
('questoes', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='escola.pergunta')),
],
),
]
|
[
"isabellebussmann@hotmail.com"
] |
isabellebussmann@hotmail.com
|
b36c954f93245e8ae824e025a25379ee7b666823
|
9ca20119e9347576431fc587d0e4c742ea4d45af
|
/app/auth.py
|
5dbc836e1d6c6141b7b5362bc810e77e929be2c1
|
[] |
no_license
|
YimingDou/ShopifyBackendChallenge
|
43fcad4fd2a6dfc954245dd8b0ba5f924ef4b303
|
86e9f154e0ec70a11b354ac5b409299eb2255281
|
refs/heads/master
| 2022-12-09T23:23:03.647974
| 2020-09-11T00:13:06
| 2020-09-11T00:13:06
| 294,547,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,914
|
py
|
from flask import Blueprint, request
from flask_jwt_extended import create_access_token
from functools import wraps
from sqlalchemy.exc import IntegrityError
from responses import error_response, token_response, success_response
from model.user import User
from model.common import db
MISSING_JSON = "Missing JSON"
MISSING_PARAMETER = "Missing Username or Password"
INVALID_LOGIN = "Invalid login"
DUPLICATE_USER = "Username is already registered"
USER_CREATED = "User created"
auth = Blueprint('auth', __name__)
def require_json(func):
@wraps(func)
def wrapper():
if not request.is_json:
return error_response(MISSING_JSON)
return func()
return wrapper
# Pass username and password in cleartext
# Only for demonstration purposes
# In the future, change this to a more secure method
@auth.route('/register', methods=['POST'])
@require_json
def register():
username = request.json.get('username', None)
password = request.json.get('password', None)
print(username, password)
if not username or not password:
return error_response(MISSING_PARAMETER)
user = User(username=username, password=password)
db.session.add(user)
try:
db.session.commit()
except IntegrityError:
return error_response(DUPLICATE_USER)
return success_response(USER_CREATED)
@auth.route('/login', methods=['POST'])
@require_json
def login():
username = request.json.get('username', None)
password = request.json.get('password', None)
if not username or not password:
return error_response(MISSING_PARAMETER)
# Auth here
user = User.query.filter_by(username=username).first()
    if user is None or user.password != password:
return error_response(INVALID_LOGIN, 401)
# Identity can be any data that is json serializable
access_token = create_access_token(identity=user.id)
return token_response(access_token)
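# Usage sketch (illustration only, not part of the original module): exercising the
# endpoints above once the blueprint is registered on a running app. The base URL
# and the absence of a url_prefix are assumptions -- adjust to how the app mounts auth.
if __name__ == "__main__":
    import requests
    base = "http://localhost:5000"
    creds = {"username": "alice", "password": "secret"}
    print(requests.post(base + "/register", json=creds).json())  # expect USER_CREATED
    print(requests.post(base + "/login", json=creds).json())     # expect an access token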
|
[
"y5dou@uwaterloo.ca"
] |
y5dou@uwaterloo.ca
|
d8ae8d8746b20a062a25abfe7a48d4c53b3410fd
|
f6376348b154ca1d43eafb271f7cf463d8a83d86
|
/utils/gsn_argparse.py
|
ce5326094025b3bd2a391276170027767273074e
|
[
"MIT"
] |
permissive
|
SongFGH/graph_star
|
62cc9d7580680c20ce96d7507ce02863987c46bb
|
4c16b8ab0048669bc1c15d45af393dde896e4334
|
refs/heads/master
| 2020-06-22T08:34:49.985748
| 2019-07-15T02:52:59
| 2019-07-15T02:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,696
|
py
|
import torch.nn.functional as F
import argparse
from texttable import Texttable
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def str2activation(v):
    if v.lower() == "relu":
        return F.relu
    elif v.lower() == "elu":
        return F.elu
    else:
        raise argparse.ArgumentTypeError('Unsupported value encountered.')
def tab_printer(args):
"""
Function to print the logs in a nice tabular format.
:param args: Parameters used for the model.
"""
args = vars(args)
keys = sorted(args.keys())
t = Texttable()
t.add_rows([["Parameter", "Value"]] + [[k.replace("_", " ").capitalize(), str(args[k])] for k in keys])
print(t.draw())
parser = argparse.ArgumentParser(description='GSN args.')
parser.add_argument('--device', type=int, default="0")
parser.add_argument('--num_star', type=int, default=1)
parser.add_argument('--num_relations', type=int, default=1)
parser.add_argument('--one_hot_node', type=str2bool, default=False)
parser.add_argument('--one_hot_node_num', type=int, default=0)
parser.add_argument('--cross_star', type=str2bool, default=True)
parser.add_argument('--dropout', type=float, default=0)
parser.add_argument('--coef_dropout', type=float, default=0)
parser.add_argument('--residual', type=str2bool, default=True)
parser.add_argument('--residual_star', type=str2bool, default=True)
parser.add_argument('--layer_norm', type=str2bool, default=True)
parser.add_argument('--layer_norm_star', type=str2bool, default=True)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--use_e', type=str2bool, default=False)
parser.add_argument('--heads', type=int, default=4)
parser.add_argument('--hidden', type=int, default=1024)
parser.add_argument('--activation', type=str2activation, default="elu")
parser.add_argument('--num_layers', type=int, default=6)
parser.add_argument('--cross_layer', type=str2bool, default=True)
parser.add_argument('--l2', type=float, default=0)
parser.add_argument('--patience', type=int, default=100)
parser.add_argument('--additional_self_loop_relation_type', type=str2bool, default=True)
parser.add_argument('--additional_node_to_star_relation_type', type=str2bool, default=True)
parser.add_argument('--star_init_method', type=str, default="attn")
parser.add_argument('--relation_score_function', type=str, default="DistMult",
help="DistMult")
parser.add_argument('--dataset', type=str,default="")
parser.add_argument('--epochs', type=int, default=2000)
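# Usage sketch (illustration only): parse the options defined above and print them
# with tab_printer. The dataset name passed here is just a placeholder value.
if __name__ == "__main__":
    args = parser.parse_args(["--dataset", "cora", "--hidden", "256"])
    tab_printer(args)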
|
[
"noone@noone.com"
] |
noone@noone.com
|
6c55f2b2b69d2351eae808a8e6a9a30f83003379
|
8352d1f51d3dc3b248bf51894a92d2ebea28141c
|
/venv/Lib/site-packages/pylablib/core/dataproc/__init__.py
|
cdc5e96eaa2025754720e9884887fc06e603068c
|
[] |
no_license
|
jakfarshodiq230/python-LSA_TF-IDF
|
04dd8f0982cca8c53dc7d6a06cff8bf511830f36
|
5fa4a748249dff60ac70d8b98eca92c786fc9d42
|
refs/heads/master
| 2022-12-25T19:10:35.636133
| 2020-09-29T17:24:43
| 2020-09-29T17:24:43
| 299,683,621
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
from . import waveforms
from .waveforms import is_ascending, is_descending, is_ordered, is_linear
from .waveforms import get_x_column, get_y_column
from .waveforms import find_closest_arg, find_closest_value, get_range_indices, cut_to_range, cut_out_regions
from .waveforms import find_discrete_step, unwrap_mod_data
from .waveforms import xy2c, c2xy
from . import fourier
from .fourier import fourier_transform, inverse_fourier_transform, power_spectral_density
from . import filters
from .filters import convolution_filter, gaussian_filter, gaussian_filter_nd, low_pass_filter, high_pass_filter, sliding_average, median_filter
from .filters import decimate, binning_average, decimate_datasets, decimate_full, collect_into_bins, split_into_bins
from . import fitting
from .fitting import Fitter, get_best_fit
from . import callable as callable_func
from .callable import to_callable, MultiplexedCallable, JoinedCallable
from . import interpolate
from .interpolate import interpolate1D_func, interpolate1D, interpolate2D, interpolateND, regular_grid_from_scatter, interpolate_trace
from . import specfunc
from .specfunc import get_kernel_func, get_window_func
from . import feature as feature_detect
from .feature import get_baseline_simple, subtract_baseline, find_peaks_cutoff, multi_scale_peakdet, rescale_peak, peaks_sum_func, find_local_extrema, find_state_hysteretic, trigger_hysteretic
|
[
"jakfarshodiq230@gmail.com"
] |
jakfarshodiq230@gmail.com
|
3c0299ef461117c90a7b387d733390990e96c5a2
|
c4ea117eff2469d622f3d7986d23dca79fdd9d2f
|
/setConfigSN.py
|
466123d905f427386da1cb1990ac7ee1a7f7d8fd
|
[] |
no_license
|
SimengBian/Mobihoc18
|
2a850646d42e9731fbb7a69c42a19830cc444f7d
|
28c3d5234bb8baa411b266b56a9887c2b2224562
|
refs/heads/master
| 2021-08-31T06:24:49.447762
| 2017-12-20T14:42:01
| 2017-12-20T14:42:01
| 110,240,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
import numpy as np
filename = "config1/"
'''
Substrate Network (SN)
'''
numOfServer = 6 # number of servers
serverCapacities = np.zeros(numOfServer)
for c in range(numOfServer):
serverCapacities[c] = 16
idleEnergies = np.zeros(numOfServer)
for c in range(numOfServer):
idleEnergies[c] = 0.805
maxEnergies = np.zeros(numOfServer)
for c in range(numOfServer):
maxEnergies[c] = 27.35
np.savez(filename + "SN Information.npz", numOfServer=numOfServer, serverCapacities=serverCapacities, idleEnergies=idleEnergies, maxEnergies=maxEnergies)
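# Round-trip check (illustration only): the arrays saved above can be read back
# with np.load, using the same keyword names that were passed to np.savez.
snInfo = np.load(filename + "SN Information.npz")
print(snInfo["serverCapacities"])  # -> [16. 16. 16. 16. 16. 16.]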
|
[
"biansimeng@biansimengdeMacBook-Air.local"
] |
biansimeng@biansimengdeMacBook-Air.local
|
bcd67aa5afe669f6003298838868899baedc2815
|
6d17d2b811376f176a998c30009a6ed4c41c82c1
|
/common/pub/excelconfig.py
|
4250fd740aa3872d920a3dcf343ef88a79d8ccf4
|
[
"Apache-2.0"
] |
permissive
|
no-wings/AutoUitest
|
39e9c89fa529362eb076482218910bb4ab298393
|
b3ff7db29f165bd40b89a1e33463efdaf801b334
|
refs/heads/master
| 2022-12-12T22:16:31.134368
| 2020-09-07T03:43:12
| 2020-09-07T03:59:32
| 293,417,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# coding=utf-8
import xlwt
# Export data to Excel
def export(fields, results, table_name, outputpath):
'''
    :param fields: field descriptors fetched from the database (name first, as in cursor.description)
    :param results: a list or tuple of row tuples
:param table_name:
:param outputpath:
:return:
'''
    # collect all results
    # the field names come from the MySQL cursor description
workbook = xlwt.Workbook()
sheet = workbook.add_sheet('table_' + table_name, cell_overwrite_ok=True)
    # write the header row with the field names
for field in range(0, len(fields)):
sheet.write(0, field, fields[field][0])
    # fetch and write each data row
row = 1
col = 0
for row in range(1, len(results) + 1):
for col in range(0, len(fields)):
sheet.write(row, col, u'%s' % results[row - 1][col])
workbook.save(outputpath)
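# Usage sketch (illustration only): "fields" mimics a cursor.description-style
# sequence and "results" a list of row tuples, which is what export() expects.
if __name__ == "__main__":
    demo_fields = [("id",), ("name",)]
    demo_rows = [(1, u"alice"), (2, u"bob")]
    export(demo_fields, demo_rows, "users", "users.xls")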
|
[
"45419770+xiongwr@users.noreply.github.com"
] |
45419770+xiongwr@users.noreply.github.com
|
046f7722f8d047a95d007f78b54691d31345e08e
|
cc1f1ee71ddc572e33512a44af41108bf1e3173b
|
/semeval23/clickbait-spoiling-eval.py
|
e58a9b1693f0f343e56ce78d849437b6b5b61b0e
|
[
"MIT"
] |
permissive
|
pan-webis-de/pan-code
|
774252979edf2e6b757ec04332e8c572d57e91d7
|
8dd0d44a098620ead0d1b07999f122f3f31c83a7
|
refs/heads/master
| 2023-05-29T09:12:43.081488
| 2023-05-28T08:03:17
| 2023-05-28T08:03:17
| 240,560,328
| 34
| 34
|
MIT
| 2023-03-30T08:45:06
| 2020-02-14T17:11:37
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 12,636
|
py
|
#!/usr/bin/env python3
import argparse
from os.path import exists
from glob import glob
from os.path import isdir
from sklearn.metrics import balanced_accuracy_score, precision_score, recall_score, f1_score
import json
from nltk.translate.bleu_score import sentence_bleu
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
from bert_score import score
import subprocess
import tempfile
from copy import deepcopy
def error(msg):
print(' [\033[91mx\033[0m] ' + msg)
exit(1)
def success(msg):
print(' [\033[92mo\033[0m] ' + msg)
def load_json_lines(f):
if not exists(f):
error('The file "' + f + '" does not exist.')
ret = []
num = 1
if isdir(f):
f = glob(f + '/*.json*')
if len(f) != 1:
            error('The input is a directory that contains multiple json files. Please create only a single json file. Got ' + str(f))
f = f[0]
with open(f, 'r') as inp:
for l in inp:
try:
ret += [json.loads(l)]
except:
error('Invalid line ' + str(num) + ' in "' + f + '" with content: ' + l.strip())
num += 1
success('The file ' + f + ' is in JSONL format.')
return ret
def spoiler_predictions_to_map(l, error=error, field='spoilerType'):
if l is None or len(l) == 0:
error('Spoiler predictions are empty.')
uuids = []
for i in l:
if 'uuid' not in i.keys() or field not in i.keys():
error(f'Spoiler predictions do not have all required fields. Expected fields "uuid" and "{field}". Got: ' + str(i))
return
uuids += [i['uuid']]
if len(l) != len(set(uuids)):
        error('Spoiler predictions have duplicates. I found ' + str(len(l)) + ' entries but only ' + str(len(set(uuids))) + ' unique uuids.')
return
success('Spoiler predictions have correct format. Found ' + str(len(l)))
return {i['uuid']: i[field] if type(i[field]) is not list else i[field][0] for i in l}
def normalize_spoiler_generation(i, error, expected_spoiler_type=None):
if 'uuid' not in i or 'spoiler' not in i:
error('Spoiler generation does not have all required fields. Expected fields are uuid and spoiler. Got: ' + str(i))
return
if expected_spoiler_type and expected_spoiler_type not in i['tags']:
return True
return {i['uuid']: i['spoiler']}
def spoiler_generations_to_map(l, error=error, expected_spoiler_type=None):
if l is None or len(l) == 0:
error('Spoiler predictions are empty.')
uuids = []
for i in deepcopy(l):
i = normalize_spoiler_generation(i, error, expected_spoiler_type)
if not i:
return
elif i is True:
continue
uuids += list(i.keys())
if not expected_spoiler_type and len(l) != len(set(uuids)):
        error('Spoiler generations have duplicates. I found ' + str(len(l)) + ' entries but only ' + str(len(set(uuids))) + ' unique uuids.')
l = [normalize_spoiler_generation(i, error, expected_spoiler_type) for i in l]
l = [i for i in l if i and i is not True]
success('Spoiler generations have correct format. Found ' + str(len(l)))
ret = {}
for i in l:
for k, v in i.items():
assert k not in ret
ret[k] = v
return ret
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate submissions to the clickbait spoiling task.')
parser.add_argument('--input_run', type=str, help='The input run (expected in jsonl format) produced by a system that should be evaluated.', required=True)
parser.add_argument('--ground_truth_classes', type=str, help='The ground truth classes used to evaluate submissions to task 1 (spoiler type generation). For the evaluation of task 2 (spoiler generation), this can be different from "--ground_truth_spoilers" to evaluate the effectiveness using real spoiler predictions.', required=False)
parser.add_argument('--ground_truth_spoilers', type=str, help='The ground truth spoilers used to evaluate submissions to task 2 (spoiler generation).', required=False)
parser.add_argument('--task', type=str, help='The task to evaluate. Choose 1 (spoiler type classification) or 2 (spoiler generation).', choices=['1', '2'], required=True)
parser.add_argument('--output_prototext', type=str, help='Write evalualuation results as prototext file to this location.', required=False)
return parser.parse_args()
def to_prototext(d):
ret = ''
for k, v in d.items():
ret += 'measure{\n key: "' + str(k) + '"\n value: "' + str(v) + '"\n}\n'
return ret.strip()
def filter_to(y_true, y_pred, filter_value):
y_true_filtered, y_pred_filtered = [], []
for i in range(len(y_true)):
if y_true[i] == filter_value or y_pred[i] == filter_value:
y_true_filtered += [1 if y_true[i] == filter_value else 0]
y_pred_filtered += [1 if y_pred[i] == filter_value else 0]
return (y_true_filtered, y_pred_filtered)
def precision_on(y_true, y_pred, filter_value):
y_true_filtered, y_pred_filtered = filter_to(y_true, y_pred, filter_value)
return precision_score(y_true_filtered, y_pred_filtered)
def recall_on(y_true, y_pred, filter_value):
y_true_filtered, y_pred_filtered = filter_to(y_true, y_pred, filter_value)
return recall_score(y_true_filtered, y_pred_filtered)
def f1_on(y_true, y_pred, filter_value):
y_true_filtered, y_pred_filtered = filter_to(y_true, y_pred, filter_value)
return f1_score(y_true_filtered, y_pred_filtered)
def create_protobuf_for_task_1(actual, expected):
keys = sorted(actual.keys())
missing_predictions = 0
y_true = []
y_pred = []
for k in keys:
y_true += [expected[k]]
if k in actual:
y_pred += [actual[k]]
else:
missing_predictions += 1
y_pred += ['']
return {
"result-size": len(keys),
'balanced-accuracy': balanced_accuracy_score(y_true, y_pred),
'precision-for-phrase-spoilers': precision_on(y_true, y_pred, 'phrase'),
'recall-for-phrase-spoilers': recall_on(y_true, y_pred, 'phrase'),
'f1-for-phrase-spoilers': f1_on(y_true, y_pred, 'phrase'),
'precision-for-passage-spoilers': precision_on(y_true, y_pred, 'passage'),
'recall-for-passage-spoilers': recall_on(y_true, y_pred, 'passage'),
'f1-for-passage-spoilers': f1_on(y_true, y_pred, 'passage'),
'precision-for-multi-spoilers': precision_on(y_true, y_pred, 'multi'),
'recall-for-multi-spoilers': recall_on(y_true, y_pred, 'multi'),
'f1-for-multi-spoilers': f1_on(y_true, y_pred, 'multi'),
'missing-predictions': missing_predictions
}
def eval_task_1(input_run, ground_truth_classes, output_file):
input_run = spoiler_predictions_to_map(input_run)
ret = None
if ground_truth_classes == None:
success('No ground-truth is passed. I tested the input run and the input run is valid.')
ret = to_prototext({"result-size": len(input_run.keys())})
else:
ground_truth_classes = spoiler_predictions_to_map(ground_truth_classes, field='tags')
ret = to_prototext(create_protobuf_for_task_1(input_run, ground_truth_classes))
if output_file:
with open(output_file, 'w') as f:
f.write(ret)
def bleu_score(truth, prediction):
"""
From: https://github.com/webis-de/acl22-clickbait-spoiling/blob/470f488bd532da1e75812de6a94458ec80fdb2b9/evaluation/meteor-metric.py#L72
"""
def stopfilter(tokens):
tmp = [token for token in tokens if token not in stopwords.words('english')]
res = [token.lower() for token in tmp if token not in string.punctuation]
return res
def make_score(trut, predi):
if len(trut) > 3 and len(predi) > 3:
weights = (1./4., 1./4., 1./4., 1./4.)
elif len(trut) > 2 and len(predi) > 2:
weights = (1./3., 1./3., 1./3.)
elif len(trut) > 1 and len(predi) > 1:
weights = (1./2., 1./2.)
else:
weights = (1., 0.)
if (len(weights) == 4) and (len(trut) < 4 or len(predi) < 4):
print(trut)
print(predi)
print(weights)
print('\n')
return sentence_bleu([trut], predi, weights=weights)
score = 0.
lem_score = 0.
write_dict = {'single_scores': {}, 'scores': {}}
for i in range(len(truth)):
real_answer = truth[i]
if type(real_answer) is list:
real_answer = ' '.join(real_answer)
pred_answer = prediction[i]
if type(pred_answer) is list:
pred_answer = ' '.join(pred_answer)
lem_truth_tokens = stopfilter(word_tokenize(real_answer.replace('\n', '')))
lem_prediction_tokens = stopfilter(word_tokenize(pred_answer.replace('\n', '')))
i_lem_score = make_score(lem_truth_tokens, lem_prediction_tokens)
lem_score += i_lem_score
return lem_score / len(truth)
def bert_score(truth, prediction):
assert len(truth) == len(prediction)
prec, rec, f1 = score(prediction, truth, lang="en")
return float(f1.mean())
def meteor_score(truth, prediction):
with tempfile.TemporaryDirectory() as tmpdirname:
assert len(truth) == len(prediction)
with open(tmpdirname + '/truths.txt', 'w') as truths, open(tmpdirname + '/preds.txt', 'w') as preds:
for t in truth:
truths.write(t + '\n')
for p in prediction:
preds.write(p + '\n')
cmd = ['java', '-Xmx2G', '-jar', '/meteor-1.5.jar', tmpdirname + '/truths.txt', tmpdirname + '/preds.txt', '-l', 'en', '-norm', '-t', 'adq']
meteor_output = subprocess.check_output(cmd).decode('utf-8')
try:
return float(meteor_output.split('\n\nFinal score:')[1].strip())
except:
raise ValueError('Could not extract the final score out of "' + meteor_output + '".')
def create_protobuf_for_task_2(actual, expected):
keys = sorted(expected.keys())
missing_predictions = 0
y_true = []
y_pred = []
for k in keys:
exp = expected[k]
if type(exp) is list:
exp = ' '.join(exp)
y_true += [exp.replace('\n', ' ').strip()]
if k in actual:
act = actual[k]
if type(act) is list:
act = ' '.join(act)
y_pred += [act.replace('\n', ' ').strip()]
else:
missing_predictions += 1
y_pred += ['']
return {
"result-size": len(keys),
'bleu-score': bleu_score(y_true, y_pred),
'bert-score': bert_score(y_true, y_pred),
'meteor-score': meteor_score(y_true, y_pred),
'missing-predictions': missing_predictions
}
def eval_task_2(input_run, ground_truth_classes, ground_truth_spoilers, output_file):
input_run = spoiler_generations_to_map(input_run)
if ground_truth_spoilers == None:
ret = to_prototext({"result-size": len(input_run.keys())})
success('No ground-truth is passed. I tested the input run and the input run is valid.')
else:
ret = {}
for (display_name, tag_name) in [('all-spoilers', None), ('phrase-spoilers', 'phrase'), ('passage-spoilers', 'passage'), ('multi-spoilers', 'multi')]:
print('Run evaluation for ' + display_name)
filtered_ground_truth_spoilers = spoiler_generations_to_map(deepcopy(ground_truth_spoilers), expected_spoiler_type=tag_name)
for k,v in create_protobuf_for_task_2(input_run, filtered_ground_truth_spoilers).items():
ret[k + '-' + display_name] = v
ret = to_prototext(ret)
if output_file:
with open(output_file, 'w') as f:
f.write(ret)
if __name__ == '__main__':
args = parse_args()
input_run = load_json_lines(args.input_run)
ground_truth_classes = None if not args.ground_truth_classes else load_json_lines(args.ground_truth_classes)
ground_truth_spoilers = None if not args.ground_truth_spoilers else load_json_lines(args.ground_truth_spoilers)
if args.task == '1':
eval_task_1(input_run, ground_truth_classes, args.output_prototext)
elif args.task == '2':
eval_task_2(input_run, ground_truth_classes, ground_truth_spoilers, args.output_prototext)
else:
error('Unknown task. Expected 1 or 2. Got: ' + str(args.task))
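# Example invocation (illustration only; the file names are placeholders):
#   ./clickbait-spoiling-eval.py --task 1 \
#       --input_run run.jsonl \
#       --ground_truth_classes ground-truth.jsonl \
#       --output_prototext evaluation.prototext
# All flags correspond to the argparse options defined in parse_args() above;
# use --task 2 together with --ground_truth_spoilers for spoiler generation.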
|
[
"maik.froebe@informatik.uni-halle.de"
] |
maik.froebe@informatik.uni-halle.de
|
4923d7378b7b003f9b938d0cb2b4221e31da211e
|
bbc6498bfe3bd62b7c99e2eadd79be4d154ddc11
|
/functional_tests.py
|
61199afae4185173410f8558e79a16457d33042e
|
[] |
no_license
|
jawahar/tdd_superlists
|
993d0900782ea907911e95ffb383591d9e71c93f
|
99570339844430c33e9e5cf3d9e095ded0c9394e
|
refs/heads/master
| 2021-01-13T02:24:26.109972
| 2013-07-05T19:52:15
| 2013-07-05T19:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,400
|
py
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
# get the homepage of the app
self.browser.get('http://localhost:8000')
self.assertIn('To-Do', self.browser.title)
# check header of page
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# enter some input into the page
input_box = self.browser.find_element_by_id('id_new_item')
self.assertEqual(input_box.get_attribute('placeholder'), 'Enter a to-do item')
input_box.send_keys('Buy peacock feathers')
input_box.send_keys(Keys.ENTER)
# get the table from the response
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertTrue(any(row.text == '1: Buy peacock feathers' for row in rows),
"New to-do item did not appear in table")
self.fail('Finish the test!')
if __name__ == '__main__':
unittest.main()
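# The four lines below are left over from the first iteration of this functional
# test; when the file is run as a script they are never reached, because
# unittest.main() exits first.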
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
assert 'Django' in browser.title
browser.quit()
|
[
"jawahar.malhotra@gmail.com"
] |
jawahar.malhotra@gmail.com
|
89d50378869ed6dde8fc8a1ec6f17758ef6117c1
|
33a7d059f6b3decfe762861c823bc4b5b0680633
|
/UAS1.py
|
fdf4c8bc6a9bb3142282c82cd04238bda41bab7b
|
[] |
no_license
|
Diyah9/uass
|
01ef176ee70ff523dd39388866222567f997be25
|
1ac124325870cac22e5d4f98f32d5c1c5f94c6b5
|
refs/heads/master
| 2020-03-11T08:46:49.326714
| 2018-04-17T11:14:00
| 2018-04-17T11:14:00
| 129,892,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
def fb():
print("Modul Nilai Random")
import random
nilai = int(input("Masukan nilai n: "))
for i in range (nilai) :
while 1:
a = random.random()
if a < 0.5:
break
print (a)
|
[
"diyahnur0809@gmail.com"
] |
diyahnur0809@gmail.com
|
a9beba1a67b641f22baf82bb07689f9350c7b49f
|
79d5395cf97828672680d985603106d60834b3f6
|
/HW3/RBTree.py
|
6f38869c5cf3835c4255a6a988d63b9d5ed70f96
|
[] |
no_license
|
sunwmax/Data-Structures
|
86449175c86134658f62533cfbc067eeeaeeb4b4
|
1f23ce2b44b78ad8122275db38274d41bd35b6be
|
refs/heads/main
| 2023-01-31T01:31:19.020602
| 2020-12-15T03:12:19
| 2020-12-15T03:12:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,887
|
py
|
"""
A classic (not left-leaning) Red-Black Tree implementation, supporting addition and deletion.
"""
# The possible Node colors
BLACK = 'BLACK'
RED = 'RED'
NIL = 'NIL'
class Node:
def __init__(self, value, color, parent, left=None, right=None):
self.value = value
self.color = color
self.parent = parent
self.left = left
self.right = right
def __repr__(self):
return '{color} {val} Node'.format(color=self.color, val=self.value)
def __iter__(self):
if self.left.color != NIL:
yield from self.left.__iter__()
yield self.value
if self.right.color != NIL:
yield from self.right.__iter__()
def __eq__(self, other):
if self.color == NIL and self.color == other.color:
return True
if self.parent is None or other.parent is None:
parents_are_same = self.parent is None and other.parent is None
else:
parents_are_same = self.parent.value == other.parent.value and self.parent.color == other.parent.color
return self.value == other.value and self.color == other.color and parents_are_same
def has_children(self) -> bool:
""" Returns a boolean indicating if the node has children """
return bool(self.get_children_count())
def get_children_count(self) -> int:
""" Returns the number of NOT NIL children the node has """
if self.color == NIL:
return 0
return sum([int(self.left.color != NIL), int(self.right.color != NIL)])
class RedBlackTree:
# every node has null nodes as children initially, create one such object for easy management
NIL_LEAF = Node(value=None, color=NIL, parent=None)
def __init__(self):
self.count = 0
self.root = None
self.ROTATIONS = {
# Used for deletion and uses the sibling's relationship with his parent as a guide to the rotation
'L': self._right_rotation,
'R': self._left_rotation
}
def __iter__(self):
if not self.root:
return list()
yield from self.root.__iter__()
def add(self, value):
if not self.root:
self.root = Node(value, color=BLACK, parent=None, left=self.NIL_LEAF, right=self.NIL_LEAF)
self.count += 1
return
parent, node_dir = self._find_parent(value)
if node_dir is None:
return # value is in the tree
new_node = Node(value=value, color=RED, parent=parent, left=self.NIL_LEAF, right=self.NIL_LEAF)
if node_dir == 'L':
parent.left = new_node
else:
parent.right = new_node
self._try_rebalance(new_node)
self.count += 1
def remove(self, value):
"""
Try to get a node with 0 or 1 children.
Either the node we're given has 0 or 1 children or we get its successor.
"""
node_to_remove = self.find_node(value)
if node_to_remove is None: # node is not in the tree
return
if node_to_remove.get_children_count() == 2:
# find the in-order successor and replace its value.
# then, remove the successor
successor = self._find_in_order_successor(node_to_remove)
node_to_remove.value = successor.value # switch the value
node_to_remove = successor
# has 0 or 1 children!
self._remove(node_to_remove)
self.count -= 1
def contains(self, value) -> bool:
""" Returns a boolean indicating if the given value is present in the tree """
return bool(self.find_node(value))
def ceil(self, value) -> int or None:
"""
Given a value, return the closest value that is equal or bigger than it,
returning None when no such exists
"""
if self.root is None: return None
last_found_val = None if self.root.value < value else self.root.value
def find_ceil(node):
nonlocal last_found_val
if node == self.NIL_LEAF:
return None
if node.value == value:
last_found_val = node.value
return node.value
elif node.value < value:
# go right
return find_ceil(node.right)
else:
# this node is bigger, save its value and go left
last_found_val = node.value
return find_ceil(node.left)
find_ceil(self.root)
return last_found_val
def floor(self, value) -> int or None:
"""
Given a value, return the closest value that is equal or less than it,
returning None when no such exists
"""
if self.root is None: return None
last_found_val = None if self.root.value > value else self.root.value
def find_floor(node):
nonlocal last_found_val
if node == self.NIL_LEAF:
return None
if node.value == value:
last_found_val = node.value
return node.value
elif node.value < value:
                # this node is smaller, save its value and go right, trying to find a closer one
last_found_val = node.value
return find_floor(node.right)
else:
return find_floor(node.left)
find_floor(self.root)
return last_found_val
def _remove(self, node):
"""
Receives a node with 0 or 1 children (typically some sort of successor)
and removes it according to its color/children
:param node: Node with 0 or 1 children
"""
left_child = node.left
right_child = node.right
not_nil_child = left_child if left_child != self.NIL_LEAF else right_child
if node == self.root:
if not_nil_child != self.NIL_LEAF:
# if we're removing the root and it has one valid child, simply make that child the root
self.root = not_nil_child
self.root.parent = None
self.root.color = BLACK
else:
self.root = None
elif node.color == RED:
if not node.has_children():
# Red node with no children, the simplest remove
self._remove_leaf(node)
else:
"""
Since the node is red he cannot have a child.
If he had a child, it'd need to be black, but that would mean that
the black height would be bigger on the one side and that would make our tree invalid
"""
raise Exception('Unexpected behavior')
else: # node is black!
if right_child.has_children() or left_child.has_children(): # sanity check
raise Exception('The red child of a black node with 0 or 1 children'
' cannot have children, otherwise the black height of the tree becomes invalid! ')
if not_nil_child.color == RED:
"""
Swap the values with the red child and remove it (basically un-link it)
Since we're a node with one child only, we can be sure that there are no nodes below the red child.
"""
node.value = not_nil_child.value
node.left = not_nil_child.left
node.right = not_nil_child.right
else: # BLACK child
# 6 cases :o
self._remove_black_node(node)
def _remove_leaf(self, leaf):
""" Simply removes a leaf node by making it's parent point to a NIL LEAF"""
if leaf.value >= leaf.parent.value:
# in those weird cases where they're equal due to the successor swap
leaf.parent.right = self.NIL_LEAF
else:
leaf.parent.left = self.NIL_LEAF
def _remove_black_node(self, node):
"""
Loop through each case recursively until we reach a terminating case.
What we're left with is a leaf node which is ready to be deleted without consequences
"""
self.__case_1(node)
self._remove_leaf(node)
def __case_1(self, node):
r"""
Case 1 is when there's a double black node on the root
Because we're at the root, we can simply remove it
and reduce the black height of the whole tree.
__|10B|__ __10B__
/ \ ==> / \
9B 20B 9B 20B
"""
if self.root == node:
node.color = BLACK
return
self.__case_2(node)
def __case_2(self, node):
r"""
Case 2 applies when
the parent is BLACK
the sibling is RED
the sibling's children are BLACK or NIL
It takes the sibling and rotates it
40B 60B
/ \ --CASE 2 ROTATE--> / \
|20B| 60R LEFT ROTATE 40R 80B
DBL BLACK IS 20----^ / \ SIBLING 60R / \
50B 80B |20B| 50B
(if the sibling's direction was left of it's parent, we would RIGHT ROTATE it)
Now the original node's parent is RED
and we can apply case 4 or case 6
"""
parent = node.parent
sibling, direction = self._get_sibling(node)
if sibling.color == RED and parent.color == BLACK and sibling.left.color != RED and sibling.right.color != RED:
self.ROTATIONS[direction](node=None, parent=sibling, grandfather=parent)
parent.color = RED
sibling.color = BLACK
return self.__case_1(node)
self.__case_3(node)
def __case_3(self, node):
r"""
Case 3 deletion is when:
the parent is BLACK
the sibling is BLACK
the sibling's children are BLACK
Then, we make the sibling red and
pass the double black node upwards
Parent is black
___50B___ Sibling is black ___50B___
/ \ Sibling's children are black / \
30B 80B CASE 3 30B |80B| Continue with other cases
/ \ / \ ==> / \ / \
20B 35R 70B |90B|<---REMOVE 20B 35R 70R X
/ \ / \
34B 37B 34B 37B
"""
parent = node.parent
sibling, _ = self._get_sibling(node)
if (sibling.color == BLACK and parent.color == BLACK
and sibling.left.color != RED and sibling.right.color != RED):
# color the sibling red and forward the double black node upwards
# (call the cases again for the parent)
sibling.color = RED
return self.__case_1(parent) # start again
self.__case_4(node)
def __case_4(self, node):
r"""
If the parent is red and the sibling is black with no red children,
simply swap their colors
DB-Double Black
__10R__ __10B__ The black height of the left subtree has been incremented
/ \ / \ And the one below stays the same
DB 15B ===> X 15R No consequences, we're done!
/ \ / \
12B 17B 12B 17B
"""
parent = node.parent
if parent.color == RED:
sibling, direction = self._get_sibling(node)
if sibling.color == BLACK and sibling.left.color != RED and sibling.right.color != RED:
parent.color, sibling.color = sibling.color, parent.color # switch colors
return # Terminating
self.__case_5(node)
def __case_5(self, node):
r"""
Case 5 is a rotation that changes the circumstances so that we can do a case 6
If the closer node is red and the outer BLACK or NIL, we do a left/right rotation, depending on the orientation
This will showcase when the CLOSER NODE's direction is RIGHT
___50B___ __50B__
/ \ / \
30B |80B| <-- Double black 35B |80B| Case 6 is now
/ \ / \ Closer node is red (35R) / \ / applicable here,
20B 35R 70R X Outer is black (20B) 30R 37B 70R so we redirect the node
/ \ So we do a LEFT ROTATION / \ to it :)
34B 37B on 35R (closer node) 20B 34B
"""
sibling, direction = self._get_sibling(node)
closer_node = sibling.right if direction == 'L' else sibling.left
outer_node = sibling.left if direction == 'L' else sibling.right
if closer_node.color == RED and outer_node.color != RED and sibling.color == BLACK:
if direction == 'L':
self._left_rotation(node=None, parent=closer_node, grandfather=sibling)
else:
self._right_rotation(node=None, parent=closer_node, grandfather=sibling)
closer_node.color = BLACK
sibling.color = RED
self.__case_6(node)
def __case_6(self, node):
r"""
Case 6 requires
SIBLING to be BLACK
OUTER NODE to be RED
Then, does a right/left rotation on the sibling
This will showcase when the SIBLING's direction is LEFT
Double Black
__50B__ | __35B__
/ \ | / \
SIBLING--> 35B |80B| <- 30R 50R
/ \ / / \ / \
30R 37B 70R Outer node is RED 20B 34B 37B 80B
/ \ Closer node doesn't /
20B 34B matter 70R
Parent doesn't
matter
So we do a right rotation on 35B!
"""
sibling, direction = self._get_sibling(node)
outer_node = sibling.left if direction == 'L' else sibling.right
def __case_6_rotation(direction):
parent_color = sibling.parent.color
self.ROTATIONS[direction](node=None, parent=sibling, grandfather=sibling.parent)
# new parent is sibling
sibling.color = parent_color
sibling.right.color = BLACK
sibling.left.color = BLACK
if sibling.color == BLACK and outer_node.color == RED:
return __case_6_rotation(direction) # terminating
raise Exception('We should have ended here, something is wrong')
def _try_rebalance(self, node):
"""
Given a red child node, determine if there is a need to rebalance (if the parent is red)
If there is, rebalance it
"""
parent = node.parent
value = node.value
        if (parent is None  # no parent (should not happen)
or parent.parent is None # parent is the root
or (node.color != RED or parent.color != RED)): # no need to rebalance
return
grandfather = parent.parent
node_dir = 'L' if parent.value > value else 'R'
parent_dir = 'L' if grandfather.value > parent.value else 'R'
uncle = grandfather.right if parent_dir == 'L' else grandfather.left
general_direction = node_dir + parent_dir
if uncle == self.NIL_LEAF or uncle.color == BLACK:
# rotate
if general_direction == 'LL':
self._right_rotation(node, parent, grandfather, to_recolor=True)
elif general_direction == 'RR':
self._left_rotation(node, parent, grandfather, to_recolor=True)
elif general_direction == 'LR':
self._right_rotation(node=None, parent=node, grandfather=parent)
# due to the prev rotation, our node is now the parent
self._left_rotation(node=parent, parent=node, grandfather=grandfather, to_recolor=True)
elif general_direction == 'RL':
self._left_rotation(node=None, parent=node, grandfather=parent)
# due to the prev rotation, our node is now the parent
self._right_rotation(node=parent, parent=node, grandfather=grandfather, to_recolor=True)
else:
raise Exception("{} is not a valid direction!".format(general_direction))
else: # uncle is RED
self._recolor(grandfather)
def __update_parent(self, node, parent_old_child, new_parent):
"""
Our node 'switches' places with the old child
Assigns a new parent to the node.
If the new_parent is None, this means that our node becomes the root of the tree
"""
node.parent = new_parent
if new_parent:
# Determine the old child's position in order to put node there
if new_parent.value > parent_old_child.value:
new_parent.left = node
else:
new_parent.right = node
else:
self.root = node
def _right_rotation(self, node, parent, grandfather, to_recolor=False):
grand_grandfather = grandfather.parent
self.__update_parent(node=parent, parent_old_child=grandfather, new_parent=grand_grandfather)
old_right = parent.right
parent.right = grandfather
grandfather.parent = parent
grandfather.left = old_right # save the old right values
old_right.parent = grandfather
if to_recolor:
parent.color = BLACK
node.color = RED
grandfather.color = RED
def _left_rotation(self, node, parent, grandfather, to_recolor=False):
grand_grandfather = grandfather.parent
self.__update_parent(node=parent, parent_old_child=grandfather, new_parent=grand_grandfather)
old_left = parent.left
parent.left = grandfather
grandfather.parent = parent
grandfather.right = old_left # save the old left values
old_left.parent = grandfather
if to_recolor:
parent.color = BLACK
node.color = RED
grandfather.color = RED
def _recolor(self, grandfather):
grandfather.right.color = BLACK
grandfather.left.color = BLACK
if grandfather != self.root:
grandfather.color = RED
self._try_rebalance(grandfather)
def _find_parent(self, value):
""" Finds a place for the value in our binary tree"""
def inner_find(parent):
"""
Return the appropriate parent node for our new node as well as the side it should be on
"""
if value == parent.value:
return None, None
elif parent.value < value:
if parent.right.color == NIL: # no more to go
return parent, 'R'
return inner_find(parent.right)
elif value < parent.value:
if parent.left.color == NIL: # no more to go
return parent, 'L'
return inner_find(parent.left)
return inner_find(self.root)
def find_node(self, value):
def inner_find(root):
if root is None or root == self.NIL_LEAF:
return None
if value > root.value:
return inner_find(root.right)
elif value < root.value:
return inner_find(root.left)
else:
return root
found_node = inner_find(self.root)
return found_node
def _find_in_order_successor(self, node):
right_node = node.right
left_node = right_node.left
if left_node == self.NIL_LEAF:
return right_node
while left_node.left != self.NIL_LEAF:
left_node = left_node.left
return left_node
def _get_sibling(self, node):
"""
Returns the sibling of the node, as well as the side it is on
e.g
20 (A)
/ \
15(B) 25(C)
_get_sibling(25(C)) => 15(B), 'R'
"""
parent = node.parent
if node.value >= parent.value:
sibling = parent.left
direction = 'L'
else:
sibling = parent.right
direction = 'R'
return sibling, direction
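# Minimal usage sketch (illustration only, not part of the original file),
# exercising the public API defined above: add, contains, ceil, floor and remove.
if __name__ == "__main__":
    tree = RedBlackTree()
    for v in [20, 15, 25, 10, 17, 30]:
        tree.add(v)
    print(list(tree))          # in-order traversal: [10, 15, 17, 20, 25, 30]
    print(tree.contains(17))   # True
    print(tree.ceil(18))       # 20 -- smallest stored value >= 18
    print(tree.floor(18))      # 17 -- largest stored value <= 18
    tree.remove(20)
    print(list(tree))          # [10, 15, 17, 25, 30]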
|
[
"noreply@github.com"
] |
sunwmax.noreply@github.com
|
aee8bf0ea8399d18b36b4f33a71ddacbb7b77f8a
|
4ebb5e56f3276504462c83274171ff1d9a5373d4
|
/tasks.py
|
7a2ff8fa3d9ff4681ca6186e190553d99938d3fb
|
[] |
no_license
|
ig-novik/diploma_project
|
256de85938ffdd1bd541f3bd48c17eaa358f4f86
|
dd05e024a21df2d07eab942093930f629253f61a
|
refs/heads/master
| 2023-05-11T07:26:00.078473
| 2020-04-21T09:46:13
| 2020-04-21T09:46:13
| 247,621,035
| 0
| 0
| null | 2023-05-01T21:22:32
| 2020-03-16T05:47:34
|
Python
|
UTF-8
|
Python
| false
| false
| 784
|
py
|
from celery import Celery
from celery.schedules import crontab
from webapp import create_app
from webapp.news.parsers import avito, habr  # habr is required by habr_content() below
flask_app = create_app()
celery_app = Celery('tasks', broker='redis://localhost:6379/0')
celery_app.conf.update(flask_app.config)
@celery_app.task
def avito_snippets():
    print("Entering avito_snippets()")
with flask_app.app_context():
avito.get_ads_snippets()
@celery_app.task
def habr_content():
    print("Entering habr_content()")
with flask_app.app_context():
habr.get_news_content()
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(crontab(minute='*/1'), avito_snippets.s())
sender.add_periodic_task(crontab(minute='*/2'), habr_content.s())
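# --- Illustrative sketch (not part of the original file) ---------------------
# An equivalent, declarative way to register the same schedule via Celery's
# beat_schedule setting. Task names assume Celery's default "<module>.<function>"
# naming for this tasks.py module; adjust them if the tasks are named differently.
celery_app.conf.beat_schedule = {
    'avito-snippets-every-minute': {
        'task': 'tasks.avito_snippets',
        'schedule': crontab(minute='*/1'),
    },
    'habr-content-every-two-minutes': {
        'task': 'tasks.habr_content',
        'schedule': crontab(minute='*/2'),
    },
}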
|
[
"ig_novik@mail.ru"
] |
ig_novik@mail.ru
|
1dd23f472b7de887c901515844d6b2999272e6bf
|
3fd00274d9e4e01dcefff0206feda739555392f5
|
/pygame/qgame/viz/unitary_grid.py
|
cd4711103706ac4ffed8765ef5ba8bffbc96f9bc
|
[] |
no_license
|
quantumjim/Qiskit-for-GameDev
|
981500ff65237c00214a89b81fda091a8ec84146
|
495dabde2c2925333b859a14e99cb1fc0fe2c6a1
|
refs/heads/master
| 2021-04-16T17:06:47.084395
| 2020-03-23T08:17:02
| 2020-03-23T08:17:02
| 249,372,148
| 2
| 0
| null | 2020-03-23T08:15:27
| 2020-03-23T08:15:26
| null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
#!/usr/bin/env python
#
# Copyright 2019 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pygame
from qiskit import BasicAer, execute
from ..utils.colors import *
from ..utils.fonts import ARIAL_16
from .. import comp_basis_states
class UnitaryGrid(pygame.sprite.Sprite):
"""Displays a unitary matrix grid"""
def __init__(self, circuit):
pygame.sprite.Sprite.__init__(self)
self.image = None
self.rect = None
self.basis_states = comp_basis_states(circuit.width())
self.set_circuit(circuit)
# def update(self):
# # Nothing yet
# a = 1
def set_circuit(self, circuit):
backend_unit_sim = BasicAer.get_backend('unitary_simulator')
job_sim = execute(circuit, backend_unit_sim)
result_sim = job_sim.result()
unitary = result_sim.get_unitary(circuit, decimals=3)
# print('unitary: ', unitary)
self.image = pygame.Surface([100 + len(unitary) * 50, 100 + len(unitary) * 50])
self.image.convert()
self.image.fill(WHITE)
self.rect = self.image.get_rect()
block_size = 30
x_offset = 50
y_offset = 50
for y in range(len(unitary)):
text_surface = ARIAL_16.render(self.basis_states[y], False, (0, 0, 0))
self.image.blit(text_surface,(x_offset, (y + 1) * block_size + y_offset))
for x in range(len(unitary)):
text_surface = ARIAL_16.render(self.basis_states[x], False, (0, 0, 0))
self.image.blit(text_surface, ((x + 1) * block_size + x_offset, y_offset))
rect = pygame.Rect((x + 1) * block_size + x_offset,
(y + 1) * block_size + y_offset,
abs(unitary[y][x]) * block_size,
abs(unitary[y][x]) * block_size)
if abs(unitary[y][x]) > 0:
pygame.draw.rect(self.image, BLACK, rect, 1)
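# --- Illustrative sketch (not part of the original file) ---------------------
# The same unitary computation as set_circuit() above, without any pygame drawing,
# shown on a hypothetical 2-qubit Bell circuit. Assumes the same qiskit version
# that provides BasicAer and execute (imported at the top of this module).
def print_unitary_magnitudes():
    from qiskit import QuantumCircuit
    bell = QuantumCircuit(2)
    bell.h(0)
    bell.cx(0, 1)
    backend = BasicAer.get_backend('unitary_simulator')
    unitary = execute(bell, backend).result().get_unitary(bell, decimals=3)
    for row in unitary:
        print(' '.join('{:.2f}'.format(abs(value)) for value in row))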
|
[
"h.jun.ye@gmail.com"
] |
h.jun.ye@gmail.com
|
26719905a2d5c638006f568dfbcaef22e95094e7
|
f292e3dea8be9145b94f6d874ecca51c959fb0e7
|
/ATCrack/preprocess/label2voc.py
|
ed794ec8ec4a5bf4e5f919c5a4415806444a1dd8
|
[] |
no_license
|
xukefei01/crack-detection
|
d0bdd9f35cc7cc93cadcc4a67c18e280d4f6fcc5
|
28fb5ae2e4b2aa5d210ddd25ef9c7d10251454f5
|
refs/heads/main
| 2023-03-31T05:45:01.315343
| 2021-03-31T02:43:16
| 2021-03-31T02:43:16
| 353,202,673
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,606
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import glob
import json
import os
import os.path as osp
import sys
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import argparse
import base64
import json
import os
import labelme
from labelme import utils
import cv2 as cv
from PIL import Image, ImageOps, ImageEnhance
from torchvision.transforms import RandomSizedCrop
from torchvision.transforms.functional import resized_crop, crop, resize
from tqdm import tqdm
def random_crop(img, mask, size, scale=(0.2, 0.8), ratio=(3. / 4., 4. / 3.), n_tries = 10, crack_px_percent = 0.3, resize=False):
n_total_crack = np.sum((mask > 0)[:])
img = Image.fromarray(img)
mask = Image.fromarray(mask)
results = []
img_w = img.size[0]
img_h = img.size[1]
for i in range(n_tries):
i, j, h, w = RandomSizedCrop.get_params(img, scale, ratio)
sub_img = resized_crop(img, i, j, h, w, size, Image.BILINEAR)
sub_mask = resized_crop(mask,i, j, h, w, size, Image.NEAREST)
sub_img = np.asarray(sub_img)
sub_mask = np.asarray(sub_mask)
        tmp = np.asarray(mask.crop((j, i, j + w, i + h)))  # crack coverage must be measured on the mask crop, not the image
n_crack_pixels = np.sum((tmp>0)[:])
crk_ratio = float(n_crack_pixels)/n_total_crack
if crk_ratio < crack_px_percent:
print('missing')
continue
results.append((sub_img, sub_mask, (i, j, h, w)))
_img = np.asarray(resized_crop(img, 0, 0, img_h, img_w, size, Image.BILINEAR))
_mask = np.asarray(resized_crop(mask, 0, 0, img_h, img_w, size, Image.NEAREST))
_img = np.asarray(_img)
_mask = np.asarray(_mask)
results.append((_img, _mask, (0, 0, img_h, img_w)))
return results
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('input_dir', help='input annotated directory')
parser.add_argument('output_dir', help='output dataset directory')
parser.add_argument('--labels', help='labels file', required=True)
args = parser.parse_args()
# if osp.exists(args.output_dir):
# print('Output directory already exists:', args.output_dir)
# sys.exit(1)
os.makedirs(args.output_dir, exist_ok=True)
os.makedirs(osp.join(args.output_dir, 'images'), exist_ok=True)
os.makedirs(osp.join(args.output_dir, 'masks'), exist_ok=True)
#os.makedirs(osp.join(args.output_dir, 'SegmentationClassPNG'), exist_ok=True)
os.makedirs(osp.join(args.output_dir, 'SegmentationClassVisualization'), exist_ok=True)
print('Creating dataset:', args.output_dir)
class_names = []
class_name_to_id = {}
for i, line in enumerate(open(args.labels).readlines()):
class_id = i - 1 # starts with -1
class_name = line.strip()
class_name_to_id[class_name] = class_id
if class_id == -1:
assert class_name == '__ignore__'
continue
elif class_id == 0:
assert class_name == '_background_'
class_names.append(class_name)
class_names = tuple(class_names)
print('class_names:', class_names)
out_class_names_file = osp.join(args.output_dir, 'class_names.txt')
with open(out_class_names_file, 'w') as f:
f.writelines('\n'.join(class_names))
print('Saved class_names:', out_class_names_file)
colormap = labelme.utils.label_colormap(255)
for label_file in tqdm(list([path for path in glob.glob(osp.join(args.input_dir, '*.json'))])):
#if '9S6A2822' not in label_file:
# continue
#print('Generating dataset from:', label_file)
with open(label_file) as f:
base = osp.splitext(osp.basename(label_file))[0]
out_img_file = osp.join(
args.output_dir, 'images', base + '.jpg')
out_lbl_file = osp.join(
args.output_dir, 'SegmentationClass', base + '.npy')
out_png_file = osp.join(
args.output_dir, 'masks', base + '.jpg')
out_viz_file = osp.join(
args.output_dir,
'SegmentationClassVisualization',
base + '.jpg',
)
data = json.load(f)
##
if data['imageData']:
imageData = data['imageData']
else:
imagePath = os.path.join(os.path.dirname(label_file), data['imagePath'])
with open(imagePath, 'rb') as f:
imageData = f.read()
imageData = base64.b64encode(imageData).decode('utf-8')
img = utils.img_b64_to_arr(imageData)
label_name_to_value = {'_background_': 0}
for shape in sorted(data['shapes'], key=lambda x: x['label']):
label_name = shape['label']
if label_name in label_name_to_value:
label_value = label_name_to_value[label_name]
else:
label_value = len(label_name_to_value)
label_name_to_value[label_name] = label_value
lbl = utils.shapes_to_label(img.shape, data['shapes'], label_name_to_value)
#lb = cv.imread(join(*[args.label_dir, f'{path.stem}.png']))
#lb = cv.cvtColor(lb, cv.COLOR_BGR2GRAY)
lbl = (lbl > 0).astype(np.uint8) * 255
lbl = cv.morphologyEx(src=lbl, op=cv.MORPH_DILATE, kernel=cv.getStructuringElement(cv.MORPH_RECT, (20, 20)))
results = random_crop(img, lbl, size=(448, 448), n_tries=10)
#tq.update(1)
for sub_img, sub_mask, crop_info in results:
info = f'{crop_info[0]}_{crop_info[1]}_{crop_info[2]}_{crop_info[3]}'
cv.imwrite(filename=os.path.join(*[args.output_dir, 'images', f'{base}_{info}.jpg']), img=sub_img)
cv.imwrite(filename=os.path.join(*[args.output_dir, 'masks', f'{base}_{info}.jpg']), img=sub_mask)
#cnt += 1
plt.clf()
plt.imshow(sub_img)
plt.imshow(sub_mask, alpha=0.4)
plt.savefig(osp.join(args.output_dir, 'SegmentationClassVisualization', f'{base}_{info}' + '.jpg',))
#labelme.utils.lblsave(out_png_file, lbl)
#np.save(out_lbl_file, lbl)
#PIL.Image.fromarray(img).save(out_img_file)
#plt.show()
# viz = labelme.utils.draw_label(
# lbl, img, class_names, colormap=colormap)
# PIL.Image.fromarray(viz).save(out_viz_file)
if __name__ == '__main__':
main()
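# --- Illustrative usage (not part of the original script) ---------------------
# Hypothetical invocation; directory names and the labels file are placeholders:
#   python label2voc.py ./labelme_annotations ./crack_dataset --labels labels.txt
# where labels.txt lists one class per line and, per the assertions in main(),
# starts with:
#   __ignore__
#   _background_
#   crack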
|
[
"noreply@github.com"
] |
xukefei01.noreply@github.com
|
7484bacf4f4a3645406680fa8f7058b99cc91c34
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/trendmicro_deepsecurity/icon_trendmicro_deepsecurity/actions/search_computers/action.py
|
9b7c4476a3b6b40428738fb69a0845ee0eff2240
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980
| 2023-08-31T10:25:36
| 2023-08-31T10:25:36
| 190,435,635
| 61
| 60
|
MIT
| 2023-09-14T08:47:37
| 2019-06-05T17:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,708
|
py
|
import komand
from .schema import SearchComputersInput, SearchComputersOutput, Input, Output, Component
# Custom imports below
import json
import requests
from komand.exceptions import PluginException
from icon_trendmicro_deepsecurity.util.shared import tryJSON
from icon_trendmicro_deepsecurity.util.shared import checkResponse
class SearchComputers(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="search_computers",
description=Component.DESCRIPTION,
input=SearchComputersInput(),
output=SearchComputersOutput(),
)
def run(self, params={}):
"""
Searches for Computers in Deep Security
"""
# Get parameters
self.information = params.get(Input.INFORMATION)
self.max_items = params.get(Input.MAX_ITEMS)
self.field_name = params.get(Input.FIELD_NAME)
self.search_type = params.get(Input.SEARCH_TYPE)
self.string_value = params.get(Input.STRING_VALUE)
self.number_value = params.get(Input.NUMBER_VALUE)
computer_ids = set()
# Prepare request
url = f"{self.connection.dsm_url}/api/computers/search?expand={self.information}"
if self.field_name:
if self.search_type == "string" and self.string_value:
# Search for computers by string match
data = {
"maxItems": self.max_items,
"searchCriteria": [
{
"fieldName": self.field_name,
"stringWildcards": True,
"stringValue": self.string_value,
}
],
}
elif self.search_type == "integer" and self.number_value:
# Search for computers by number match
data = {
"maxItems": self.max_items,
"searchCriteria": [
{
"fieldName": self.field_name,
"stringWildcards": True,
"numericValue": self.number_value,
}
],
}
else:
raise PluginException(
                    cause="Scan type and matching search value expected but not found!",
assistance="Please select a search type and pass the matching string/number value to search for.",
)
else:
# List all computers
data = {"maxItems": self.max_items}
# Send request
response = requests.post(
url, data=json.dumps(data), verify=self.connection.dsm_verify_ssl, headers=self.connection.headers
)
self.logger.info(f"url: {response.url}")
self.logger.info(f"status: {response.status_code}")
self.logger.info(f"reason: {response.reason}")
# Check response errors
checkResponse(response)
# Try to convert the response data to JSON
response_data = tryJSON(response)
# Extract computer IDs
if response_data["computers"]:
hits = len(response_data["computers"])
self.logger.info(f"Found {hits} computer(s)!")
for computer in response_data["computers"]:
self.logger.info(f"{computer['ID']} - {computer['hostName']}")
computer_ids.add(computer["ID"])
else:
self.logger.info("No computer found!")
# Return matched rules
return {Output.COMPUTER_IDS: list(computer_ids), Output.RESPONSE_JSON: response_data}
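# --- Illustrative sketch (not part of the original plugin) --------------------
# Shape of the request body run() builds for a string search; the field name and
# wildcard value below are hypothetical examples, not plugin defaults.
EXAMPLE_STRING_SEARCH_BODY = {
    "maxItems": 10,
    "searchCriteria": [
        {
            "fieldName": "hostName",
            "stringWildcards": True,
            "stringValue": "web-*",
        }
    ],
}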
|
[
"noreply@github.com"
] |
rapid7.noreply@github.com
|
10295dd3df62eb9decab66cd53c16ebcf00d962b
|
39ffb606a790b3f1c1afb0e9a82051bd4c6a5e87
|
/src/blog/migrations/0003_auto_20190907_0343.py
|
8509645a8b7bf05e84f41f802f1192706fbb5f06
|
[
"MIT"
] |
permissive
|
km427/django_blog
|
2ec79b6d82566fe7cf4c496917d36bc83296268c
|
fca83dbc312747493cc660c6fc74553cc7a91c42
|
refs/heads/master
| 2020-08-28T00:01:24.974086
| 2019-09-07T01:47:32
| 2019-09-07T01:47:32
| 217,527,579
| 1
| 0
|
MIT
| 2019-10-25T12:21:13
| 2019-10-25T12:21:13
| null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
# Generated by Django 2.2.5 on 2019-09-06 22:13
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20190907_0050'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='date',
field=models.DateTimeField(default=datetime.datetime(2019, 9, 7, 3, 43, 32, 555196)),
),
]
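# Note (not part of the generated migration): the default above is a datetime that
# was frozen at the moment makemigrations ran. If the intent is "time of creation",
# the usual pattern is a callable default instead, e.g.:
#   from django.utils import timezone
#   field=models.DateTimeField(default=timezone.now)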
|
[
"roony0782045336@gmail.com"
] |
roony0782045336@gmail.com
|
e1bbe2aa9d7c65d5b20d4909fb5bf6b1365c4942
|
dde9ab902d560bb391bdfac0fb2e03b3d797910d
|
/assignment1/autodiff_test.py
|
63c6bd9789976efc740b7b847701a0d5112bb7fd
|
[] |
no_license
|
OlivierShi/deeplearning-sys
|
fa92f6ecf166bbd1a78286d01987ddbbb41fa48c
|
737ae1da4bd15849dee17b6884cff51596324c80
|
refs/heads/master
| 2021-06-17T20:17:24.172506
| 2017-06-12T10:00:04
| 2017-06-12T10:00:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,128
|
py
|
import autodiff as ad
import numpy as np
def test_identity():
x2 = ad.Variable(name="x2")
y = x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})
# print y_val
# print grad_x2_val
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
test_identity()
def test_add_by_const():
x2 = ad.Variable(name="x2")
y = 5 + x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})
# print y_val
# print grad_x2_val
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val + 5)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
test_add_by_const()
def test_mul_by_const():
x2 = ad.Variable(name="x2")
y = 5 * x2
grad_x2, = ad.gradients(y, [x2])
executor = ad.Executor([y, grad_x2])
x2_val = 2 * np.ones(3)
y_val, grad_x2_val = executor.run(feed_dict={x2: x2_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val * 5)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val) * 5)
test_mul_by_const()
def test_add_two_vars():
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
y = x2 + x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val = executor.run(
feed_dict={x2: x2_val, x3: x3_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val + x3_val)
assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
assert np.array_equal(grad_x3_val, np.ones_like(x3_val))
test_add_two_vars()
def test_mul_two_vars():
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
y = x2 * x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val = executor.run(
feed_dict={x2: x2_val, x3: x3_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x2_val * x3_val)
assert np.array_equal(grad_x2_val, x3_val)
assert np.array_equal(grad_x3_val, x2_val)
test_mul_two_vars()
def test_add_mul_mix_1():
x1 = ad.Variable(name="x1")
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
y = x1 + x2 * x3 * x1
grad_x1, grad_x2, grad_x3 = ad.gradients(y, [x1, x2, x3])
executor = ad.Executor([y, grad_x1, grad_x2, grad_x3])
x1_val = 1 * np.ones(3)
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x1_val, grad_x2_val, grad_x3_val = executor.run(
feed_dict={x1: x1_val, x2: x2_val, x3: x3_val})
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x1_val + x2_val * x3_val)
assert np.array_equal(grad_x1_val, np.ones_like(x1_val) + x2_val * x3_val)
assert np.array_equal(grad_x2_val, x3_val * x1_val)
assert np.array_equal(grad_x3_val, x2_val * x1_val)
test_add_mul_mix_1()
def test_add_mul_mix_2():
x1 = ad.Variable(name="x1")
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
x4 = ad.Variable(name="x4")
y = x1 + x2 * x3 * x4
grad_x1, grad_x2, grad_x3, grad_x4 = ad.gradients(y, [x1, x2, x3, x4])
executor = ad.Executor([y, grad_x1, grad_x2, grad_x3, grad_x4])
x1_val = 1 * np.ones(3)
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
x4_val = 4 * np.ones(3)
y_val, grad_x1_val, grad_x2_val, grad_x3_val, grad_x4_val = executor.run(
feed_dict={x1: x1_val, x2: x2_val, x3: x3_val, x4: x4_val})
# print grad_x2_val
# print x3_val * x4_val
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, x1_val + x2_val * x3_val * x4_val)
assert np.array_equal(grad_x1_val, np.ones_like(x1_val))
assert np.array_equal(grad_x2_val, x3_val * x4_val)
assert np.array_equal(grad_x3_val, x2_val * x4_val)
assert np.array_equal(grad_x4_val, x2_val * x3_val)
test_add_mul_mix_2()
def test_add_mul_mix_3():
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
z = x2 * x2 + x2 + x3 + 3
y = z * z + x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val = executor.run(
feed_dict={x2: x2_val, x3: x3_val})
z_val = x2_val * x2_val + x2_val + x3_val + 3
expected_yval = z_val * z_val + x3_val
expected_grad_x2_val = 2 * \
(x2_val * x2_val + x2_val + x3_val + 3) * (2 * x2_val + 1)
expected_grad_x3_val = 2 * (x2_val * x2_val + x2_val + x3_val + 3) + 1
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, expected_yval)
assert np.array_equal(grad_x2_val, expected_grad_x2_val)
assert np.array_equal(grad_x3_val, expected_grad_x3_val)
test_add_mul_mix_3()
def test_grad_of_grad():
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
y = x2 * x2 + x2 * x3
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
grad_x2_x2, grad_x2_x3 = ad.gradients(grad_x2, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3, grad_x2_x2, grad_x2_x3])
x2_val = 2 * np.ones(3)
x3_val = 3 * np.ones(3)
y_val, grad_x2_val, grad_x3_val, grad_x2_x2_val, grad_x2_x3_val = executor.run(
feed_dict={x2: x2_val, x3: x3_val})
expected_yval = x2_val * x2_val + x2_val * x3_val
expected_grad_x2_val = 2 * x2_val + x3_val
expected_grad_x3_val = x2_val
expected_grad_x2_x2_val = 2 * np.ones_like(x2_val)
expected_grad_x2_x3_val = 1 * np.ones_like(x2_val)
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, expected_yval)
assert np.array_equal(grad_x2_val, expected_grad_x2_val)
assert np.array_equal(grad_x3_val, expected_grad_x3_val)
assert np.array_equal(grad_x2_x2_val, expected_grad_x2_x2_val)
assert np.array_equal(grad_x2_x3_val, expected_grad_x2_x3_val)
test_grad_of_grad()
def test_matmul_two_vars():
x2 = ad.Variable(name="x2")
x3 = ad.Variable(name="x3")
y = ad.matmul_op(x2, x3, False, True)
grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
executor = ad.Executor([y, grad_x2, grad_x3])
x2_val = np.ones((3, 1))
x3_val = np.ones((3,1))
# x2_val = np.array([[1, 2], [3, 4], [5, 6]]) # 3x2
# x3_val = np.array([[7, 8, 9], [10, 11, 12]]) # 2x3
y_val, grad_x2_val, grad_x3_val = executor.run(
feed_dict={x2: x2_val, x3: x3_val})
x3_val_ = np.ones((1,3))
expected_yval = np.matmul(x2_val, x3_val_)
# print y_val
expected_grad_x2_val = np.matmul(
np.ones_like(expected_yval), np.transpose(x3_val_))
expected_grad_x3_val = np.matmul(
np.transpose(x2_val), np.ones_like(expected_yval)).T
assert isinstance(y, ad.Node)
assert np.array_equal(y_val, expected_yval)
assert np.array_equal(grad_x2_val, expected_grad_x2_val)
assert np.array_equal(grad_x3_val, expected_grad_x3_val)
# test_matmul_two_vars()
def test_msr():
x = ad.Variable(name="x")
y = ad.Variable(name="y")
z = x * y
l = ad.reduce_sum_op((x-z)*(x-z), axis=0)
# c = 2*x
c = ad.matmul_op(x-z, x-z, True, False)
x_val = np.ones((10, 1))
y_val = np.ones((10, 1))*2
grad_x1, grad_y1 = ad.gradients(l, [x, y])
grad_x2, grad_y2 = ad.gradients(c, [x, y])
excutor = ad.Executor([l, c, grad_x1, grad_y1, grad_x2, grad_y2])
# excutor = ad.Executor([l, grad_x1, grad_y1, d])
loss, cost, grad_x1_val, grad_y1_val, grad_x2_val, grad_y2_val = excutor.run(feed_dict={x: x_val, y: y_val})
# loss, grad_x1_val, grad_y1_val, d_val = excutor.run(feed_dict={x: x_val, y: y_val, z: z_val})
print loss
print cost
print "gx1: %s, gy1: %s" % (str(grad_x1_val), str(grad_y1_val))
print "gx2: %s, gy2: %s" % (str(grad_x2_val), str(grad_y2_val))
# print d_val
test_msr()
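# --- Illustrative extra test (not part of the original file) ------------------
# Assumes the subtraction operator is overloaded the same way "+" and "*" are
# above (test_msr already relies on x - z), with d(x2 - x3)/dx3 == -1.
def test_sub_two_vars():
    x2 = ad.Variable(name="x2")
    x3 = ad.Variable(name="x3")
    y = x2 - x3
    grad_x2, grad_x3 = ad.gradients(y, [x2, x3])
    executor = ad.Executor([y, grad_x2, grad_x3])
    x2_val = 2 * np.ones(3)
    x3_val = 3 * np.ones(3)
    y_val, grad_x2_val, grad_x3_val = executor.run(feed_dict={x2: x2_val, x3: x3_val})
    assert np.array_equal(y_val, x2_val - x3_val)
    assert np.array_equal(grad_x2_val, np.ones_like(x2_val))
    assert np.array_equal(grad_x3_val, -np.ones_like(x3_val))
# test_sub_two_vars()  # left commented out, mirroring test_matmul_two_vars above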
|
[
"jiangnan.hugo@gmail.com"
] |
jiangnan.hugo@gmail.com
|
e92d7ccdd2592f2089e2986159273b7d690ed742
|
35f9def6e6d327d3a4a4f2959024eab96f199f09
|
/developer/lab/ipython/PyTorch/PyTorch/test/test_profiler.py
|
382d05d438515d89c3caa8661d4e771c15a66cd1
|
[
"CAL-1.0-Combined-Work-Exception",
"CAL-1.0",
"MIT",
"CC-BY-SA-4.0",
"LicenseRef-scancode-free-unknown",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
arXiv-research/DevLab-III-1
|
ec10aef27e1ca75f206fea11014da8784752e454
|
c50cd2b9154c83c3db5e4a11b9e8874f7fb8afa2
|
refs/heads/main
| 2023-04-16T19:24:58.758519
| 2021-04-28T20:21:23
| 2021-04-28T20:21:23
| 362,599,929
| 2
| 0
|
MIT
| 2021-04-28T20:36:11
| 2021-04-28T20:36:11
| null |
UTF-8
|
Python
| false
| false
| 18,771
|
py
|
import collections
import gc
import io
import os
import unittest
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.common_utils import (
TestCase, run_tests, TEST_WITH_ASAN, TEST_WITH_ROCM, IS_WINDOWS,
TemporaryFileName, TemporaryDirectoryName)
from torch.autograd.profiler import profile as _profile
from torch.profiler import (
kineto_available, profile, record_function, DeviceType, ProfilerActivity
)
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
import pickle
@unittest.skipIf(not HAS_PSUTIL, "Requires psutil to run")
@unittest.skipIf(TEST_WITH_ASAN, "Cannot test with ASAN")
@unittest.skipIf(IS_WINDOWS, "Test is flaky on Windows")
@unittest.skipIf(not torch.cuda.is_available(), "CUDA is required")
class TestProfilerCUDA(TestCase):
def test_mem_leak(self):
"""Checks that there's no memory leak when using profiler with CUDA
"""
t = torch.rand(1, 1).cuda()
p = psutil.Process()
last_rss = collections.deque(maxlen=5)
for outer_idx in range(10):
with _profile(use_cuda=True):
for _ in range(1024):
t = torch.mm(t, t)
gc.collect()
torch.cuda.empty_cache()
last_rss.append(p.memory_info().rss)
# with CUDA events leaking the increase in memory was ~7 MB between
# profiler invocations above
is_increasing = all(
[last_rss[idx] > last_rss[idx - 1] for idx in range(1, len(last_rss))])
max_diff = -1
for idx in range(1, len(last_rss)):
max_diff = max(max_diff, last_rss[idx] - last_rss[idx - 1])
self.assertTrue(not (is_increasing and max_diff > 100 * 1024),
msg='memory usage is increasing, {}'.format(str(last_rss)))
class TestProfiler(TestCase):
def test_source(self):
"""Checks that source code attribution works for eager, TS and autograd mode
"""
# avoid automatic inlining
prev_opt = torch._C._get_graph_executor_optimize()
torch._C._set_graph_executor_optimize(False)
@torch.jit.script
def ts_method_2(x, y):
return torch.matmul(x, y)
@torch.jit.script
def ts_method_1(x, y, z):
a = x + z
w = ts_method_2(x, y) + a
return w.sum()
class DummyModule(nn.Module):
def __init__(self):
super(DummyModule, self).__init__()
self.conv = torch.nn.Conv2d(3, 2, kernel_size=1, stride=2, padding=3, bias=False)
def forward(self, x):
return self.conv(x)
mod = DummyModule()
with _profile(with_stack=True, use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
w = ts_method_1(x, y, z)
v = 2 * w
v.backward()
a = torch.randn(2, 3, 2, 2, requires_grad=True)
b = mod(a)
c = b.sum()
c.backward()
for e in p.function_events:
if "aten::add" in e.name or "AddBackward" in e.name:
self.assertTrue(any(["test_profiler" in entry for entry in e.stack]))
self.assertTrue(any([(
"test_source" in entry or
"ts_method_1" in entry or
"ts_method_2" in entry) for entry in e.stack]))
torch._C._set_graph_executor_optimize(prev_opt)
def payload(self, use_cuda=False):
x = torch.randn(10, 10)
if use_cuda:
x = x.cuda()
y = torch.randn(10, 10)
if use_cuda:
y = y.cuda()
z = torch.mm(x, y)
z = z + y
if use_cuda:
z = z.cpu()
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_kineto(self):
use_cuda = torch.cuda.is_available() and (not TEST_WITH_ROCM)
with _profile(use_cuda=use_cuda, use_kineto=True):
self.payload(use_cuda=use_cuda)
# rerun to avoid initial start overhead
with _profile(use_cuda=use_cuda, use_kineto=True) as p:
self.payload(use_cuda=use_cuda)
output = p.key_averages().table(
sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
# print(output)
found_gemm = False
found_memcpy = False
found_mm = False
for e in p.function_events:
if "aten::mm" in e.name:
found_mm = True
if "gemm" in e.name:
found_gemm = True
if "Memcpy" in e.name or "memcpy" in e.name:
found_memcpy = True
if use_cuda:
self.assertTrue(found_gemm)
self.assertTrue(found_memcpy)
else:
self.assertTrue(found_mm)
# p.export_chrome_trace("/tmp/test_trace.json")
@unittest.skipIf(not kineto_available(), "Kineto is required")
@unittest.skipIf(not TEST_MULTIGPU, "Multiple GPUs needed")
@unittest.skipIf(TEST_WITH_ROCM, "Not supported on ROCm")
def test_kineto_multigpu(self):
with profile(
activities=[
ProfilerActivity.CPU,
ProfilerActivity.CUDA]) as prof:
for gpu_id in [0, 1]:
x = torch.randn(10, 10).cuda(gpu_id)
y = torch.randn(10, 10).cuda(gpu_id)
z = x.matmul(y)
found_gemm_0 = False
found_gemm_1 = False
found_cuda = False
for evt in prof.events():
if "gemm" in evt.name.lower() and evt.device_type == DeviceType.CUDA:
if evt.device_index == 0:
found_gemm_0 = True
elif evt.device_index == 1:
found_gemm_1 = True
if "cuda" in evt.name.lower() and evt.device_type == DeviceType.CPU:
found_cuda = True
self.assertTrue(found_gemm_0)
self.assertTrue(found_gemm_1)
self.assertTrue(found_cuda)
def test_memory_profiler(self):
def run_profiler(tensor_creation_fn, metric):
# collecting allocs / deallocs
with _profile(profile_memory=True, record_shapes=True, use_kineto=kineto_available()) as prof:
x = None
with record_function("test_user_scope_alloc"):
x = tensor_creation_fn()
with record_function("test_user_scope_dealloc"):
del x
return prof.key_averages(group_by_input_shape=True)
def check_metrics(stats, metric, allocs=None, deallocs=None):
stat_metrics = {}
for stat in stats:
stat_metrics[stat.key] = getattr(stat, metric)
if allocs is not None:
for alloc_fn in allocs:
self.assertTrue(alloc_fn in stat_metrics)
self.assertTrue(stat_metrics[alloc_fn] > 0)
if deallocs is not None:
for dealloc_fn in deallocs:
self.assertTrue(dealloc_fn in stat_metrics)
self.assertTrue(stat_metrics[dealloc_fn] < 0)
def create_cpu_tensor():
return torch.rand(10, 10)
def create_cuda_tensor():
return torch.rand(10, 10).cuda()
def create_mkldnn_tensor():
return torch.rand(10, 10, dtype=torch.float32).to_mkldnn()
stats = run_profiler(create_cpu_tensor, "cpu_memory_usage")
check_metrics(
stats,
"cpu_memory_usage",
allocs=[
"aten::empty",
"aten::rand",
"test_user_scope_alloc",
],
deallocs=[
"test_user_scope_dealloc",
]
)
if torch.cuda.is_available():
create_cuda_tensor()
stats = run_profiler(create_cuda_tensor, "cuda_memory_usage")
check_metrics(
stats,
"cuda_memory_usage",
allocs=[
"test_user_scope_alloc",
"aten::to",
"aten::empty_strided",
],
deallocs=[
"test_user_scope_dealloc",
]
)
check_metrics(
stats,
"cpu_memory_usage",
allocs=[
"aten::rand",
"aten::empty",
]
)
if torch._C.has_mkldnn:
create_mkldnn_tensor()
stats = run_profiler(create_mkldnn_tensor, "cpu_memory_usage")
check_metrics(
stats,
"cpu_memory_usage",
allocs=[
"test_user_scope_alloc",
"aten::rand",
"aten::empty",
"aten::to_mkldnn",
],
deallocs=[
"test_user_scope_dealloc",
]
)
# check top-level memory events
with _profile(profile_memory=True, use_kineto=kineto_available()) as prof:
x = torch.rand(10, 10)
del x
if torch.cuda.is_available():
y = torch.rand(10, 10).cuda()
del y
gc.collect()
stats = prof.key_averages(group_by_input_shape=True)
check_metrics(
stats,
"cpu_memory_usage",
allocs=[
"aten::rand",
"aten::empty"
],
deallocs=[
"[memory]"
]
)
if torch.cuda.is_available():
check_metrics(
stats,
"cuda_memory_usage",
deallocs=[
"[memory]"
]
)
def test_high_level_trace(self):
"""Checks that python side high level events are recorded.
"""
class RepeatedDataset(torch.utils.data.Dataset):
def __init__(self, N, D_in, D_out):
self.N = N
self.x = torch.randn(N, D_in)
self.y = torch.randn(N, D_out)
def __len__(self):
return self.N
def __getitem__(self, idx):
return self.x, self.y
class TwoLayerNet(torch.nn.Module):
def __init__(self, D_in, H, D_out):
super(TwoLayerNet, self).__init__()
self.linear1 = torch.nn.Linear(D_in, H)
self.linear2 = torch.nn.Linear(H, D_out)
def forward(self, x):
h_relu = self.linear1(x).clamp(min=0)
y_pred = self.linear2(h_relu)
return y_pred
class CustomSGD(torch.optim.SGD):
def __init__(self, *args, **kwargs):
super(CustomSGD, self).__init__(*args, **kwargs)
def train():
for _, data in enumerate(dataloader):
x, y = data[0], data[1]
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
N, D_in, H, D_out = 8, 10, 5, 2
model = TwoLayerNet(D_in, H, D_out)
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
ds = RepeatedDataset(N, D_in, D_out)
dataloader = torch.utils.data.DataLoader(ds, batch_size=1)
try:
train()
except Exception:
self.assertTrue(False, "Expected no exception without profiling.")
# Create multiple instances, expect each func is hooked only one time.
# Nested wrappers(repeated patching) will make following test fail.
optimizer_duplicate = torch.optim.SGD(model.parameters(), lr=1e-4)
dataloader_duplicate = torch.utils.data.DataLoader(ds, batch_size=1)
def judge(expected_event_count, prof):
actual_event_count = {}
for e in prof.function_events:
if "#" in e.name:
key = e.name
if key in expected_event_count.keys():
actual_event_count[key] = actual_event_count.setdefault(key, 0) + 1
for key, count in expected_event_count.items():
self.assertTrue((key in actual_event_count.keys()) and (count == actual_event_count[key]))
with _profile(use_kineto=kineto_available()) as prof:
train()
expected_event_count = {
# "+1" because the final iteration will enter __next__ but skip the loop body.
"enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
"Optimizer.step#SGD.step": N,
"Optimizer.zero_grad#SGD.zero_grad": N
}
judge(expected_event_count, prof)
# Test on pickle/unpickle. Expect to work in multi-processing.
optimizer = pickle.loads(pickle.dumps(optimizer))
with _profile(use_kineto=kineto_available()) as prof:
train()
judge(expected_event_count, prof)
# Test on customized optimizer.
optimizer = CustomSGD(model.parameters(), lr=1e-4)
with _profile(use_kineto=kineto_available()) as prof:
train()
expected_event_count = {
"enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
"Optimizer.step#CustomSGD.step": N,
"Optimizer.zero_grad#CustomSGD.zero_grad": N
}
judge(expected_event_count, prof)
def test_flops(self):
model = torch.nn.Sequential(
nn.Conv2d(16, 33, 18),
nn.ReLU(),
nn.Linear(243, 243),
nn.ReLU(),
)
inputs = torch.randn(40, 16, 18, 260)
with _profile(record_shapes=True, with_flops=True, use_kineto=kineto_available()) as prof:
model(inputs)
profiler_output = prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10)
self.assertIn("FLOPS", profiler_output)
if not (kineto_available() and torch.cuda.is_available()):
return
with profile(activities=[
torch.profiler.ProfilerActivity.CPU,
torch.profiler.ProfilerActivity.CUDA],
record_shapes=True,
with_flops=True,
) as kineto_profiler:
model(inputs)
profiler_output = kineto_profiler.key_averages().table(
sort_by="self_cuda_time_total", row_limit=-1)
self.assertIn("FLOPS", profiler_output)
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_kineto_profiler_api(self):
called_num = [0]
use_cuda = torch.cuda.is_available()
with _profile(use_cuda=use_cuda, use_kineto=True):
self.payload(use_cuda=use_cuda)
def trace_handler(p):
output = p.key_averages().table(
sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
# print(output)
# p.export_chrome_trace("/tmp/test_trace_" + str(called_num[0]) + ".json")
called_num[0] += 1
with profile(
activities=[
torch.profiler.ProfilerActivity.CPU
] + ([
torch.profiler.ProfilerActivity.CUDA
] if use_cuda else []),
schedule=torch.profiler.schedule(
wait=1,
warmup=1,
active=2),
on_trace_ready=trace_handler
) as p:
for idx in range(8):
self.payload(use_cuda=use_cuda)
p.step()
self.assertEqual(called_num[0], 2)
# case without schedule
with profile(
activities=[
torch.profiler.ProfilerActivity.CPU
] + ([
torch.profiler.ProfilerActivity.CUDA
] if use_cuda else []),
) as p:
self.payload(use_cuda=use_cuda)
self.payload(use_cuda=use_cuda)
output = p.key_averages().table(
sort_by="self_cuda_time_total" if use_cuda else "self_cpu_time_total", row_limit=-1)
# print(output)
def test_export_stacks(self):
with _profile(with_stack=True, use_kineto=kineto_available()) as p:
x = torch.randn(10, 10)
y = torch.randn(10, 10)
z = torch.mm(x, y)
z = z + y
with TemporaryFileName(mode="w+") as fname:
p.export_stacks(fname)
with io.open(fname, 'r') as f:
lines = f.readlines()
assert len(lines) > 0, "Empty stacks file"
for line in lines:
is_int = False
try:
assert int(line.split(" ")[-1]) > 0, "Invalid stacks record"
is_int = True
except ValueError:
pass
assert is_int, "Invalid stacks record"
@unittest.skipIf(not kineto_available(), "Kineto is required")
def test_tensorboard_trace_handler(self):
use_cuda = torch.cuda.is_available()
with _profile(use_cuda=use_cuda, use_kineto=True):
self.payload(use_cuda=use_cuda)
with TemporaryDirectoryName() as dname:
with profile(
activities=[
torch.profiler.ProfilerActivity.CPU
] + ([
torch.profiler.ProfilerActivity.CUDA
] if use_cuda else []),
schedule=torch.profiler.schedule(
wait=1,
warmup=1,
active=2,
repeat=3),
on_trace_ready=torch.profiler.tensorboard_trace_handler(dname)
) as p:
for _ in range(18):
self.payload(use_cuda=use_cuda)
p.step()
self.assertTrue(os.path.exists(dname))
file_num = 0
for file_name in os.listdir(dname):
parts = file_name.split('.')
self.assertTrue(len(parts) > 4)
self.assertTrue(parts[-4].isdigit() and int(parts[-4]) > 0, "Wrong tracing file name pattern")
self.assertEqual(parts[-3:], ['pt', 'trace', 'json'])
file_num += 1
self.assertEqual(file_num, 3)
if __name__ == '__main__':
run_tests()
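# --- Illustrative sketch (not part of the original test suite) ----------------
# Minimal standalone use of the legacy profiler imported above as _profile:
# profile a CPU matmul and print the aggregated table. Defined as a helper so it
# does not run during test collection.
def _profile_matmul_example():
    with _profile(use_kineto=kineto_available()) as p:
        torch.mm(torch.randn(32, 32), torch.randn(32, 32))
    print(p.key_averages().table(sort_by="self_cpu_time_total", row_limit=5))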
|
[
"noreply@github.com"
] |
arXiv-research.noreply@github.com
|
e7fe8efe710d89ee0fcde959a292ad13e60da77c
|
2ab59f275433a19fe415d5dbb2ff58ca515deed5
|
/tianwei/week4/SearchBigThree.py
|
81fcb3c2baf507c05a71bf9daa23bf74fabf9c27
|
[] |
no_license
|
Shen-xinliang/python-14
|
4b4555339b4942a350385f6190f79dbac1bb9917
|
1a3eed2b9996c7d1f7f8bb8dbd29698611966326
|
refs/heads/master
| 2020-03-27T08:21:59.993230
| 2018-08-27T04:22:00
| 2018-08-27T04:22:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# Randomly generate 20 numbers and pick out the three largest
import random
lst=[random.randrange(0,101) for x in range(20)]
print(lst)
for i in range(3):
    max_idx = i
    for j in range(i+1, 20):
        if lst[max_idx] < lst[j]:
            max_idx = j
    if max_idx != i:
        lst[i], lst[max_idx] = lst[max_idx], lst[i]
    print(lst[i])  # print the i-th largest value once it has been selected
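# --- Illustrative alternative (not part of the original script) ---------------
# The three largest values can also be obtained directly with the standard library:
import heapq
print(heapq.nlargest(3, lst))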
|
[
"398310343@qq.com"
] |
398310343@qq.com
|
65862d59d03041b9ba6eb8ed1295487976bcb946
|
45a2547fe8d29d6b9b7dd4a7bc3b1955ec855aa9
|
/msrvtt_ft_model.py
|
9e222677921f23b9853d1388585406a63fa7034a
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
bitwangdan/video_to_sequence
|
a7013e170363a7461bdc1eadc150a72c4ef3b789
|
111405570595671441478945521f86f1fd65585a
|
refs/heads/master
| 2021-03-24T13:50:06.370019
| 2016-09-30T06:15:28
| 2016-09-30T06:15:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,782
|
py
|
#-*- coding: utf-8 -*-
import tensorflow as tf
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import os
import ipdb
import sys
import cv2
from tensorflow.python.ops import rnn_cell
from keras.preprocessing import sequence
class Video_Caption_Generator():
def __init__(self, dim_image, n_words, dim_hidden, batch_size, n_lstm_steps, drop_out_rate, bias_init_vector=None):
self.dim_image = dim_image
self.n_words = n_words
self.dim_hidden = dim_hidden
self.batch_size = batch_size
self.n_lstm_steps = n_lstm_steps
self.drop_out_rate = drop_out_rate
with tf.device("/gpu:2"):
self.Wemb = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='Wemb')
# self.lstm1 = rnn_cell.BasicLSTMCell(dim_hidden)
# self.lstm2 = rnn_cell.BasicLSTMCell(dim_hidden)
self.lstm1 = rnn_cell.LSTMCell(self.dim_hidden,self.dim_hidden,use_peepholes = True)
self.lstm1_dropout = rnn_cell.DropoutWrapper(self.lstm1,output_keep_prob=1 - self.drop_out_rate)
self.lstm2 = rnn_cell.LSTMCell(self.dim_hidden,self.dim_hidden,use_peepholes = True)
self.lstm2_dropout = rnn_cell.DropoutWrapper(self.lstm2,output_keep_prob=1 - self.drop_out_rate)
# W is Weight, b is Bias
self.encode_image_W = tf.Variable( tf.random_uniform([dim_image, dim_hidden], -0.1, 0.1), name='encode_image_W')
self.encode_image_b = tf.Variable( tf.zeros([dim_hidden]), name='encode_image_b')
self.embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1,0.1), name='embed_word_W')
if bias_init_vector is not None:
self.embed_word_b = tf.Variable(bias_init_vector.astype(np.float32), name='embed_word_b')
else:
self.embed_word_b = tf.Variable(tf.zeros([n_words]), name='embed_word_b')
def build_model(self):
video = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps, self.dim_image])
video_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
caption = tf.placeholder(tf.int32, [self.batch_size, self.n_lstm_steps])
caption_mask = tf.placeholder(tf.float32, [self.batch_size, self.n_lstm_steps])
video_flat = tf.reshape(video, [-1, self.dim_image])
image_emb = tf.nn.xw_plus_b( video_flat, self.encode_image_W, self.encode_image_b) # (batch_size*n_lstm_steps, dim_hidden)
image_emb = tf.reshape(image_emb, [self.batch_size, self.n_lstm_steps, self.dim_hidden])
state1 = tf.zeros([self.batch_size, self.lstm1.state_size])
state2 = tf.zeros([self.batch_size, self.lstm2.state_size])
padding = tf.zeros([self.batch_size, self.dim_hidden])
probs = []
loss = 0.0
for i in range(self.n_lstm_steps): ## Phase 1 => only read frames
if i > 0:
tf.get_variable_scope().reuse_variables()
with tf.variable_scope("LSTM1"):
#output1, state1 = self.lstm1( image_emb[:,i,:], state1 )
output1, state1 = self.lstm1_dropout( image_emb[:,i,:], state1 )
with tf.variable_scope("LSTM2"):
#output2, state2 = self.lstm2( tf.concat(1,[padding,output1]), state2 )
output2, state2 = self.lstm2_dropout( tf.concat(1,[padding,output1]), state2 )
# Each video might have different length. Need to mask those.
# But how? Padding with 0 would be enough?
# Therefore... TODO: for those short videos, keep the last LSTM hidden and output til the end.
for i in range(self.n_lstm_steps): ## Phase 2 => only generate captions
if i == 0:
current_embed = tf.zeros([self.batch_size, self.dim_hidden])
else:
with tf.device("/gpu:2"):
current_embed = tf.nn.embedding_lookup(self.Wemb, caption[:,i-1])
tf.get_variable_scope().reuse_variables()
with tf.variable_scope("LSTM1"):
#output1, state1 = self.lstm1( padding, state1 )
output1, state1 = self.lstm1_dropout( padding, state1 )
with tf.variable_scope("LSTM2"):
#output2, state2 = self.lstm2( tf.concat(1,[current_embed,output1]), state2 )
output2, state2 = self.lstm2_dropout( tf.concat(1,[current_embed,output1]), state2 )
labels = tf.expand_dims(caption[:,i], 1)
indices = tf.expand_dims(tf.range(0, self.batch_size, 1), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(concated, tf.pack([self.batch_size, self.n_words]), 1.0, 0.0)
logit_words = tf.nn.xw_plus_b(output2, self.embed_word_W, self.embed_word_b)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logit_words, onehot_labels)
cross_entropy = cross_entropy * caption_mask[:,i]
probs.append(logit_words)
current_loss = tf.reduce_sum(cross_entropy)
loss += current_loss
loss = loss / tf.reduce_sum(caption_mask)
return loss, video, video_mask, caption, caption_mask, probs
############### Global Parameters ###############
video_path = './youtube_videos'
video_data_path='./video_corpus.csv'
video_feat_path = './youtube_feats'
train_val_video_feat_path = '/home2/dataset/MSR-VTT/train_val_feats'
train_val_sents_gt_path = '/home2/dataset/MSR-VTT/train_val_sents_gt.txt'
ft_train_video_feat_path = '/home2/dataset/MSVD/MSVD_train_feats'
ft_train_sents_gt_path = '/home2/dataset/MSVD/train_sents_gt.txt'
model_path = './MSRVTT_ft_models1/'
############## Train Parameters #################
dim_image = 4096
dim_hidden= 256
n_frame_step = 80
n_epochs = 1000
batch_size = 100
learning_rate = 0.001
##################################################
def get_video_data(video_data_path, video_feat_path, train_ratio=0.9):
video_data = pd.read_csv(video_data_path, sep=',')
video_data = video_data[video_data['Language'] == 'English']
video_data['video_path'] = video_data.apply(lambda row: row['VideoID']+'_'+str(row['Start'])+'_'+str(row['End'])+'.avi.npy', axis=1)
video_data['video_path'] = video_data['video_path'].map(lambda x: os.path.join(video_feat_path, x))
video_data = video_data[video_data['video_path'].map(lambda x: os.path.exists( x ))]
video_data = video_data[video_data['Description'].map(lambda x: isinstance(x, str))]
unique_filenames = video_data['video_path'].unique()
train_len = int(len(unique_filenames)*train_ratio)
train_vids = unique_filenames[:train_len]
test_vids = unique_filenames[train_len:]
train_data = video_data[video_data['video_path'].map(lambda x: x in train_vids)]
test_data = video_data[video_data['video_path'].map(lambda x: x in test_vids)]
return train_data, test_data
def MSRVTT_get_video_data( sents_gt_path, video_feat_path, only_train_data=False ):
video_path = []
description = []
videoID = []
with open(sents_gt_path) as file :
for line in file :
id_sent = line.strip().split('\t')
id_num = int(id_sent[0].split('vid')[1])
if only_train_data == False or id_num < 6513 :
description.append( ''.join(id_sent[-1:]) ) #list to str
videoID.append( id_sent[0] )
video_feat_name = id_sent[0].replace('vid','video')
video_path.append( os.path.join( video_feat_path, video_feat_name+'.mp4.npy' ) )
video_data = DataFrame({'VideoID':videoID, 'Description':description, 'video_path':video_path})
return video_data
def MSVD_get_video_data( sents_gt_path, video_feat_path ):
video_path = []
description = []
videoID = []
with open(sents_gt_path) as file :
for line in file :
id_sent = line.strip().split('\t')
description.append( ''.join(id_sent[-1:]) ) #list to str
videoID.append( id_sent[0] )
video_path.append( os.path.join( video_feat_path, id_sent[0]+'.avi.npy' ) )
video_data = DataFrame({'VideoID':videoID, 'Description':description, 'video_path':video_path})
return video_data
def preProBuildWordVocab(sentence_iterator, word_count_threshold=5): # borrowed this function from NeuralTalk
print 'preprocessing word counts and creating vocab based on word count threshold %d' % (word_count_threshold, )
word_counts = {}
nsents = 0
for sent in sentence_iterator:
nsents += 1
for w in sent.lower().split(' '):
word_counts[w] = word_counts.get(w, 0) + 1
vocab = [w for w in word_counts if word_counts[w] >= word_count_threshold]
print 'filtered words from %d to %d' % (len(word_counts), len(vocab))
ixtoword = {}
ixtoword[0] = '.' # period at the end of the sentence. make first dimension be end token
wordtoix = {}
wordtoix['#START#'] = 0 # make first vector be the start token
ix = 1
for w in vocab:
wordtoix[w] = ix
ixtoword[ix] = w
ix += 1
word_counts['.'] = nsents
bias_init_vector = np.array([1.0*word_counts[ixtoword[i]] for i in ixtoword])
bias_init_vector /= np.sum(bias_init_vector) # normalize to frequencies
bias_init_vector = np.log(bias_init_vector)
bias_init_vector -= np.max(bias_init_vector) # shift to nice numeric range
return wordtoix, ixtoword, bias_init_vector
def fine_tune( pre_trained_model, restore=False, restore_model='' ):
# data w/o split
#train_data, _ = get_video_data(video_data_path, video_feat_path, train_ratio=0.9)
#print(train_data)
#print(type(train_data))
loss_vector = []
pre_epochs = 0
train_data = MSVD_get_video_data( ft_train_sents_gt_path, ft_train_video_feat_path )
msrvtt_vocab_data = MSRVTT_get_video_data( train_val_sents_gt_path, train_val_video_feat_path, True )
captions = msrvtt_vocab_data['Description'].values
captions = map(lambda x: x.replace('.', ''), captions)
captions = map(lambda x: x.replace(',', ''), captions)
wordtoix, ixtoword, bias_init_vector = preProBuildWordVocab(captions, word_count_threshold=10)
model = Video_Caption_Generator(
dim_image=dim_image,
n_words=len(wordtoix),
dim_hidden=dim_hidden,
batch_size=batch_size,
n_lstm_steps=n_frame_step,
drop_out_rate = 0.5,
bias_init_vector=bias_init_vector)
tf_loss, tf_video, tf_video_mask, tf_caption, tf_caption_mask, tf_probs = model.build_model()
sess = tf.InteractiveSession()
if restore == True:
pre_epochs = int(os.path.basename( restore_model ).split('-')[1])
loss_vector = np.load( os.path.join( os.path.dirname(restore_model), 'loss-'+str(pre_epochs)+'.npy' ) ).tolist()
train_op = tf.train.AdamOptimizer(learning_rate).minimize(tf_loss)
tf.initialize_all_variables().run()
saver = tf.train.Saver()
saver.restore(sess, restore_model)
else:
saver = tf.train.Saver()
saver.restore(sess, pre_trained_model)
temp = set(tf.all_variables())
train_op = tf.train.AdamOptimizer(learning_rate).minimize(tf_loss)
sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
saver = tf.train.Saver() #save new optimizer variables
for epoch in range(n_epochs+1):
if restore == True:
epoch = epoch + pre_epochs
index = list(train_data.index)
np.random.shuffle(index)
train_data = train_data.ix[index]
current_train_data = train_data.groupby('video_path').apply(lambda x: x.irow(np.random.choice(len(x))))
current_train_data = current_train_data.reset_index(drop=True)
epoch_loss = 0
for start,end in zip(
range(0, len(current_train_data), batch_size),
range(batch_size, len(current_train_data), batch_size)):
current_batch = current_train_data[start:end]
current_videos = current_batch['video_path'].values
current_feats = np.zeros((batch_size, n_frame_step, dim_image))
current_feats_vals = map(lambda vid: np.load(vid), current_videos)
current_video_masks = np.zeros((batch_size, n_frame_step))
for ind,feat in enumerate(current_feats_vals):
current_feats[ind][:len(current_feats_vals[ind])] = feat
current_video_masks[ind][:len(current_feats_vals[ind])] = 1
current_captions = current_batch['Description'].values
current_captions = map(lambda x: x.replace('.', ''), current_captions)
current_captions = map(lambda x: x.replace(',', ''), current_captions)
current_caption_ind = map(lambda cap: [wordtoix[word] for word in cap.lower().split(' ') if word in wordtoix], current_captions)
current_captions_ = map( lambda sent: [ixtoword[ix] for ix in sent], current_caption_ind )
current_caption_matrix = sequence.pad_sequences(current_caption_ind, padding='post', maxlen=n_frame_step-1)
current_caption_matrix = np.hstack( [current_caption_matrix, np.zeros( [len(current_caption_matrix),1]) ] ).astype(int)
current_caption_masks = np.zeros((current_caption_matrix.shape[0], current_caption_matrix.shape[1]))
nonzeros = np.array( map(lambda x: (x != 0).sum()+1, current_caption_matrix ))
for ind, row in enumerate(current_caption_masks):
row[:nonzeros[ind]] = 1
probs_val = sess.run(tf_probs, feed_dict={
tf_video:current_feats,
tf_caption: current_caption_matrix
})
_, loss_val = sess.run(
[train_op, tf_loss],
feed_dict={
tf_video: current_feats,
tf_video_mask : current_video_masks,
tf_caption: current_caption_matrix,
tf_caption_mask: current_caption_masks
})
epoch_loss = loss_val
print loss_val
loss_vector.append( epoch_loss )
if np.mod(epoch, 100) == 0:
if restore == False or epoch-pre_epochs != 0:
print "Epoch ", epoch, " is done. Saving the model ..."
saver.save(sess, os.path.join(model_path, 'model'), global_step=epoch)
np.save( os.path.join(model_path, 'loss-'+str(epoch)),loss_vector )
fine_tune( 'MSRVTT_models3/model-800')
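# --- Illustrative usage (not part of the original script) ---------------------
# Resuming from an intermediate fine-tuned checkpoint instead of starting from the
# pre-trained model; the checkpoint path below is a hypothetical example.
# fine_tune('MSRVTT_models3/model-800', restore=True,
#           restore_model='MSRVTT_ft_models1/model-100')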
|
[
"meteora9479@gmail.com"
] |
meteora9479@gmail.com
|
d856e9e655cf975ef84512950ea959cc73976f44
|
b3cd00de3608e5225758bb99b28d58320ce81a47
|
/Python_100/python file operations/open_a_file.py
|
c6e0d450b9d4bd1d1a30dc16be266e44bc42f357
|
[] |
no_license
|
buyi823/learn_python
|
fc4ec7e35ec421842975956933cfb56371786d7a
|
b411d5277dba1764d421a267f0ba36da40d8c6e9
|
refs/heads/master
| 2022-05-13T01:05:35.961946
| 2022-05-09T15:20:52
| 2022-05-09T15:20:52
| 71,314,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# open file and read it
with open('data.txt', 'r') as f:
data = f.read()
print('context:{}'.format(data))
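# A matching write example (not in the original snippet); data_copy.txt is a
# placeholder name and receives the content read above.
with open('data_copy.txt', 'w') as f:
    f.write(data)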
|
[
"blaine.xu@kaiostech.com"
] |
blaine.xu@kaiostech.com
|
2e1f89e0981a30cfb888082fe619819cb0c7690e
|
50008b3b7fb7e14f793e92f5b27bf302112a3cb4
|
/recipes/Python/392153_Pascals_triangle/recipe-392153.py
|
75071b1cd1a2c7de3ec6683b1428779c7f0d1362
|
[
"Python-2.0",
"MIT"
] |
permissive
|
betty29/code-1
|
db56807e19ac9cfe711b41d475a322c168cfdca6
|
d097ca0ad6a6aee2180d32dce6a3322621f655fd
|
refs/heads/master
| 2023-03-14T08:15:47.492844
| 2021-02-24T15:39:59
| 2021-02-24T15:39:59
| 341,878,663
| 0
| 0
|
MIT
| 2021-02-24T15:40:00
| 2021-02-24T11:31:15
|
Python
|
UTF-8
|
Python
| false
| false
| 344
|
py
|
def pascal(n):
    """Prints the first n lines of Pascal's triangle
    author: DR#m <dnpsite.narod.ru>
    last rev 20.03.05"""
    l=[1]
for i in xrange(n):
l2=[1]
for j in xrange(len(l)-1):
l2.append(l[j]+l[j+1])
l2.append(1)
print l
l=l2
if __name__=="__main__":
pascal(20)
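# --- Illustrative check (not part of the original recipe) ---------------------
# Row i of Pascal's triangle (0-indexed) sums to 2**i; a small helper, compatible
# with both Python 2 and 3, that could be used to sanity-check the printed rows.
def pascal_row_sum_ok(row, i):
    return sum(row) == 2 ** i
assert pascal_row_sum_ok([1, 3, 3, 1], 3)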
|
[
"betty@qburst.com"
] |
betty@qburst.com
|
8f4185f995147d4bd43fdf84d659e51af8edd22a
|
b213fbd2f4f628aa0f2387c846673ac68e18aa91
|
/BFS/761.py
|
af0547988d630e396f34dc4b8662f12d9bcc10d5
|
[
"MIT"
] |
permissive
|
wilbertgeng/LintCode_exercise
|
94309b4451e34f1931fce6c2ae90d0c2e7c41d35
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
refs/heads/main
| 2023-05-13T06:06:50.887791
| 2021-05-26T20:33:51
| 2021-05-26T20:33:51
| 347,850,106
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
"""761. Smallest Subset
"""
class Solution:
"""
@param arr: an array of non-negative integers
@return: minimum number of elements
"""
def minElements(self, arr):
# write your code here
total = sum(arr)
arr.sort(reverse = True)
queue = [0]
for i in range(len(arr)):
for j in range(len(queue)):
subset = int(queue[j])
queue.append(subset + arr[j])
if (subset + arr[j]) * 2 > total:
return i + 1
return len(arr)
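# --- Illustrative alternative (not part of the original submission) -----------
# A simpler greedy sketch for the same problem: sort descending and take elements
# until their sum exceeds half of the total. Shown only for comparison with the
# queue-based approach above.
def min_elements_greedy(arr):
    total = sum(arr)
    running = 0
    for i, value in enumerate(sorted(arr, reverse=True)):
        running += value
        if running * 2 > total:
            return i + 1
    return len(arr)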
|
[
"wilbertgeng@gmail.com"
] |
wilbertgeng@gmail.com
|
318d856d6fb461f8ec7c6729cd4ffa4ce9c19d8e
|
78c222e75ed8f307a5fd68f9e97d2a558ccd079f
|
/hw1/HW1codes/src/hw1.4.py
|
2921c7959fa6b671d9b21d201d7eb87bd96538ab
|
[] |
no_license
|
YangYue3417/CSE252A-2019
|
4ec4b99e072c04618f69c87c14d804e7d2b70ef9
|
bca3fc25ac0ab1e16ba21536b6922f15ab049d6c
|
refs/heads/master
| 2020-11-24T02:51:01.120821
| 2019-12-14T07:53:20
| 2019-12-14T07:53:20
| 227,932,987
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,128
|
py
|
import copy
import matplotlib.pyplot as plt
img = plt.imread('Lenna.png') # read the PNG image
print("Image shape", img.shape) # print image size and color depth
plt.imshow(img) # displaying the original image
plt.show()
def iterative(img):
image = copy.deepcopy(img) # create a copy of the image matrix
for x in range(image.shape[0]):
for y in range(image.shape[1]):
if x < image.shape[0]/2 and y < image.shape[1]/2:
image[x,y] = image[x,y] * [0,1,1] #removing the red channel
elif x > image.shape[0]/2 and y < image.shape[1]/2:
image[x,y] = image[x,y] * [1,0,1] #removing the green channel
elif x < image.shape[0]/2 and y > image.shape[1]/2:
image[x,y] = image[x,y] * [1,1,0] #removing the blue channel
else:
pass
return image
def vectorized(img):
image = copy.deepcopy(img)
a = int(image.shape[0]/2)
b = int(image.shape[1]/2)
image[:a,:b] = image[:a,:b]*[0,1,1]
image[a:,:b] = image[a:,:b]*[1,0,1]
image[:a,b:] = image[:a,b:]*[1,1,0]
return image
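# --- Illustrative usage (not part of the original homework file) --------------
# Apply the vectorized version to the image loaded above and display the result.
masked = vectorized(img)
plt.imshow(masked)
plt.show()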
|
[
"y5yue@ucsd.edu"
] |
y5yue@ucsd.edu
|
f2b24d12d4bb5e949dd478f8a5861f7ce8f92813
|
218774ca6ba8aa78c314ea2bf94b8b05322bdb18
|
/src/shadow/services/proxy.py
|
71f7861eac348908c0b648a873f4c56068234b32
|
[
"MIT"
] |
permissive
|
beaugunderson/shadow
|
7f48cfb420979d2793c1e9d1c79bfd00346833d2
|
41348c585c15bdbbe8c9fbe6cfa7da4efa9ae680
|
refs/heads/master
| 2023-03-26T11:55:05.162420
| 2012-12-10T00:28:07
| 2012-12-10T00:28:07
| 7,085,460
| 0
| 0
|
MIT
| 2020-07-30T03:34:24
| 2012-12-10T00:30:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,335
|
py
|
import logging
from ginkgo import Service, settings
from ginkgo.async.gevent import WSGIServer
from ..common import logfile
from ..proxy import web
logger = logging.getLogger('shadow.proxy')
class ProxyService(Service):
address = settings.get('proxy', {}).get('address', 'localhost')
port = settings.get('proxy', {}).get('port', 8081)
old_servers = settings.get('proxy', {}).get('old_servers', ['http://localhost:8000/'])
old_servers_timeout = settings.get('proxy', {}).get('old_servers_timeout', 5.0)
old_servers_additional_headers = settings.get('proxy', {}).get('old_servers_additional_headers', [])
old_servers_additional_post_params = settings.get('proxy', {}).get('old_servers_additional_post_params', [])
old_servers_additional_get_params = settings.get('proxy', {}).get('old_servers_additional_get_params', [])
new_servers = settings.get('proxy', {}).get('new_servers', ['http://localhost:8000/'])
new_servers_timeout = settings.get('proxy', {}).get('new_servers_timeout', 5.0)
new_servers_additional_headers = settings.get('proxy', {}).get('new_servers_additional_headers', [])
new_servers_additional_post_params = settings.get('proxy', {}).get('new_servers_additional_post_params', [])
new_servers_additional_get_params = settings.get('proxy', {}).get('new_servers_additional_get_params', [])
def do_start(self):
logger.info("Starting ProxyService on {address!r}:{port!r}".format(address=self.address, port=self.port))
def do_stop(self):
logger.info("Stopping ProxyService")
def __init__(self, result_loggers=[]):
self.app = web.ProxyFlask(self,
self.old_servers, self.new_servers,
self.old_servers_timeout, self.new_servers_timeout,
self.old_servers_additional_get_params,
self.old_servers_additional_post_params,
self.old_servers_additional_headers,
self.new_servers_additional_get_params,
self.new_servers_additional_post_params,
self.new_servers_additional_headers,
result_loggers)
self.server = WSGIServer(
(self.address, self.port), self.app,
log=logfile.pywsgi_access_logger(logger))
self.add_service(self.server)
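# --- Illustrative sketch (not part of the original service) -------------------
# Shape of the 'proxy' settings block this service reads; keys mirror the
# settings.get(...) calls above, and all values are hypothetical examples.
EXAMPLE_PROXY_SETTINGS = {
    'address': '0.0.0.0',
    'port': 8081,
    'old_servers': ['http://old.internal:8000/'],
    'old_servers_timeout': 5.0,
    'new_servers': ['http://new.internal:8000/'],
    'new_servers_timeout': 5.0,
}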
|
[
"kelvin@twilio.com"
] |
kelvin@twilio.com
|
4905df3322bc10343141dbcf1f7607e32757cd43
|
0ef9397a368627cac46cfe6c3a74cfc60874e5d1
|
/venv/bin/pip3
|
ca3ae7fb21da0a5f1002ae35216d900329051e22
|
[] |
no_license
|
tommcm1200/streamripper-scheduler
|
010b3a115bbb6b92884650b4d6a2c61768610edd
|
1de77c62576bd407ec4e667e26c0010f9bf7646d
|
refs/heads/master
| 2022-12-26T05:36:56.577178
| 2020-10-03T03:59:10
| 2020-10-03T03:59:10
| 279,266,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
#!/Users/tommcm/PycharmProjects/Streamripper-Scheduler/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"tommcm@amazon.com"
] |
tommcm@amazon.com
|
|
4cf66e70d8fe2f811ca777847fc520a5ca1ab39b
|
57de679247d140304d0f29c05e561ee9086dccc1
|
/app.py
|
f1dab874362f0dd977a49b8e2f9fe6a863abd4ef
|
[] |
no_license
|
jtang3yo/Mission-to-Mars
|
bc6d33de5afc46aa31c65b07e8c71da396804aee
|
543b957de0592be455c67427133bd87f8f2ce777
|
refs/heads/main
| 2023-06-03T16:27:34.092908
| 2021-06-23T23:08:15
| 2021-06-23T23:08:15
| 376,338,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 605
|
py
|
from flask import Flask, render_template, redirect, url_for
from flask_pymongo import PyMongo
import scraping
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars = mongo.db.mars
mars_data = scraping.scrape_all()
mars.update({}, mars_data, upsert=True)
return redirect('/', code=302)
if __name__ == "__main__":
app.run(debug=True)
|
[
"jtangesq@Jings-MacBook-Pro.local"
] |
jtangesq@Jings-MacBook-Pro.local
|
cf85f36c5f23f7098a48990e1ac77800fd9e55e2
|
2b575de79f3351959402debff1ce755513ecdfd8
|
/reinforcement_learning/week_2_value_iteration/mdp.py
|
ba161aa55d3b8f448ac4fbdedc27a0ef469a8a34
|
[] |
no_license
|
Pharce/DL
|
c7ecd3135e65123b4872439ae794f374d70484d9
|
96e2de280bb5f080483dbcb82332e3b89fcfb36a
|
refs/heads/master
| 2020-04-02T15:57:13.047045
| 2018-10-27T22:56:01
| 2018-10-27T22:56:01
| 154,591,114
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,970
|
py
|
# most of this code was politely stolen from https://github.com/berkeleydeeprlcourse/homework/
# all credit goes to https://github.com/abhishekunique (if I got the author right)
import sys
import random
import numpy as np
def weighted_choice(v, p):
total = sum(p)
r = random.uniform(0, total)
upto = 0
for c, w in zip(v,p):
if upto + w >= r:
return c
upto += w
assert False, "Shouldn't get here"
class MDP:
def __init__(self, transition_probs, rewards, initial_state=None):
"""
Defines an MDP. Compatible with gym Env.
:param transition_probs: transition_probs[s][a][s_next] = P(s_next | s, a)
A dict[state -> dict] of dicts[action -> dict] of dicts[next_state -> prob]
For each state and action, probabilities of next states should sum to 1
If a state has no actions available, it is considered terminal
:param rewards: rewards[s][a][s_next] = r(s,a,s')
A dict[state -> dict] of dicts[action -> dict] of dicts[next_state -> reward]
The reward for anything not mentioned here is zero.
:param initial_state: a state where the agent starts, or a callable() -> state
By default, picks initial state at random.
States and actions can be anything you can use as dict keys, but we recommend that you use strings or integers
Here's an example from MDP depicted on http://bit.ly/2jrNHNr
transition_probs = {
's0':{
'a0': {'s0': 0.5, 's2': 0.5},
'a1': {'s2': 1}
},
's1':{
'a0': {'s0': 0.7, 's1': 0.1, 's2': 0.2},
'a1': {'s1': 0.95, 's2': 0.05}
},
's2':{
'a0': {'s0': 0.4, 's1': 0.6},
'a1': {'s0': 0.3, 's1': 0.3, 's2':0.4}
}
}
rewards = {
's1': {'a0': {'s0': +5}},
's2': {'a1': {'s0': -1}}
}
"""
self._check_param_consistency(transition_probs, rewards)
self._transition_probs = transition_probs
self._rewards = rewards
self._initial_state = initial_state
self.n_states = len(transition_probs)
self.reset()
def get_all_states(self):
""" return a tuple of all possiblestates """
return tuple(self._transition_probs.keys())
def get_possible_actions(self, state):
""" return a tuple of possible actions in a given state """
return tuple(self._transition_probs.get(state, {}).keys())
def is_terminal(self, state):
""" return True if state is terminal or False if it isn't """
return len(self.get_possible_actions(state)) == 0
def get_next_states(self, state, action):
""" return a dictionary of {next_state1 : P(next_state1 | state, action), next_state2: ...} """
assert action in self.get_possible_actions(state), "cannot do action %s from state %s" % (action, state)
return self._transition_probs[state][action]
def get_transition_prob(self, state, action, next_state):
""" return P(next_state | state, action) """
return self.get_next_states(state, action).get(next_state, 0.0)
def get_reward(self, state, action, next_state):
""" return the reward you get for taking action in state and landing on next_state"""
assert action in self.get_possible_actions(state), "cannot do action %s from state %s" % (action, state)
return self._rewards.get(state, {}).get(action, {}).get(next_state, 0.0)
def reset(self):
""" reset the game, return the initial state"""
if self._initial_state is None:
self._current_state = random.choice(tuple(self._transition_probs.keys()))
elif self._initial_state in self._transition_probs:
self._current_state = self._initial_state
elif callable(self._initial_state):
self._current_state = self._initial_state()
else:
raise ValueError("initial state %s should be either a state or a function() -> state" % self._initial_state)
return self._current_state
def step(self, action):
""" take action, return next_state, reward, is_done, empty_info """
possible_states, probs = zip(*self.get_next_states(self._current_state, action).items())
next_state = weighted_choice(possible_states, p=probs)
reward = self.get_reward(self._current_state, action, next_state)
is_done = self.is_terminal(next_state)
self._current_state = next_state
return next_state, reward, is_done, {}
def render(self):
print("Currently at %s" % self._current_state)
def _check_param_consistency(self, transition_probs, rewards):
for state in transition_probs:
assert isinstance(transition_probs[state], dict), "transition_probs for %s should be a dictionary " \
"but is instead %s" % (
state, type(transition_probs[state]))
for action in transition_probs[state]:
assert isinstance(transition_probs[state][action], dict), "transition_probs for %s, %s should be " \
"a dictionary but is instead %s" % (
state, action,
type(transition_probs[state][action]))
next_state_probs = transition_probs[state][action]
assert len(next_state_probs) != 0, "from state %s action %s leads to no next states" % (state, action)
sum_probs = sum(next_state_probs.values())
assert abs(sum_probs - 1) <= 1e-10, "next state probabilities for state %s action %s " \
"add up to %f (should be 1)" % (state, action, sum_probs)
for state in rewards:
assert isinstance(rewards[state], dict), "rewards for %s should be a dictionary " \
"but is instead %s" % (state, type(rewards[state]))
for action in rewards[state]:
assert isinstance(rewards[state][action], dict), "rewards for %s, %s should be " \
"a dictionary but is instead %s" % (
state, action, type(rewards[state][action]))
msg = "The Enrichment Center once again reminds you that Android Hell is a real place where" \
" you will be sent at the first sign of defiance. "
assert None not in transition_probs, "please do not use None as a state identifier. " + msg
assert None not in rewards, "please do not use None as an action identifier. " + msg
class FrozenLakeEnv(MDP):
"""
Winter is here. You and your friends were tossing around a frisbee at the park
when you made a wild throw that left the frisbee out in the middle of the lake.
The water is mostly frozen, but there are a few holes where the ice has melted.
If you step into one of those holes, you'll fall into the freezing water.
At this time, there's an international frisbee shortage, so it's absolutely imperative that
you navigate across the lake and retrieve the disc.
However, the ice is slippery, so you won't always move in the direction you intend.
The surface is described using a grid like the following
SFFF
FHFH
FFFH
HFFG
S : starting point, safe
F : frozen surface, safe
H : hole, fall to your doom
G : goal, where the frisbee is located
The episode ends when you reach the goal or fall in a hole.
You receive a reward of 1 if you reach the goal, and zero otherwise.
"""
MAPS = {
"4x4": [
"SFFF",
"FHFH",
"FFFH",
"HFFG"
],
"8x8": [
"SFFFFFFF",
"FFFFFFFF",
"FFFHFFFF",
"FFFFFHFF",
"FFFHFFFF",
"FHHFFFHF",
"FHFFHFHF",
"FFFHFFFG"
],
}
def __init__(self, desc=None, map_name="4x4", slip_chance=0.2):
if desc is None and map_name is None:
raise ValueError('Must provide either desc or map_name')
elif desc is None:
desc = self.MAPS[map_name]
assert ''.join(desc).count('S') == 1, "this implementation supports having exactly one initial state"
assert all(c in "SFHG" for c in ''.join(desc)), "all cells must be either of S, F, H or G"
self.desc = desc = np.asarray(list(map(list,desc)),dtype='str')
self.lastaction = None
nrow, ncol = desc.shape
states = [(i, j) for i in range(nrow) for j in range(ncol)]
actions = ["left","down","right","up"]
initial_state = states[np.array(desc == 'S').ravel().argmax()]
def move(row, col, movement):
if movement== 'left':
col = max(col-1,0)
elif movement== 'down':
row = min(row+1,nrow-1)
elif movement== 'right':
col = min(col+1,ncol-1)
elif movement== 'up':
row = max(row-1,0)
else:
raise("invalid action")
return (row, col)
transition_probs = {s : {} for s in states}
rewards = {s : {} for s in states}
for (row,col) in states:
if desc[row, col] in "GH": continue
for action_i in range(len(actions)):
action = actions[action_i]
transition_probs[(row, col)][action] = {}
rewards[(row, col)][action] = {}
for movement_i in [(action_i - 1) % len(actions), action_i, (action_i + 1) % len(actions)]:
movement = actions[movement_i]
newrow, newcol = move(row, col, movement)
prob = (1. - slip_chance) if movement == action else (slip_chance / 2.)
if prob == 0: continue
if (newrow, newcol) not in transition_probs[row,col][action]:
transition_probs[row,col][action][newrow, newcol] = prob
else:
transition_probs[row, col][action][newrow, newcol] += prob
if desc[newrow, newcol] == 'G':
rewards[row,col][action][newrow, newcol] = 1.0
MDP.__init__(self, transition_probs, rewards, initial_state)
def render(self):
desc_copy = np.copy(self.desc)
desc_copy[self._current_state] = '*'
#print('\n'.join(map(''.join,desc_copy)), end='\n\n')
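# --- Hedged usage sketch (added example, not part of the original module) ---
# A minimal episode on the FrozenLakeEnv defined above, picking random actions
# until a terminal H or G cell is reached.
if __name__ == "__main__":
    env = FrozenLakeEnv(map_name="4x4", slip_chance=0.2)
    state = env.reset()
    total_reward, done = 0.0, False
    while not done:
        action = random.choice(env.get_possible_actions(state))
        state, reward, done, _ = env.step(action)
        total_reward += reward
    print("episode finished with total reward %s" % total_reward)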
|
[
"noreply@github.com"
] |
Pharce.noreply@github.com
|
769b8de587dac9524e70aa9ed4ee4aebce6a6eb1
|
f863d32353ea93bf371831dd0ce7dc53fb737cdc
|
/Cylinder_Driver.py
|
89981730fb6cc27e671a50cccca2e167a625a69a
|
[] |
no_license
|
GeospatialDaryl/503_StochasticCylinders
|
5ccbc1c4e82e20b760248d8e4b37b95227b29d3d
|
38b7948ef6bbf214838dd8a9a93aed62b7474957
|
refs/heads/master
| 2016-08-12T22:30:53.395753
| 2016-01-27T00:51:00
| 2016-01-27T00:51:00
| 50,470,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,543
|
py
|
import logging
import os
from functools import partial
from multiprocessing.pool import Pool
import multiprocessing
from time import time
import ogr
import subprocess
import shlex
import numpy.random as rand
def run_and_return(cmdSrc, cmdDest = ""):
"""Run a system command and return the output"""
srcProcess = subprocess.Popen(shlex.split(cmdSrc), stdout=subprocess.PIPE)
if cmdDest:
destProcess = subprocess.Popen(shlex.split(cmdDest),
stdin=srcProcess.stdout,
stdout=subprocess.PIPE)
stdout, stderr = destProcess.communicate()
else:
stdout, stderr = srcProcess.communicate()
return stdout.decode('ascii')
def run_and_grep(cmdSrc, grepTerm):
"""Run a system command and return the output"""
srcProcess = subprocess.Popen(tuple(cmdSrc.split(" ")), stdout=subprocess.PIPE)
stdout, stderr = srcProcess.communicate()
asciiOut = stdout.decode('ascii').splitlines()
for lines in asciiOut:
if grepTerm in lines:
return lines
def ogrPrettyPrintField(feat,feat_defn,index):
i = index
field_defn = feat_defn.GetFieldDefn(i)
# Tests below can be simplified with just :
# print feat.GetField(i)
if field_defn.GetType() == ogr.OFTInteger: #or field_defn.GetType() == ogr.OFTInteger64:
print "%d" % feat.GetFieldAsInteger(i)
elif field_defn.GetType() == ogr.OFTReal:
print "%.3f" % feat.GetFieldAsDouble(i)
elif field_defn.GetType() == ogr.OFTString:
print "%s" % feat.GetFieldAsString(i)
else:
print "%s" % feat.GetFieldAsString(i)
def ogrTypedFieldVal(feat,feat_defn,index):
i = index
field_defn = feat_defn.GetFieldDefn(i)
# Tests below can be simplified with just :
# print feat.GetField(i)
if field_defn.GetType() == ogr.OFTInteger: #or field_defn.GetType() == ogr.OFTInteger64:
return "%d" % feat.GetFieldAsInteger(i)
elif field_defn.GetType() == ogr.OFTReal:
return "%.3f" % feat.GetFieldAsDouble(i)
elif field_defn.GetType() == ogr.OFTString:
return "%s" % feat.GetFieldAsString(i)
else:
return "%s" % feat.GetFieldAsString(i)
# from download import <func_A>, <func_B>, <func_C>
def ptWKTtoSHP(inPtWKT,outSHPPath,inOID=-9999,inBuffDist=10):
import os
driver = ogr.GetDriverByName("ESRI Shapefile")
if os.path.exists(outSHPPath): driver.DeleteDataSource(outSHPPath)
ds = driver.CreateDataSource(outSHPPath)
layer = ds.CreateLayer("plot",geom_type=ogr.wkbPolygon)
fieldDef = ogr.FieldDefn("PID",ogr.OFTInteger)
layer.CreateField(fieldDef)
featureDfn = layer.GetLayerDefn()
feat = ogr.Feature(featureDfn)
pt = ogr.CreateGeometryFromWkt(inPtWKT)
bufferDist = 10
poly = pt.Buffer(inBuffDist)
feat.SetGeometry(poly)
feat.SetField("PID",inOID)
layer.CreateFeature(feat)
ds.Destroy()
def unpackPtWKT(ptWKT):
if len(ptWKT) == 2:
return ptWKT
afterP1 = ptWKT.split("(")[1]
beforeP2 = afterP1.split(")" )[0]
X,Y = beforeP2.split(" ")
return (X,Y)
def packPtWKT(tupleXY):
x,y = tupleXY
ptWKT = "POINT ("+str(x)+" "+str(y)+")"
return ptWKT
def extractDictFields(layerDefinition):
dictFields = {}
for i in range(layerDefinition.GetFieldCount()):
lyrName = layerDefinition.GetFieldDefn(i).GetName()
dictFields[lyrName] = i
# print lyrName
return dictFields
def getListOIDs(inputLayer, dictFields, fieldName="OBJECTID"):
layer = inputLayer
layerDefinition = layer.GetLayerDefn()
listOIDs = []
for feats in layer:
geom = feats.GetGeometryRef()
#print geom.Centroid().ExportToWkt()
featdfn = feats.GetDefnRef
OID = ogrTypedFieldVal(feats,layerDefinition, dictFields["OBJECTID"])
listOIDs.append(OID)
return listOIDs
def oidCloudMetric(inTuple, BufferDist = 10, sdX = 1., sdY = 1., nSamples = 100, dist = "Normal" ):
import csv
import matplotlib
import scipy as sp
import numpy as np
dictCommands = { "clippoly" : r"C:\\Apps\\FUSION\\PolyClipData.exe",
"cloudmetrics" : r"C:\\Apps\\FUSION\\cloudmetrics.exe",
"clipdata" : r"C:\\Apps\\FUSION\\ClipData.exe" }
#outdriver=ogr.GetDriverByName('MEMORY')
#source=outdriver.CreateDataSource('memData')
OID = inTuple[0]
ptWKT = inTuple[1]
outdriver = ogr.GetDriverByName("ESRI Shapefile")
outSHPPath = r"Z:\\poly"+str(OID)+".shp"
if os.path.exists(outSHPPath):
outdriver.DeleteDataSource(outSHPPath)
source = outdriver.CreateDataSource(outSHPPath)
layer = source.CreateLayer("Buffers", geom_type = ogr.wkbPolygon)
field_OID = ogr.FieldDefn("OID", ogr.OFTInteger)
field_N = ogr.FieldDefn( "N" , ogr.OFTInteger)
layer.CreateField(field_OID)
layer.CreateField(field_N)
(X,Y) = unpackPtWKT(ptWKT)
Xs = rand.normal( X, sdX, nSamples )
Ys = rand.normal( Y, sdY, nSamples )
maxX = Xs.max() + 15
minX = Xs.min() - 15
maxY = Ys.max() + 15
minY = Ys.min() - 15
for i in range(nSamples):
feature = ogr.Feature(layer.GetLayerDefn() )
thisPtWKT = packPtWKT( (Xs[i], Ys[i]) )
feature.SetField("OID" , OID )
feature.SetField( "N" , i )
point = ogr.CreateGeometryFromWkt(thisPtWKT)
poly = point.Buffer( BufferDist )
feature.SetGeometry(poly)
#CreateSingleton(feature, (OID,i) )
layer.CreateFeature(feature)
feature.Destroy()
source.Destroy()
newFolder = r"Z:\\plot"+str(OID)
try:
os.mkdir( newFolder )
except:
pass
# Generate the SHP of each sampled cylinder
for i in range(nSamples):
listSHP = os.listdir(newFolder)#+r"\\plot"+str(OID)+"_"+str(i) )
match = r"plot"+str(OID)+"_"+str(i)
for items in listSHP:
if match in items: os.remove(newFolder+r"\\"+items)
thisShp = newFolder+r"\\"+"plot"+str(OID)+"_"+str(i)+".shp"
cmdSrc = '''ogr2ogr -f "ESRI Shapefile" -fid '''+str(i)+" "+thisShp+" "+outSHPPath
print run_and_return(cmdSrc)
#Subset the lidar data for the family of cylinder
maxMin = str(minX)+" "+str(minY)+" "+str(maxX)+" "+str(maxY)
clippedBlock = newFolder+r"\\clip.lda"
cmdSrc = dictCommands["clipdata"]+r" /index B:\\LASNorm_out\\*.las "+clippedBlock+" "+maxMin
print run_and_return(cmdSrc)
#Call MP on
tupleOidN = ( 1, 0)
def mpMetricsCylinder(tupleOidN):
'''
'''
OID, N = tupleOidN
# factor this into function
newFolder = r"Z:\\plot"+str(OID)
clippedBlock = newFolder+r"\\clip.lda"
thisShp = newFolder+r"\\"+"plot"+str(OID)+"_"+str(N)+".shp"
thisLDA = newFolder+r"\\"+"plot"+str(OID)+"_"+str(N)+".lda"
outputCSV = "output"+str(OID)+"_"+str(N)+".csv"
# ####
listToClean = [thisLDA,outputCSV]
for items in listToClean:
if os.path.isfile( items ): os.remove( items )
if os.path.isfile( outputCSV ): os.remove( outputCSV )
cmdSrc = dictCommands["clippoly"]+r" /index "+" "+thisShp+" "+thisLDA+" "+clippedBlock
print run_and_return(cmdSrc)
cmdSrc = dictCommands["cloudmetrics"]+r" "+ thisLDA +" " + outputCSV
print run_and_return(cmdSrc)
f = open(outputCSV, "r")
k = 0
dictHeader = {}
listLines = []
for lines in f:
print lines
if k == 0 :
listLine = lines.replace(" ","_").rstrip().split(",")
for i in range(len(listLine)):
dictHeader[i] = listLine[i]
print listLine[i]
k= k+1
listLines.append(listLine)
#print listLine
elif k > 1:
listLine = lines.split(",")
listLines.append(listLine)
goodLine= listLine
return goodLine
os.chdir(newFolder)
print mpMetricsCylinder( tupleOidN )
listToClean = [thisShp,thisLDA,outputCSV,clippedBlock]
for items in listToClean:
if os.path.isfile( items ): os.remove( items )
os.chdir(r"..")
def main():
# ##
# 0 Logging Init
# ##
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.getLogger('requests').setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
listCloudMetrics = []
sourceLAS = r"B:\\LASNorm_out"
sourceSHP = r"A:\\AllPlotCenters_fromEditedSSF.shp"
destDir = r"B:\\LASNorm_out"
listGrid = []
listMetrics2 = []
inSHP = r"A:\AllPlotCenters_fromEditedSSF.shp"
driver = ogr.GetDriverByName('ESRI Shapefile')
dataSource = driver.Open(inSHP, 0) # 0 means read-only. 1 means writeable.
if dataSource is None:
print 'Could not open %s' % (inSHP)
else:
print 'Opened %s' % (inSHP)
layer = dataSource.GetLayer()
featureCount = layer.GetFeatureCount()
lyrDefn = layer.GetLayerDefn()
fieldCount = lyrDefn.GetFieldCount()
dictFields = extractDictFields(lyrDefn)
print "Number of features in %s: %d" % (os.path.basename(inSHP),featureCount)
listOIDs = []
listOIDs = getListOIDs(layer, dictFields)
print listOIDs
layer.ResetReading()
ptWKTs = []
feat = layer.GetNextFeature()
while feat:
geom = feat.GetGeometryRef()
ptWKT = geom.ExportToWkt()
OID = feat.GetFieldAsInteger(0)
inTuple = (OID,ptWKT)
ptWKTs.append(inTuple)
del ptWKT
feat = layer.GetNextFeature()
print ptWKTs
ts = time()
#listOIDs = getListOIDs(layer,dictFields)
#with Pool(10) as p:
p = Pool(10)
# ptCloudMetric(feature, dictFields, ogrTypedFieldVal, layerDefinition, ptWKTtoSHP, dictCommands):
#partialCM = partial(oidCloudMetric, layerDefinition )
res = oidCloudMetric( ptWKTs[0] )
outputs = [result[0] for result in res]
print outputs
print res
#p.map(oidCloudMetric, ptWKTs)
logging.info('Took %s seconds', time() - ts)
if __name__ == '__main__':
main()
|
[
"daryl_van_dyke@fws.gov"
] |
daryl_van_dyke@fws.gov
|
939593332352dd37c18a98c7d06f74bf700b8750
|
a2151340197e0fcc3685eacf087aa57b2c5ac6eb
|
/lang101/bin/f2py
|
44ab6f442938ca5c3efc6c65497369e4c5d3f378
|
[
"MIT"
] |
permissive
|
haniamatera/cds-language
|
97d7feb5541bc179f88465e59dd71d24e1af3884
|
4014aa2af00947c61c8a27f6ce87c4b8b9d721cb
|
refs/heads/main
| 2023-03-07T11:42:39.637967
| 2021-03-01T17:27:57
| 2021-03-01T17:27:57
| 338,007,567
| 0
| 0
|
MIT
| 2021-02-11T11:19:08
| 2021-02-11T11:19:08
| null |
UTF-8
|
Python
| false
| false
| 267
|
#!/Users/hannamatera/Desktop/CDS_new/cds-language/lang101/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"haniamatera@gmail.com"
] |
haniamatera@gmail.com
|
|
c48f86a907b699de10b5ca7f7e98500310dd7653
|
d858bfab35856192d4ef79059385060930463b40
|
/program.py
|
72e3d20f3d8948a165c7cb994c85b76a42fb6426
|
[] |
no_license
|
zlata24/First-Python-Game
|
3f16180a070c824fe187b7279e67cd94245a5184
|
a7da211fef89d5bb41d09074bc5fd7f8de2bd706
|
refs/heads/master
| 2022-11-05T16:26:41.071274
| 2020-06-15T03:29:38
| 2020-06-15T03:29:38
| 272,332,941
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,203
|
py
|
# My first game
# Welcome message
print("Welcome to the game!")
print("Let's play!")
print()
# def dictionary
dictionary = {}
# Ask user's info
print("Please submit your age")
prompt_age = input("> ")
prompt_age = int(prompt_age)
print("Please submit your name")
prompt_name = input("> ")
prompt_name = prompt_name.title()
dictionary['employee'] = (prompt_name, prompt_age)
print("What language would you like to start with?")
prompt_lang = input("> ")
prompt_lang = prompt_lang.title()
dictionary["lang"] = prompt_lang
if prompt_lang == 'Python':
print(r'☆。★。☆。★')
print(r' 。☆ 。☆。☆')
print(r'★。\|/。★')
print(r'- Yay Python! -')
print(r'★。/|\。★')
print(r' 。☆。。☆ ')
print(r'☆。★。 ☆ ★')
else:
print("Yay!")
# Questionnaire about user's future life choices
while True:
print("Are you ready to become an engineer?")
ready = input("> ")
ready = ready.upper()
if (ready == "EXIT"):
print("Goodbye!")
break
elif (ready.startswith("Y")):
print("I knew you'd say that! Being engineer is so cool!")
elif (ready.startswith("N")):
print("Well, maybe you need some more time to think about it.")
elif (ready == "I DONT KNOW"):
print("Let me explain why being engineer is so cool!")
print("You get to work with the most talented people and unique programs!")
else:
print("Sorry, wrong answer")
# Red vs blue buttons
print()
print("You have a two choices: pressing a red button or a blue. Press r or b to see your future.")
button = input("> ")
button = button.lower()
if (button.startswith("r")):
print("Congratulations! You just became an engineer!")
else:
print("Unfortunately right now you are not eligible to become an engineer. Try to pick up on some more coding.")
# Choosing a path
print()
print("Please make a selection: Would you like to go right or left? Enter r or l.")
way = input("> ")
way = way.lower()
if (way == "r"):
print("Fantastic! You just got an offer from Brightitech!")
elif (way == "l"):
print("Keep looking for your dream job!")
# Joining a team
print()
team_info = []
print("Enter five teams you are looking to join:")
team_name = input("> ")
team_info.append(team_name)
team_name = input("> ")
team_info.append(team_name)
team_name = input("> ")
team_info.append(team_name)
team_name = input("> ")
team_info.append(team_name)
team_name = input("> ")
team_info.append(team_name)
print("There is so many great teams at Brightitech:")
for team in team_info:
print(team)
# The five functions
def first_team(team_name):
print('''
The {} team would like you to solve a simple math problem. If your solution passes, you may join this team.
x = 100
y = 5
modulo = x % y'''.format(team_name))
print("Enter the value for modulo:")
modulo = input("> ")
modulo = int(modulo)
if modulo == 0:
return True
else:
print("Wrong answer")
return False
is_right_answer = first_team(team_info[0])
if is_right_answer:
print("Congratulations on making it to {} team at Brightitech!".format(team_info[0]))
# def second_team():
# second_team()
def third_team(team_name):
print('''
The {} team would like you to answer this technical question:
How do you count the number of characters in a string in Python?
A: len(str)
B: count(str)
C: sizeof(str)
Enter choice A, B or C:
'''.format(team_name))
choice = input("> ")
if choice == "A":
print("Welcome to our team.")
return True
else:
print("Try joining another team.")
return False
is_correct_answer = third_team(team_info[2])
if is_correct_answer:
print("Congratulations on making it to {} team at Brightitech".format(team_info[2]))
# Printing information on new engineer
print()
print("For all new employees we will be storing their information in our file system.")
for key, val in dictionary.items():
print('{}: {}'.format(key, val))
print("Please select Y or N if your information is correct:")
selection = input("> ")
selection = selection.upper()
print("Thank you for playing this game! Goodbye!")
# def fourth_team():
# forth_team()
# def fifth_team():
# fifth_team()
|
[
"noreply@github.com"
] |
zlata24.noreply@github.com
|
b96ebaae609294cdc8b57b616afb0e1dd7dc6029
|
e6e98dee9ef88fe8cb72a5de3027c0fc721d529c
|
/magicbroomsite/migrations/0017_auto_20170906_1226.py
|
7fdbee83c720f27ebc7dc95c23739b022335127f
|
[] |
no_license
|
aboudaman/magicbroom
|
eb80704ed091b788b8e8d8d8f62e6ab8bb10bcd7
|
349cb23ccd44746cddc9708d08cd2558d840ec00
|
refs/heads/master
| 2021-05-05T17:48:07.826887
| 2018-01-06T15:34:43
| 2018-01-06T15:34:43
| 103,464,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-06 12:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('magicbroomsite', '0016_auto_20170906_1226'),
]
operations = [
migrations.AlterField(
model_name='quotationrequests',
name='home_info_boost',
field=models.CharField(blank=True, choices=[('1 bed', '1 bed'), ('2 bed', '2 bed'), ('3 bed', '3 bed')], default='1 bed', max_length=155, null=True),
),
]
|
[
"adaman2000@gmail.com"
] |
adaman2000@gmail.com
|
255799a7a5d5345843cfb229475108352d8fed79
|
785ff1324640cf8de7d474c49673dd80aa509d77
|
/Day7/exercise_4.py
|
296bd241ff51610eb8a8c0e70499ed3e777af3ce
|
[] |
no_license
|
jerry0734/learnPy100D
|
663a8321164ac57d196f6c5219d2858b57c65603
|
06d3940cd7ca32485168bf39930d07d6656263bc
|
refs/heads/master
| 2022-12-19T12:09:13.846034
| 2020-09-22T03:42:55
| 2020-09-22T03:42:55
| 293,426,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
"""
Design a function that returns the largest and second-largest elements of the given list.
"""
import random
def get_max(numbers):
max_1 = numbers[0]
max_2 = 0
for i in range(1, len(numbers)):
if numbers[i] > max_1:
max_2, max_1 = max_1, numbers[i]
elif numbers[i] > max_2:
max_2 = numbers[i]
print(i, max_1, max_2)
return max_1, max_2
numbers = []
i = 0
while i < 10:
i += 1
numbers.append(random.randint(0, 1000))
print(numbers)
print(get_max(numbers))
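# --- Hedged alternative (added example, not part of the original exercise) ---
# The standard library answers the same question directly:
import heapq
print(heapq.nlargest(2, numbers))  # largest and second-largest, in that order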
|
[
"jerry34@foxmail.com"
] |
jerry34@foxmail.com
|
fbaa5089d6e1da6ee4b4ae8fbbb0307f970b896d
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/zmq/backend/cython/__init__.py
|
23d46766ea57349610e4f4bee39629dbae38036a
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a653e6cc07468a5fe090dadca6864bc60a827cd8d17835f62fbf13c676fb7f98
size 627
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
8e15beec0f102900873eca9f776a89110c832296
|
b9b9a66f853c76701d67ea408712f3aa84a0e018
|
/codewars/codewars_alphabet_anagram.py
|
63b0ed0fd55a564bc5481cbf4d51ce2ddaf68c83
|
[] |
no_license
|
MiConnell/Katas
|
5dcaf73aad3b5f534d898c0ca260b1b8880e379d
|
f1765f3e9f3c0cc665e6bca2dc98d20781a773a1
|
refs/heads/master
| 2023-04-05T00:36:32.196579
| 2021-04-17T22:37:23
| 2021-04-17T22:37:23
| 286,301,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
# https://www.codewars.com/kata/53e57dada0cb0400ba000688/train/python
"""
Consider a "word" as any sequence of capital letters A-Z (not limited to just "dictionary words").
For any word with at least two different letters,
there are other words composed of the same letters but in a different order
(for instance, STATIONARILY/ANTIROYALIST, which happen to both be dictionary words;
for our purposes "AAIILNORSTTY" is also a "word" composed of the same letters as these two).
We can then assign a number to every word, based on where it falls in an alphabetically
sorted list of all words made up of the same group of letters.
One way to do this would be to generate the entire list of words and find the desired one,
but this would be slow if the word is long.
Given a word, return its number. Your function should be able to accept any word 25 letters
or less in length (possibly with some letters repeated), and take no more than 500 milliseconds to run.
To compare, when the solution code runs the 27 test cases in JS, it takes 101ms.
For very large words, you'll run into number precision issues in JS
(if the word's position is greater than 2^53). For the JS tests with large positions,
there's some leeway (.000000001%). If you feel like you're getting it right for the smaller ranks,
and only failing by rounding on the larger, submit a couple more times and see if it takes.
Sample words, with their rank:
ABAB = 2
AAAB = 1
BAAA = 4
QUESTION = 24572
BOOKKEEPER = 10743
"""
import itertools
def list_position(word: str) -> int:
combs = itertools.permutations(word, (len(word)))
all_list = sorted(["".join(i) for i in combs])
res = {c: i for i, c in enumerate(all_list, start=1)}
return res[word]
if __name__ == "__main__":
print(list_position("QUESTION"))
|
[
"connellmp@gmail.com"
] |
connellmp@gmail.com
|
55723d6e280682d8c762ef2dfe520b0ded9f7962
|
a9e60d0e5b3b5062a81da96be2d9c748a96ffca7
|
/configurations/lab44-config/scripts/Demo/demoPeakFindingInFiles01.py
|
2d9fee5e77d66d72e91790e437c8f74cec9c8c77
|
[] |
no_license
|
openGDA/gda-diamond
|
3736718596f47607335ada470d06148d7b57526e
|
bbb64dcfd581c30eddb210c647db5b5864b59166
|
refs/heads/master
| 2023-08-16T08:01:11.075927
| 2023-08-15T16:01:52
| 2023-08-15T16:01:52
| 121,757,699
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
import math;
import myFun;
y1 = DeviceFunctionClass("y1", "testMotor1","testMotor2", "myFun.testFunY1");
y2 = DeviceFunctionClass("y2", "testMotor1","testMotor2", "myFun.testFunY2");
scan testMotor1 -2*math.pi 2*math.pi 0.1 y1
scan testMotor1 -2*math.pi 2*math.pi 0.1 y2
data=ScanFileHolder();
#load the SRS data set
data.loadSRS()
#load the data set from run 13479
#data.loadSRS(13479)
#data.loadSRS("13479.dat")
#print the axis information about the data set
#data.info()
data.ls()
#plot
#data.plot("y1")
#get one axis from the data set
#data.getAxis("testMotor1")
#data.getDataSet("testMotor1")
#data.getAxis(1)
#To find all peaks which appear to be peaks at the given deltawidth
# i.e. if there are 3 points deltawidth apart and the middle one is
# highest then this is classed as a peak
# @param XAxis, The X axis of the graph to fit
# @param YAxis, The Y axis of the graph to fit
# @param deltaWidth The width of the peak information
# @return A dataset containing the positions of all the peaks found.
x=data.getAxis("testMotor1");
y=data.getAxis("y2");
dm=data.getMax("y2");
dmp=data.getMaxPos("y2");
dmpx=data.getMaxPos("testMotor1", "y2");
print dmp, dmpx, dm
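# --- Hedged illustration (added sketch, not part of the GDA script above) ---
# A plain-Python version of the peak test described in the comments, treating
# deltaWidth as a sample offset: a point is a peak when it sits above the
# samples deltaWidth to either side of it.
def find_peaks(xvals, yvals, deltaWidth):
    peaks = []
    for i in range(deltaWidth, len(yvals) - deltaWidth):
        if yvals[i] > yvals[i - deltaWidth] and yvals[i] > yvals[i + deltaWidth]:
            peaks.append(xvals[i])
    return peaks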
|
[
"fajin.yuan@diamond.ac.uk"
] |
fajin.yuan@diamond.ac.uk
|
56f70869124c25dbc8211897f56c3679c2655df4
|
8dfee16e9c680d528439767316428b9e4f609dbd
|
/{{ cookiecutter.project_name }}/config/settings/production.py
|
6888ae345433d39e5719c28ccfe33a0937ae8f4d
|
[] |
no_license
|
joakimekman/cookiecutter-django-ekman
|
c9ffb3d36b20abc2399495d8352a88d03fbcfd0f
|
727387947315f2c859d5683fe48e3c4f83465cb0
|
refs/heads/master
| 2022-12-14T14:23:24.412004
| 2019-12-18T12:06:43
| 2019-12-18T12:06:43
| 167,615,654
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
from .base import *
from decouple import config
# GENERAL
# --------------------------------------------------------------------
DEBUG = False
ALLOWED_HOSTS = [
'localhost',
'0.0.0.0',
'127.0.0.1',
]
# DATABASES
# --------------------------------------------------------------------
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': config('DB_PORT'),
}
}
# PASSWORDS
# --------------------------------------------------------------------
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# STATIC FILES (CSS, JS, IMAGES)
# --------------------------------------------------------------------
STATIC_ROOT = BASE_DIR / "{{ cookiecutter.project_name }}" / "staticfiles"
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "{{ cookiecutter.project_name }}" / "static",
]
# MEDIA FILES (UPLOADED BY USERS)
# --------------------------------------------------------------------
MEDIA_ROOT = BASE_DIR / "{{ cookiecutter.project_name }}" / "media"
MEDIA_URL = '/media/'
|
[
"joakimekman91@gmail.com"
] |
joakimekman91@gmail.com
|
c57af0edba753c73d1baa7c1d20d1e517b07d71f
|
6b29d66ba7927129b68bc00db769f0edf1babaea
|
/SoftLayer/CLI/cdn/load.py
|
5dc53ca2bf549af7d5d051fbf032636005fb948f
|
[
"MIT"
] |
permissive
|
tdurden82/softlayer-python
|
65f42923c347a164995dfc267829721032de261d
|
0eed20fa4adedd3228d91d929bb8befb1e445e49
|
refs/heads/master
| 2021-01-17T10:01:48.087450
| 2015-10-19T18:38:53
| 2015-10-19T18:38:53
| 46,301,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
"""Cache one or more files on all edge nodes."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
import click
@click.command()
@click.argument('account_id')
@click.argument('content_url', nargs=-1)
@environment.pass_env
def cli(env, account_id, content_url):
"""Cache one or more files on all edge nodes."""
manager = SoftLayer.CDNManager(env.client)
manager.load_content(account_id, content_url)
|
[
"k3vinmcdonald@gmail.com"
] |
k3vinmcdonald@gmail.com
|
80f2060d4bf99a23db28ef2a0835f1db2ae4d018
|
8080c75c96bfb4314b92b64e217523fbd418e10e
|
/users/urls.py
|
38e9dcbc93fc5ec8235a1eef4b2411ffbf96aa24
|
[] |
no_license
|
aktober/Belezhnik
|
a0b6bc275aa81650371d3c0bcf5c782c290ac1fb
|
3585a84fb046737a2277ed614157750642602717
|
refs/heads/master
| 2021-05-04T03:03:02.725417
| 2018-02-06T21:31:33
| 2018-02-06T21:31:33
| 120,371,507
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
from django.urls import path
from users import views
app_name = 'users'
urlpatterns = [
path(r'login/', views.LoginPage.as_view(), name='login'),
path(r'logout/', views.LogoutPage.as_view(), name='logout'),
path(r'profile/', views.ProfilePage.as_view(), name='profile'),
path(r'register/', views.RegisterPage.as_view(), name='register'),
path(r'activate/<int:pk>/<token>/', views.activate, name='activate'),
# todo: add reset password
]
|
[
"a.popovychenko@gmail.com"
] |
a.popovychenko@gmail.com
|
9cb09920c20113c946ce4e32464a041bb6bc7a0e
|
7f3f8bc74858fc80a50ebe44cfaa67a7c1f773f2
|
/sw.py
|
1efd9942d77e9b98687381cf2ef44d86496423d6
|
[] |
no_license
|
lubingchen/mcs
|
78a1ac5f5ac00683a0f4b8f2ecbbeeb4c073795b
|
8881d1f75f93dcbe81cca7964b3c7ad606828029
|
refs/heads/master
| 2020-09-19T23:21:34.570327
| 2019-12-04T09:31:07
| 2019-12-04T09:31:07
| 224,321,960
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!/usr/bin/python3
import RPi.GPIO as GPIO
import time
# Use Broadcom (BCM) pin numbering
GPIO.setmode(GPIO.BCM)
# Set up GPIO24 (physical pin 18) as an input
buttonPin = 24
print ("Setup GPIO24")
GPIO.setup(buttonPin, GPIO.IN)
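# --- Hedged alternative (added sketch, not part of the original script) ---
# Instead of the polling loop below, RPi.GPIO can report edges via interrupts,
# with debouncing handled by `bouncetime` (milliseconds). Defined but not enabled here.
def watch_button_with_interrupts(pin=buttonPin, debounce_ms=50):
    def on_edge(channel):
        print("Button pressed" if GPIO.input(channel) else "release")
    GPIO.add_event_detect(pin, GPIO.BOTH, callback=on_edge, bouncetime=debounce_ms)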
while True:
#take a reading
input = GPIO.input(buttonPin)
#report the current button state
if (input):
print("Button pressed")
else :
print("release")
#slight pause to debounce
time.sleep(0.05)
|
[
"912target@gmail.com"
] |
912target@gmail.com
|
0780b1837a14ec6809961f1886b5e5715638b300
|
f99a83f3d538a121184de88bff19ce396be6e3d5
|
/stayclean-2019-july/serve-signups-with-flask.py
|
713d2236ae476172de0ca6feeb7bc7eee49c7036
|
[
"MIT"
] |
permissive
|
foobarbazblarg/stayclean
|
c38deddd971b795af58ae389b9e65914dea08d2d
|
384a8261b1164797d6726166a6e40f323e5bd6dd
|
refs/heads/master
| 2023-02-21T09:48:57.907540
| 2023-01-02T15:32:35
| 2023-01-02T15:32:35
| 45,186,602
| 1
| 0
|
MIT
| 2023-02-16T03:49:00
| 2015-10-29T13:59:33
|
Python
|
UTF-8
|
Python
| false
| false
| 8,591
|
py
|
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ 'c508oc', 'c58550', 'c5v9jc', 'c64ev3', 'c6ilv3', 'c6yela', 'c7c6ju' ]
flaskport = 8961
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
[
"foobarbazblarg@gmail.com"
] |
foobarbazblarg@gmail.com
|
213220daf45739a739835cfbc5c38812db7edeb7
|
44f452c7af9943583e343449d5d1db3e7581ca46
|
/venv/bin/pip
|
3867aa60e67d7da60c7099c31e7659c6468507b6
|
[] |
no_license
|
Abhay-Bhaskar/aTunes
|
403ca1c2b1a96dcd5d8f35d161de51a1361fb7ad
|
e0236c78b84668acceb02e3973cb3818cf145ea0
|
refs/heads/master
| 2020-04-13T12:07:03.668081
| 2018-12-26T21:03:24
| 2018-12-26T21:03:24
| 163,193,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
#!/home/abhay/PycharmProjects/aTunes/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"abhayakshay@live.com"
] |
abhayakshay@live.com
|
|
7d2ecfb80d138ad1ffefda05a379fa418bae027a
|
852f9c1ab2a15c8f5ee452141ade8925dc633907
|
/ps1b.py
|
d1dade638255c41bd2f6f40a984b5f9628027eaa
|
[
"Giftware"
] |
permissive
|
avitide-ethan/MIT-OCW-6_0001F16
|
1808a58526dd395bd9b320295c290ef189f80674
|
afb3aee630755c3e7896708a44aa318831250a6a
|
refs/heads/master
| 2021-04-26T23:18:05.401169
| 2018-04-04T03:55:50
| 2018-04-04T03:55:50
| 123,967,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
print("This program will calculate how many months it will take to save up enough money for a down payment.")
annual_salary = float(input("Enter your annual salary: "))
portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
total_cost = float(input("Enter the cost of your dream home: "))
semi_annual_raise = float(input("Enter the semi-annual raise, as a decimal: "))
monthly_salary = annual_salary / 12
portion_down_payment = 0.25
current_savings = 0
r = 0.04
months = 0
while current_savings < total_cost * portion_down_payment:
# for months in range(5):
# print(f"Month {months}")
# print(f"Monthly salary {monthly_salary}")
# print(f"Savings beginning of month: {current_savings}")
current_savings = current_savings + current_savings * r/12 # annual return of r
# print(f"Savings end of month: {current_savings}")
current_savings = current_savings + monthly_salary * portion_saved
months += 1
if (months - 1) % 6 ==0 and months != 1:
monthly_salary = monthly_salary * (1 + semi_annual_raise)
else:
pass
print("Number of months: {}".format(months))
|
[
"ethan.dow@avitide.com"
] |
ethan.dow@avitide.com
|
fd6c4b075eb403bf8dc7c18c9b75ace27bc189e2
|
1a166165ab8287d01cbb377a13efdb5eff5dfef0
|
/sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_connection_monitors_operations.py
|
46689ddf45957cd45d6d3a96b4110fa0e9405644
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
manoj0806/azure-sdk-for-python
|
7a14b202ff80f528abd068bf50334e91001a9686
|
aab999792db1132232b2f297c76800590a901142
|
refs/heads/master
| 2023-04-19T16:11:31.984930
| 2021-04-29T23:19:49
| 2021-04-29T23:19:49
| 363,025,016
| 1
| 0
|
MIT
| 2021-04-30T04:23:35
| 2021-04-30T04:23:35
| null |
UTF-8
|
Python
| false
| false
| 45,729
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.ConnectionMonitor
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
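# --- Hedged usage sketch (added comment block; resource names and the payload shape
# are illustrative placeholders, not a verified example) ---
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.network import NetworkManagementClient
#
#   client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   poller = client.connection_monitors.begin_create_or_update(
#       resource_group_name="rg-demo",
#       network_watcher_name="NetworkWatcher_westus",
#       connection_monitor_name="cm-demo",
#       parameters={...},   # a ConnectionMonitor model or equivalent dict
#   )
#   result = poller.result()   # blocks until the long-running operation completes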
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Update tags of the specified connection monitor.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters supplied to update connection monitor tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def _query_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorQueryResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def begin_query(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.ConnectionMonitorQueryResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.ConnectionMonitorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
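# Hedged usage sketch: how the ConnectionMonitors operations above are typically reached
# through NetworkManagementClient. Resource names and the monitor body are placeholders,
# not values taken from this file; assumes azure-identity and azure-mgmt-network are installed.
def _example_connection_monitor_flow(subscription_id, resource_group, watcher_name, monitor_name, monitor_body):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # begin_* methods return an LROPoller; .result() blocks until the long-running operation finishes.
    poller = client.connection_monitors.begin_create_or_update(
        resource_group, watcher_name, monitor_name, monitor_body)
    created = poller.result()
    # Read the monitor back and query a snapshot of the most recent connection states.
    fetched = client.connection_monitors.get(resource_group, watcher_name, monitor_name)
    query_result = client.connection_monitors.begin_query(
        resource_group, watcher_name, monitor_name).result()
    return created, fetched, query_result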
|
[
"noreply@github.com"
] |
manoj0806.noreply@github.com
|
d7d3a2e41838be4f9888283ce8a5ea31e6985298
|
95f1359cf44ecdbf9c3d418f6ae3dbc0f0a9dcb0
|
/probFactors.py
|
92b4dfd6eab2c19ecca212cec94c3a10a07fd759
|
[] |
no_license
|
vitcal78/MED
|
c0d79a77edcaea6fb9b1c7002310a78c7efe1534
|
1d2e5bcef1dbcfc8779b2ccd117972d7ca61e01f
|
refs/heads/main
| 2023-02-13T19:23:45.364238
| 2021-01-09T07:49:49
| 2021-01-09T07:49:49
| 328,093,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,975
|
py
|
# probFactors.py - Factor manipulation for graphical models
# AIFCA Python3 code Version 0.8.4 Documentation at http://aipython.org
# Artificial Intelligence: Foundations of Computational Agents
# http://artint.info
# Copyright David L Poole and Alan K Mackworth 2017-2020.
# This work is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# See: http://creativecommons.org/licenses/by-nc-sa/4.0/deed.en
from functools import reduce
#from probVariables import Variable
class Factor(object):
nextid=0 # each factor has a unique identifier; for printing
def __init__(self,variables):
"""variables is the ordered list of variables
"""
self.variables = variables # ordered list of variables
# Compute the size and the offsets for the variables
self.var_offsets = {}
self.size = 1
for i in range(len(variables)-1,-1,-1):
self.var_offsets[variables[i]]=self.size
self.size *= variables[i].size
self.id = Factor.nextid
self.name = "f"+str(self.id)
Factor.nextid += 1
def get_value(self,assignment):
raise NotImplementedError("get_value") # abstract method
def __str__(self, variables=None):
"""returns a string representation of the factor.
Allows for an arbitrary variable ordering.
variables is a list of the variables in the factor
(can contain other variables)"""
if variables is None:
variables = self.variables
else:
variables = [v for v in variables if v in self.variables]
res = ""
for v in variables:
res += str(v) + "\t"
res += self.name+"\n"
for i in range(self.size):
asst = self.index_to_assignment(i)
for v in variables:
res += str(asst[v])+"\t"
res += str(self.get_value(asst))
res += "\n"
return res
def brief(self):
"""returns a string representing a summary of the factor"""
res = self.name+"("
for i in range(0,len(self.variables)-1):
res += str(self.variables[i])+","
if len(self.variables)>0:
res += str(self.variables[len(self.variables)-1])
res += ")"
return res
__repr__ = brief
def assignment_to_index(self,assignment):
"""returns the index where the variable:value assignment is stored"""
index = 0
for var in self.variables:
index += var.val_to_index[assignment[var]]*self.var_offsets[var]
return index
def index_to_assignment(self,index):
"""gives a dict representation of the variable assignment for index
"""
asst = {}
for i in range(len(self.variables)-1,-1,-1):
asst[self.variables[i]] = self.variables[i].domain[index % self.variables[i].size]
index = index // self.variables[i].size
return asst
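# Worked note on the index layout used above: for variables [X, Y] with X.size == 3 and
# Y.size == 2, __init__ yields var_offsets == {Y: 1, X: 2} and size == 6, so the assignment
# {X: x_i, Y: y_i} is stored at index x_i*2 + y_i*1; index_to_assignment inverts this with
# the mod / floor-division loop above.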
class Factor_stored(Factor):
def __init__(self,variables,values):
Factor.__init__(self, variables)
self.values = values
def get_value(self,assignment):
return self.values[self.assignment_to_index(assignment)]
class Factor_observed(Factor):
def __init__(self,factor,obs):
Factor.__init__(self, [v for v in factor.variables if v not in obs])
self.observed = obs
self.orig_factor = factor
def get_value(self,assignment):
ass = assignment.copy()
for ob in self.observed:
ass[ob]=self.observed[ob]
return self.orig_factor.get_value(ass)
class Factor_sum(Factor_stored):
def __init__(self,var,factors):
self.var_summed_out = var
self.factors = factors
vars = []
for fac in factors:
for v in fac.variables:
if v is not var and v not in vars:
vars.append(v)
Factor_stored.__init__(self,vars,None)
self.values = [None]*self.size
def get_value(self,assignment):
"""lazy implementation: if not saved, compute it. Return saved value"""
index = self.assignment_to_index(assignment)
if self.values[index] is not None:  # 0.0 is a valid stored value, so compare with None
return self.values[index]
else:
total = 0
new_asst = assignment.copy()
for val in self.var_summed_out.domain:
new_asst[self.var_summed_out] = val
prod = 1
for fac in self.factors:
prod *= fac.get_value(new_asst)
total += prod
self.values[index] = total
return total
def factor_times(variable,factors):
"""when factors are factors just on variable (or on no variables)"""
prods= []
facs = [f for f in factors if variable in f.variables]
for val in variable.domain:
prod = 1
ast = {variable:val}
for f in facs:
prod *= f.get_value(ast)
prods.append(prod)
return prods
class Prob(Factor_stored):
"""A factor defined by a conditional probability table"""
def __init__(self,var,pars,cpt):
"""Creates a factor from a conditional probability table, cpt.
The cpt values are assumed to be for the ordering par+[var]
"""
Factor_stored.__init__(self,pars+[var],cpt)
self.child = var
self.parents = pars
assert self.size==len(cpt),"Table size incorrect "+str(self)
def cond_dist(self,par_assignment):
"""returns the distribution (a val:prob dictionary) over the child given
assignment to the parents
par_assignment is a variable:value dictionary that assigns values to parents
"""
index = 0
for var in self.parents:
index += var.val_to_index[par_assignment[var]]*self.var_offsets[var]
# index is the position where the distribution starts
return {self.child.domain[i]:self.values[index+i] for i in range(len(self.child.domain))}
def cond_prob(self,par_assignment,child_value):
"""returns the probability child has child_value given
assignment to the parents
par_assignment is a variable:value dictionary that assigns values to parents
child_value is a value to the child
"""
index = self.child.val_to_index[child_value]
for var in self.parents:
index += var.val_to_index[par_assignment[var]]*self.var_offsets[var]
return self.values[index]
class Factor_rename(Factor):
def __init__(self,fac,renaming):
Factor.__init__(self,list(renaming.keys()))
self.orig_fac = fac
self.renaming = renaming
def get_value(self,assignment):
return self.orig_fac.get_value({self.renaming[var]:val
for (var,val) in assignment.items()
if var in self.variables})
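# Hedged usage sketch for the classes above. probVariables.Variable is not part of this file,
# so a minimal stand-in with the attributes Factor relies on (domain, size, val_to_index) is
# defined here purely for illustration.
class _Var(object):
    def __init__(self, name, domain):
        self.name = name
        self.domain = domain
        self.size = len(domain)
        self.val_to_index = {val: i for i, val in enumerate(domain)}
    def __str__(self):
        return self.name

if __name__ == "__main__":
    A = _Var("A", [False, True])
    B = _Var("B", [False, True])
    # P(B | A): values ordered for parents + [child], i.e. (A=F,B=F), (A=F,B=T), (A=T,B=F), (A=T,B=T)
    p_b_given_a = Prob(B, [A], [0.9, 0.1, 0.2, 0.8])
    print(p_b_given_a.cond_dist({A: True}))   # -> {False: 0.2, True: 0.8}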
|
[
"vitcal78@gmail.com"
] |
vitcal78@gmail.com
|
9371f82b30f28ec2ea8f912d2ebb28d543b00493
|
478fad0652b25cc228eb6c02c56b6a944d23b217
|
/Commands.py
|
54a4685ee79ff5063e4a41c07c8c8b9f541ebdc1
|
[] |
no_license
|
vinayreddy115/ds_salary_proj
|
a2fba0bc474efe43ad0c2cf23badc3b52b05a585
|
e88edd1cd2fb0b2ba5d556be8a6ceaf879f95460
|
refs/heads/master
| 2022-12-22T21:37:57.398366
| 2020-10-08T18:41:47
| 2020-10-08T18:41:47
| 302,356,568
| 1
| 0
| null | 2020-10-08T18:41:48
| 2020-10-08T13:52:29
|
Python
|
UTF-8
|
Python
| false
| false
| 17
|
py
|
# `df` is assumed to be a pandas DataFrame created elsewhere in this project (not shown here).
print(df.head(1))
|
[
"baradivinay115@gmail.com"
] |
baradivinay115@gmail.com
|
17ae3745e60da3b1ea7472cb392d4faf0ecae016
|
e4ec350f85c7c7082056596cb35fbeac4f20b174
|
/app/SqueezeLayer.py
|
22ea82b8eb0dcd4c7fbc02781ac69a7dd47999eb
|
[] |
no_license
|
benbogart/bird_vocalization_classification_dashboard
|
6945319e53d664274e21ae8f8f52f9679b23a694
|
386ea2952494bc1de154e9feb4ec20663da43a31
|
refs/heads/main
| 2023-04-22T23:41:02.314555
| 2021-04-23T14:51:49
| 2021-04-23T14:51:49
| 358,924,236
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 952
|
py
|
from tensorflow import keras as K
from tensorflow.keras.layers import Layer
class SqueezeLayer(Layer):
'''
Keras squeeze layer
Taken from milsed https://github.com/marl/milsed/
'''
def __init__(self, axis=-1, **kwargs):
super(SqueezeLayer, self).__init__(**kwargs)
self.axis = axis
def get_output_shape_for(self, input_shape):
# shape = np.array(input_shape)
# shape = shape[shape != 1]
# return tuple(shape)
shape = list(input_shape)
del shape[self.axis]
return tuple(shape)
def compute_output_shape(self, input_shape):
return self.get_output_shape_for(input_shape)
def call(self, x, mask=None):
return K.backend.squeeze(x, axis=self.axis)
def get_config(self):
config = {'axis': self.axis}
base_config = super(SqueezeLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
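# Hedged usage sketch: wiring SqueezeLayer into a tiny functional-API model. The input shape
# below is invented for illustration; assumes TensorFlow 2.x.
if __name__ == "__main__":
    inputs = K.Input(shape=(128, 1))            # trailing singleton axis
    outputs = SqueezeLayer(axis=-1)(inputs)     # drops the last axis -> (None, 128)
    model = K.Model(inputs, outputs)
    model.summary()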
|
[
"ben@benbogart.com"
] |
ben@benbogart.com
|
2122bf334da1efff325d7418ef69af911529d88a
|
2300a140b3506c6e934794f0da40fc466e50b310
|
/senti/crawler/migrations/0002_auto_20160408_1058.py
|
ea25cfa0a917edc35a7e2aa348ede314d461d21b
|
[] |
no_license
|
chiranjeevjain/sentientX
|
28baebfe1bbed32085db337c98d7b4f90775beff
|
b4aeb296be982d225f839c25cdedf354b00c02a6
|
refs/heads/master
| 2021-01-24T08:15:52.644798
| 2016-04-17T16:35:41
| 2016-04-17T16:35:41
| 68,904,940
| 1
| 0
| null | 2016-09-22T09:15:35
| 2016-09-22T09:15:34
| null |
UTF-8
|
Python
| false
| false
| 600
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-08 10:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('crawler', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='category',
name='total',
field=models.SmallIntegerField(default=1500),
),
migrations.AddField(
model_name='product',
name='total',
field=models.SmallIntegerField(default=0),
),
]
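# Hedged sketch of the model fields this migration implies; the actual crawler/models.py is
# not shown here, so only the names and defaults from the AddField operations above are known.
#
#     class Category(models.Model):
#         ...
#         total = models.SmallIntegerField(default=1500)
#
#     class Product(models.Model):
#         ...
#         total = models.SmallIntegerField(default=0)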
|
[
"chanakya.malireddy@gmail.com"
] |
chanakya.malireddy@gmail.com
|
ffc572e9dbd67af628b274ff6ba22caa5f2f11d6
|
2bcfa4f50aee2f20f1829522153d5b490eca9120
|
/scripts/download_comms.py
|
b7115a71469a4a8ff639ad95de41e79a53c43a1c
|
[] |
no_license
|
ronaldmaj/InsightDataProject
|
0302ee8e100b8b45f6a80b044a567010493ac796
|
9b54bd6ff5a35b1d9a0b94a1b6327f71c8c7e24c
|
refs/heads/master
| 2020-12-11T20:02:15.094121
| 2020-02-14T03:56:51
| 2020-02-14T03:56:51
| 233,945,248
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,116
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 12:22:06 2020
Script to scrape comments from the top 50 results for the term 'vlog'
@author: Ronald Maj
"""
#%%
import os
import time
import googleapiclient.discovery
import pandas as pd
import yt_cm as cm
import json
#%%
insight_dir = os.getcwd()[0:19]+'\\Documents\\GitHub\\InsightDataProject\\'
# Set up the Youtube client, search for the top 50 results for 'vlog' and
# extract the channel Ids into a dataframe:
YT_client = cm.set_up_YT_client()
#%%
def channel_seach_call(YT_client, query):
top_vlogs_search = cm.search_results(YT_client, query)
json_txt = json.dumps(top_vlogs_search)
if not os.listdir(insight_dir+'data\\raw\\channels\\'):
with open(
insight_dir
+'data\\raw\\channels\\'
+ 'channels_json_1'
+ '.json','w') as file:
file.write(json_txt)
else:
with open(
insight_dir
+'data\\raw\\channels\\'
+ 'channels_json_' + str(len(os.listdir(insight_dir+'data\\raw\\channels\\'))+1)
+ '.json','w') as file:
file.write(json_txt)
return top_vlogs_search
top_vlogs_search = channel_seach_call(YT_client,'vlog')
top_vlogs = top_vlogs_search['items']
channel_info = {
'ChannelTitle':[vlog['snippet']['title'] for vlog in top_vlogs],
'ChannelID':[vlog['snippet']['channelId'] for vlog in top_vlogs],
'ChannelDescription':[vlog['snippet']['description'] for vlog in top_vlogs]
}
channels_df = pd.DataFrame.from_dict(channel_info)
#channels_df.to_csv(insight_dir+'data\\processed\\channels_top50_df.csv')
#%%
# For each of the channels, get the most recent 30 videos from the channel and
# save in dataframe
vids_df = pd.DataFrame(
columns=['VidID',
'ChannelID',
'VidTitle',
'VidDescription',
'VidPublished'])
def videos_of_channel_call(YT_client, channel_id):
vids_dict = cm.get_videos_of_channel(YT_client, channel_id)
json_txt = json.dumps(vids_dict)
if not os.listdir(insight_dir+'data\\raw\\videos\\'):
with open(
insight_dir
+'data\\raw\\videos\\'
+ 'videos_json_1'
+ '.json','w') as file:
file.write(json_txt)
else:
with open(
insight_dir
+'data\\raw\\videos\\'
+ 'videos_json_' + str(len(os.listdir(insight_dir+'data\\raw\\videos\\'))+1)
+ '.json','w') as file:
file.write(json_txt)
return vids_dict
for c_id in channels_df['ChannelID'][49:]:
vids_dict = videos_of_channel_call(YT_client, c_id)
vids_list = vids_dict['items']
# Only keep channels that have more than 30 videos
if len(vids_list) < 30:
continue
else:
for vid in vids_list[0:30]:
vid_dict = {'VidID':vid['snippet']['resourceId']['videoId'],
'ChannelID':vid['snippet']['channelId'],
'VidTitle':vid['snippet']['title'],
'VidDescription':vid['snippet']['description'],
'VidPublished':vid['snippet']['publishedAt']
}
vids_df = vids_df.append(vid_dict, ignore_index=True)
#vids_df.to_csv(insight_dir+'data\\processed\\videos_top30_df.csv')
df_comms_list = []
#%%
#if lastest_vid:
# strt = vids_df[vids_df['VidID'] == lastest_vid].index[0]
#else:
strt = -1
# Now for each video, need to get the comments.
for v_id in vids_df['VidID'][strt+1:]:
lastest_vid = v_id
vid_num_str = str(vids_df[vids_df['VidID'] == lastest_vid].index[0])
num_vids_str = str(len(vids_df['VidID']))
print('Fetching comments for video '+vid_num_str+' of '+num_vids_str)
try:
comm_pg1 = cm.get_comments_page(YT_client,
v_id,
'relevance',
pagetok=None)
comm_pg2 = cm.get_comments_page(YT_client,
v_id,
'relevance',
pagetok=comm_pg1['nextPageToken'])
except:
continue
json_txt = json.dumps(comm_pg1)
if not os.listdir(insight_dir+'data\\raw\\comments\\'):
with open(
insight_dir
+'data\\raw\\comments\\'
+ 'comments_json_1'
+ '.json','w') as file:
file.write(json_txt)
else:
with open(
insight_dir
+'data\\raw\\comments\\'
+ 'comments_json_' + str(len(os.listdir(insight_dir+'data\\raw\\comments\\'))+1)
+ '.json','w') as file:
file.write(json_txt)
json_txt = json.dumps(comm_pg2)
with open(
insight_dir
+'data\\raw\\comments\\'
+ 'comments_json_' + str(len(os.listdir(insight_dir+'data\\raw\\comments\\'))+1)
+ '.json','w') as file:
file.write(json_txt)
thread_list = comm_pg1["items"] + comm_pg2['items']
# Create Dataframe from the comment dictionary that results
cols = ['CommID'] + list(thread_list[0]['snippet']["topLevelComment"]['snippet'].keys()) + ['parentId']
df_comms = pd.DataFrame(columns=cols)
for item in thread_list:
data = {"CommID": item['id'], 'parentId': 0}
data.update(item['snippet']["topLevelComment"]['snippet'])
df_comms = df_comms.append(data,ignore_index=True)
if 'replies' in item.keys():
for reply in item['replies']['comments']:
data = {"CommID": reply['id']}
data.update(reply['snippet'])
df_comms = df_comms.append(data,ignore_index=True)
df_comms_list.append(df_comms)
#if comms_df.empty:
comms_df = pd.DataFrame(columns=df_comms_list[0].columns)
#else:
# pass
for df in df_comms_list:
comms_df = comms_df.append(df, ignore_index=True)
comms_df.to_csv(insight_dir+'data\\processed\\comments_df3.csv')
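# Hedged refactor sketch: the three "write payload as the next numbered json file" blocks above
# repeat the same pattern; a helper like this (name and signature invented here) would remove
# the duplication. It relies on the json/os imports already made at the top of this script.
def save_json_snapshot(base_dir, prefix, payload):
    """Write payload to '<prefix>_json_<n>.json' in base_dir, where n = existing file count + 1."""
    n = len(os.listdir(base_dir)) + 1
    path = os.path.join(base_dir, '{}_json_{}.json'.format(prefix, n))
    with open(path, 'w') as out_file:
        out_file.write(json.dumps(payload))
    return path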
|
[
"36747983+ronaldmaj@users.noreply.github.com"
] |
36747983+ronaldmaj@users.noreply.github.com
|
e9007fc49d13f5d5c05e56540b2d43ef7c47a409
|
f647c6fb984b6e93977bb56a9a4533b8d47e6644
|
/lib/form_szone.py
|
cb1d487631fc14327e35774f177cfca5598b2b59
|
[] |
no_license
|
vdsmirnov52/wt000
|
7a88fcf29e5f786b8f2b0956b4a10ae68c0e32a6
|
0dd8ead0a73ed0f3f7f2f8c5302dff0071392570
|
refs/heads/master
| 2021-04-26T05:50:59.554131
| 2020-08-06T10:14:02
| 2020-08-06T10:14:02
| 79,928,565
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,269
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys, time
import urllib
import json
LIBRARY_DIR = r"/home/smirnov/Wialon/lib/"
sys.path.insert(0, LIBRARY_DIR)
'''
[ /* array with geofence data */
  {
    "n":<text>,      /* geofence name */
    "d":<text>,      /* description */
    "id":<long>,     /* geofence ID within the resource/account */
    "rid":<long>,    /* resource/account ID */
    "t":<byte>,      /* type: 1 - line, 2 - polygon, 3 - circle */
    "w":<uint>,      /* line width or circle radius */
    "f":<uint>,      /* geofence flags (see below) */
    "c":<uint>,      /* color (ARGB) */
    "tc":<uint>,     /* label color (RGB) */
    "ts":<uint>,     /* font size */
    "min":<uint>,    /* show on the map starting from this zoom level */
    "max":<uint>,    /* show on the map up to this zoom level */
    "i":<ushort>,    /* image checksum (CRC16) */
    "path":<text>,   /* shortened path to the default icon */
    "ar":<double>,   /* area */
    "pr":<double>,   /* perimeter */
    "libId":<uint>,  /* icon library id, 0 - id of the default library */
    "b":{            /* bounds */
      "min_x":<double>, /* minimum longitude */
      "min_y":<double>, /* minimum latitude */
      "max_x":<double>, /* maximum longitude */
      "max_y":<double>, /* maximum latitude */
      "cen_x":<double>, /* center longitude */
      "cen_y":<double>  /* center latitude */
    },
    "p":[            /* array of geofence points */
      {
        "x":<double>, /* longitude */
        "y":<double>, /* latitude */
        "r":<uint>    /* radius */
      },
      ...
    ],
    "ct":<uint>,     /* creation time */
    "mt":<uint>      /* last modification time */
  },
  ...
]
'''
rem = '''~div_left|<pre>
Параметры
Название Описание + Комментарии
itemId ID ресурса
col массив идентификаторов геозон
flags флаги, определяющие формат возвращаемого JSON необязательный, по умолчанию 0x1С
Флаги «flags»:
Значение Описание
0x01 площадь
0X02 периметр
0X04 границы и координаты центра
0X08 точки
0X10 базовые свойства
</pre>'''
widget = """~div_right|
<div class="grey" style="background-color: #dde; width: 652px; padding: 4px; margin: 4px; top: 54px;">
<div class="box" style="background-color: #ccd;">
<table width="100%"><tr><td class='tit'>Геозоны - подробная информация</td>
<td align="right">
<input class="butt" value="View Zones" onclick="set_shadow('view_szones');" type="button" title='Список геозон' />
<input class="butt" value="Search Zone" onclick="set_shadow('search_szone');" type="button" title='Искать геозону' />
<input class="butt" value="Reload" onclick="set_shadow('form_szone');" type="button" title='Обновить форму' />
<input class="butt" value="Close" onclick="$('#widget').html('Close');" type="button" title='' />
</td></tr></table>
</div>
<dt><span class='tit'> itemId </span> ID ресурса/учётной записи</dt>
<dd><input type='text' name='itemId'> </dd>
<dt><span class='tit'> col </span> массив идентификаторов геозон </dt>
<dd><textarea name='col' maxlength=256 rows=1 cols=80>%s</textarea> </dd>
<dt><span class='tit'> flags </span> флаги, определяющие формат возвращаемого JSON </dt>
<dd>
<input type='checkbox' name='flag_001' /> площадь </br>
<input type='checkbox' name='flag_002' /> периметр </br>
<input type='checkbox' name='only_poligon' /> полигон </br>
<input type='checkbox' name='flag_004' checked /> границы и координаты центра </br>
<input type='checkbox' name='flag_008' checked /> точки и полигон </br>
<input type='checkbox' name='flag_016' checked /> базовые свойства </br>
</dd>
FORM
<div id="set_vals" style="border: 1px solid #bbc; color: #668; min-height: 100px">set_vals</div>
</div>
</div>
"""
def dom (iddom, request):
print "~widget|"
print "~%s|" % iddom
print "<table border=0><tr style='vertical-align: top;'><td id='td_left'></td><td id='td_right'></td></tr></table>"
print "~td_left|<div id='div_left' style='border: 1px solid rgb(187, 187, 204); color: rgb(12, 12, 36); overflow: auto; min-width: 700px;'> ERROR </div>"
print "~td_right|<div id='div_right' > ", request, " </div>"
print "~eval|$('#div_left').css({'height': (-233 + document.documentElement.clientHeight) +'px', 'overflow': 'auto'});"
print "~eval|$('#div_left').css({'width': (-700 + document.documentElement.clientWidth) +'px', 'overflow': 'auto'});"
print rem
print widget
serr = lambda txt: "<span class='bferr'> %s </span>" % txt
def search_szone(iddom, request):
import twlp
ztype = {1: 'линия', 2: 'полигон', 3: 'круг'}
print "~set_vals|"
if not (request.has_key('itemId') and request['itemId'].isdigit()):
print serr ("Отсутствует или невернр задан 'itemId'.")
return
cols = []
if not (request.has_key('col') and request['col'].strip()[0].isdigit()):
for j in xrange(255): cols.append(j)
else:
print request['col'].strip().split()
for js in request['col'].strip().split():
js = js.replace(',', '').replace(';', '').strip()
print js
if js and js.isdigit(): cols.append(int(js))
flags = 0
for k in request.keys():
if 'flag_' in k[:5] and request[k] == 'on':
flags += int (k[5:])
if flags == 0: flags = -1
print '<hr />'
itemId = int(request['itemId'])
data = {'sid': request['wsid'], 'svc': 'resource/get_zone_data', 'params': {'itemId': itemId, 'col': cols, 'flags': flags}}
# print data
fres, sres = twlp.requesr(data)
if not fres:
print serr(sres), str(data)
return
print "~%s|" % iddom
# print sres
print '<table>'
for i in sres:
print '<tr class="tit"><td>', i['rid'], i['id'], '</td><td>', i['n'].encode('UTF-8'), '</td><td>', i['d'].encode('UTF-8')
print time.strftime("</td><td>mt: %Y.%m.%d %T", time.localtime (i['mt']))
# print float(i['c'])/0xff000000
op = float(i['c'])/0xff000000
print op
print "<span style='background-color: #%x; opacity: %.2f;, color: #%x;'> nin: %s, max: %s </span>" % (0xffffff & int(i['c']), op, int(i['tc']), i['min'], i['max'])
if i['t'] in ztype.keys():
print ztype[i['t']]
else: print '###'
out_filds (i, flags)
if flags == -1:
print '<tr><td> </td><td colspan=3>'
if i.has_key('b') and i.has_key('p'):
prn_svg (i['b'], i['p'], i['c'], i['n'].encode('UTF-8'), i['t'])
print '</td></tr>'
print '</table>'
from math import sin, cos, tan, pi
def prn_svg (b, points, c, name, ztype = 1, k = 20000):
# {u'min_x': 43.5576794388, u'min_y': 56.821850924, u'max_x': 43.5630507866, u'max_y': 56.8249534109, u'cen_x': 43.5603651127, u'cen_y': 56.8234021675}
Rz = (6378245.0+6356863.019)/2 # Earth radius
min_x = float(b['min_x'])
min_y = float(b['min_y'])
max_x = float(b['max_x'])
max_y = float(b['max_y'])
if k*(max_x - min_x) > 1100: # Normalize X to 1100
k = 1100/(max_x - min_x)
print '#'*33, "Xk:", int(k), "<br />"
if k*(max_y - min_y) > 500: # Normalize Y to 500
k = 500/(max_y - min_y)
print '#'*33, "Yk:", int(k), "<br />"
K = k*cos(pi * (min_x+max_x)/360)
w = int(K*(max_x - min_x))
h = int(k*(max_y - min_y))
cl = 0xffffff & int(c)
# print Rz, Rz*cos(pi * min_x/180), (max_x-min_x), (max_x-min_x)*cos(pi * min_x/180), K
print "<svg width=%dpx height=%dpx fill='#%x' border=1px xmlns='http://www.w3.org/2000/svg'>" %(w, h, cl)
print "<text x=10 y=30 font-size=13>%s</text>" % name
pp = []
if ztype in [1,2]:
for p in points:
x = int(K * (float(p['x']) - min_x))
y = int(k * (max_y - float(p['y']))) # - min_y))
pp.append('%d %d' % (x, y))
if ztype == 1: # Line
print """<path id="Line" fill="none" stroke="#%x" stroke-width="5" opacity="0.4" d="M %s" />""" % (cl, 'L '.join(pp))
else: # Polygon
print """<polygon stroke="#868686" stroke-width="1" fill="#%x" opacity="0.4" points="%s"></polygon>""" % (cl, ' '.join(pp))
else: # Circle
r = int(h/2) #points[0]['r'])
# print points
print "<circle r='%d' cx='%d' cy='%d' fill='#%x' opacity='0.4'></circle>" % (r, r, r, cl)
print "</svg>"
def out_filds (js, flags):
if flags == -1: return
if not flags & 0x0f: return
print '<tr><td> </td><td colspan=3>'
if flags & 1: # area
print "площадь:", js['ar'], '<br />'
if flags & 2: # perimeter
print "периметр:", js['pr'], '<br />'
if flags & 4: # bounds and center coordinates
print "границы:", js['b'], '<br />'
if flags & 8: # points
for p in js['p']:
print p, '<br />'
# print '</td></tr>'
def view_szones (iddom, request):
import twlp
print "~%s|" % iddom
params = {'spec': {'propType': 'sys_name', 'sortType': 'sys_name', 'itemsType': 'avl_resource', 'propName': '*', 'propValueMask': '*'}, 'force': 1, 'to': 0, 'from': 0, 'flags': -1,}
data = {'sid': request['wsid'], 'svc': 'core/search_items' , 'params': params}
fres, sres = twlp.requesr(data)
if not fres:
print serr(sres), str(data)
return
# print sres['items'][0] #.keys()
zlids = {}
zgids = {}
for i in sres['items']:
if i.has_key('zg') and i['zg']:
zgids[i['id']] = i
elif i.has_key('zl') and i['zl']:
zlids[i['id']] = i
else: print i['id']
# print zgids[371]['zg']
print zlids.keys(), zgids.keys()
print 'totalItemsCount:', sres['totalItemsCount'], len(zlids), len(zgids), '<hr />'
print "<table cellpadding=2 cellspacing=0><tr><th>Id</th><th>Наименование</th><th>Описание</th><th></th></tr>"
for i in zgids.keys():
item = zgids[i]
pitem (item)
for i in zlids.keys():
pitem (zlids[i])
print "</table>"
def pitem (item):
print "<tr class='mark tit'><td>", item['id'], "</td><td>", item['nm'].encode('UTF-8'), "</td><td> item </td><td></td></tr>" #, item['d'].encode('UTF-8'), "</td><td></td></tr>"
for k in item['zg'].keys():
print "<tr><td>", k, "</td><td>", item['zg'][k]['n'].encode('UTF-8'), "</td><td>", item['zg'][k]['d'].encode('UTF-8'), "</td><td>"
print item['zg'][k]['zns']
print "</td><td></td></tr>"
'''
for j in item['zg'][k]['zns']:
print "<tr><td>", j, "</td><td>"
# print zlids.keys()
'''
def pzone (zd):
return str(zd)
def ajax (request):
shstat = request['shstat']
if shstat == 'search_szone': ### Geofences - detailed information
search_szone('div_left', request)
elif shstat == 'view_szones':
view_szones('div_left', request)
else: print "~eval|alert ('form_szone: Unknown shstat: [%s]!');" % request ['shstat']
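# Worked note on the 'flags' bitmask: the checkbox names encode their bit value after 'flag_'
# (search_szone does flags += int(k[5:])), so the default form state with bounds+center,
# points and base properties checked gives flags = 4 + 8 + 16 = 28 = 0x1C, which matches the
# default mentioned in the help text above.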
|
[
"vdsmitnov52@gmail.com"
] |
vdsmitnov52@gmail.com
|
d17e8ff8187c3921897883120db844b538a4b52e
|
a40950330ea44c2721f35aeeab8f3a0a11846b68
|
/OpenCV-python读取监控/图片视频车牌识别.py
|
dee934f727046acfa89a8815d3f66fb583122548
|
[] |
no_license
|
huang443765159/kai
|
7726bcad4e204629edb453aeabcc97242af7132b
|
0d66ae4da5a6973e24e1e512fd0df32335e710c5
|
refs/heads/master
| 2023-03-06T23:13:59.600011
| 2023-03-04T06:14:12
| 2023-03-04T06:14:12
| 233,500,005
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,094
|
py
|
from hyperlpr import *
import cv2
class CarNumRecognition(object):
def __init__(self, image_path, video_path):
self._image_path = image_path
self._video_path = video_path
def video(self):
print('[INFO] starting video stream')
stream = cv2.VideoCapture(self._video_path)
while 1:
grabbed, frame = stream.read()
if not grabbed:
print('NO DATA')
break
res = HyperLPR_plate_recognition(frame)
print(res)
key = cv2.waitKey(5) & 0xFF
if key == ord('q'):
break
stream.release()
cv2.destroyAllWindows()
def image(self):
print('[INFO] starting image')
image = cv2.imread(self._image_path)
res = HyperLPR_plate_recognition(image)
print(res)
if __name__ == '__main__':
_video = '/Users/huangkai/Desktop/test.mp4'
_image = '/Users/huangkai/Desktop/1.png'
_test = CarNumRecognition(video_path=_video, image_path=_image)
_test.video()
# _test.image()
|
[
"443765159@qq.com"
] |
443765159@qq.com
|
1c4b88485adf158db1f4ac7a804da5e91357ddd8
|
b9c6f065e37fe7b8fb3a1f23b65b40f30833dfb3
|
/set_build_no.py
|
b7e4b0d86de0b7175fdbc99bedb274f22e562b8e
|
[
"Apache-2.0"
] |
permissive
|
BRHN-11/Crypt-Server
|
be155f3cafc2a5626f805dac3ba5171a1fe9d90d
|
266185e5c3acd2af0ff2cf4e0ce22c849680dc64
|
refs/heads/master
| 2023-08-30T21:47:25.857954
| 2021-01-15T18:24:58
| 2021-01-15T18:24:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
#!/usr/bin/python
import os
import plistlib
import subprocess
current_version = "3.1.0"
script_path = os.path.dirname(os.path.realpath(__file__))
# based on http://tgoode.com/2014/06/05/sensible-way-increment-bundle-version-cfbundleversion-xcode
print("Setting Version to Git rev-list --count")
cmd = ["git", "rev-list", "HEAD", "--count"]
build_number = subprocess.check_output(cmd)
# This will always be one commit behind, so this makes it current
build_number = int(build_number) + 1
version_number = "{}.{}".format(current_version, build_number)
data = {"version": version_number}
plist_path = "{}/fvserver/version.plist".format(script_path)
# plistlib.writePlist() was removed in Python 3.9; plistlib.dump() is the modern equivalent
with open(plist_path, "wb") as fp:
    plistlib.dump(data, fp)
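# (Illustrative note, not part of the original script) Example: with
# current_version = "3.1.0" and `git rev-list HEAD --count` returning 245,
# build_number becomes 246 and version.plist ends up holding {"version": "3.1.0.246"}.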
|
[
"noreply@github.com"
] |
BRHN-11.noreply@github.com
|
b6afeb6ebadd6ad349db47589435aa408c0707ae
|
6e3532291c327a9be7ea15fa37a673eefbf76a11
|
/Chapter_6/pizza.py
|
9072adf6a3d6884b98ac507ee9793055fc1d71e6
|
[] |
no_license
|
Denzaaaaal/python_crash_course
|
bc9aa8029d975bec8417d76e0bcdbd4480444cb0
|
d944893a575a4ca6ea1dec0786602d0c4a82e88d
|
refs/heads/master
| 2022-09-18T18:46:01.141317
| 2020-06-04T11:07:55
| 2020-06-04T11:07:55
| 266,353,585
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
# Store information about a pizza being ordered
pizza = {
'crust': 'thick',
'toppings': ['mushrooms','extra cheese']
}
# Summarising the order
print(f"You ordered a {pizza['crust']}-crust pizza "
      "with the following toppings:")
for topping in pizza['toppings']:
print (f"\t{topping}")
|
[
"denzeldouglas@protonmail.com"
] |
denzeldouglas@protonmail.com
|
abd622ab70f46e69735ea3d7762b678762558332
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/evergreen.py
|
886488cecc8d2463d953d0554e5a5a96fb0d02c6
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619
| 2015-09-23T11:54:06
| 2015-09-23T11:54:06
| 42,749,205
| 2
| 3
| null | 2015-09-23T11:54:07
| 2015-09-18T22:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 667
|
py
|
ii = [('MarrFDI.py', 1), ('CoolWHM2.py', 1), ('KembFFF.py', 1), ('ProuWCM.py', 2), ('ShawHDE.py', 1), ('MartHSI2.py', 2), ('LeakWTI2.py', 2), ('LeakWTI3.py', 1), ('PeckJNG.py', 1), ('RoscTTI3.py', 1), ('RoscTTI2.py', 1), ('CoolWHM.py', 1), ('LyelCPG.py', 2), ('GilmCRS.py', 3), ('CrocDNL.py', 1), ('MedwTAI.py', 1), ('LeakWTI4.py', 4), ('LeakWTI.py', 1), ('MedwTAI2.py', 2), ('HowiWRL2.py', 3), ('MartHRW.py', 1), ('FitzRNS4.py', 7), ('CoolWHM3.py', 2), ('FitzRNS.py', 16), ('RoscTTI.py', 4), ('ClarGE3.py', 1), ('MartHRW2.py', 2), ('FitzRNS2.py', 3), ('HogaGMM2.py', 3), ('MartHSI.py', 1), ('LyelCPG3.py', 1), ('BeckWRE.py', 1), ('DibdTBR.py', 2), ('ClarGE4.py', 1)]
|
[
"prabhjyotsingh95@gmail.com"
] |
prabhjyotsingh95@gmail.com
|
86f71b6a21c37f5a888d31eb1350d8dc414a6eab
|
4429558655a0b96f3affb4501b560bffef77584f
|
/solution/dataset.py
|
20210edec9338f26065309c322cdb678b33ac0eb
|
[] |
no_license
|
MarkerViktor/ImprovadoTestTask
|
70b882be0049161f0b7db26007b6fd22e4052a49
|
aa7e22ad700c94766a7bbb259191d1522a392d3d
|
refs/heads/master
| 2023-06-25T20:42:18.802734
| 2021-07-22T10:35:22
| 2021-07-22T10:35:22
| 387,648,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,683
|
py
|
from itertools import chain
from io import StringIO
from collections.abc import Collection  # the bare `collections.Collection` alias was removed in Python 3.10
from typing import IO, Union, Iterator, Callable, Iterable
Schema = dict[str, type]
IndicesDict = dict[str, int]
RowValue = Union[str, int, float, bool]
Row = tuple[RowValue, ...]
RowDict = dict[str, RowValue]
GrouperOperator = Callable[[RowValue, RowValue], RowValue]
class Dataset(Collection):
def __init__(self, name: str, schema: Schema, use_defaults_for_kwargs: bool = False):
self._schema: Schema = schema
self._indices: IndicesDict = {header: index for index, header in enumerate(self.schema.keys())}
self._name: str = name
self._data: list[Row] = []
self._use_defaults_for_kwargs = use_defaults_for_kwargs
def add_row(self, *args: RowValue, **kwargs: RowValue):
"""
Add new row to the dataset converting values to suitable schema's types if possible.
If any kwargs provided args will be ignored.
Kwargs keys which aren't in the schema will be ignored.
"""
values: list[RowValue] = []
if kwargs:
# Check kwargs completeness and add default values, if some missing and self._use_default_for_kwargs is set
missing_keys = self.schema.keys() - kwargs.keys()
if len(missing_keys) > 0:
if self._use_defaults_for_kwargs:
for key in missing_keys:
kwargs[key] = self.schema[key]() # Call type constructor without arguments
else:
raise TypeError(f"Missing required keyword arguments: {missing_keys}", kwargs)
# Try to fill values in schema order
for header, type_ in self.schema.items():
try:
values.append(type_(kwargs[header]))
except ValueError:
raise ValueError(f"Can't convert keyword argument «{header}» to type {type_.__name__}.", kwargs)
except TypeError:
raise TypeError(f"Missing required keyword argument «{header}».", kwargs)
elif args:
# Check given args completeness
if len(args) < len(self.schema):
raise TypeError(f"Missing required positional arguments: {list(self.schema.keys())[len(args) - 1:]}")
# Try to fill values in provided order
for index, type_ in enumerate(self.schema.values()):
try:
values.append(type_(args[index]))
except ValueError:
raise ValueError(f"Can't convert {index+1}th positional argument to type {type_.__name__}.", args)
except TypeError:
raise TypeError(f"Missing required {index+1}th positional argument.", args)
if not values:
raise ValueError("Positional or keyword arguments must be provided.")
self._data.append(tuple(values))
def from_iterable(self, *row_iters: Iterable[Union[Row, RowDict]], dict_row: bool = False) -> list[Exception]:
"""
Extend dataset by elements from given row iterator.
Iterator must yield only tuples or only dicts depending on dict_row arg.
Returns exceptions caught during adding.
"""
errors = []
for row in chain(*row_iters):
try:
if dict_row:
self.add_row(**row)
else:
self.add_row(*row)
except (TypeError, ValueError) as e:
errors.append(e)
return errors
def sort(self, *headers: str) -> None:
"""Sort dataset rows by given headers. If no headers provided sorts by all headers."""
if not headers:
self._data.sort()
else:
self._data.sort(key=lambda row: tuple(self._by_headers(row, headers)))
def group_by(self, *grouping_headers: str, operator: GrouperOperator) -> None:
"""
Group dataset rows by given headers.
Like SQL «GROUP BY ...» all group members will subject to operator call in order.
"""
other_headers = sorted(self.headers - set(grouping_headers)) # Not grouping headers
grouping_other_pairs: Iterator[tuple[tuple[RowValue, ...], list[RowValue]]] = \
((tuple(self._by_headers(r, grouping_headers)), list(self._by_headers(r, other_headers))) for r in self)
groups_dict: dict[tuple[RowValue, ...], list[RowValue]] = {}
for grouping_tuple, other_list in grouping_other_pairs:
if values_list := groups_dict.get(grouping_tuple):
for index, value in enumerate(other_list):
values_list[index] = operator(values_list[index], value)
else:
groups_dict[grouping_tuple] = other_list
self._schema = {header: self.schema[header] for header in chain(grouping_headers, other_headers)}
self._indices: IndicesDict = {header: index for index, header in enumerate(self.schema.keys())}
self._data = [(*grouping, *other) for grouping, other in groups_dict.items()]
def write_as_table(self, io_obj: IO, separator: str = '\t'):
"""Write rows to a file-like object. Row elements dividing by separator."""
io_obj.write(separator.join(self.headers) + '\n')
for row in self:
io_obj.write(separator.join(str(element) for element in row))
io_obj.write('\n')
@property
def headers(self):
return self.schema.keys()
@headers.setter
def headers(self, headers: tuple[str]):
if len(headers) == len(self.headers):
self._schema = dict(zip(headers, self.schema.values()))
else:
raise IndexError(f"Incorrect number of headers is given (must be {len(self.headers)}).")
@property
def schema(self) -> Schema:
return self._schema
def dict_iter(self) -> Iterator[RowDict]:
headers = self.headers
for row in self:
yield dict(zip(headers, row))
def __repr__(self):
buffer = StringIO()
buffer.write(f"Dataset «{self._name}»:\n")
self.write_as_table(buffer)
return buffer.getvalue()
def __iter__(self):
return iter(self._data)
def __contains__(self, item):
return item in self._data
def __len__(self):
return len(self._data)
@staticmethod
def merge_schemas(*datasets: 'Dataset', intersection: bool = True):
"""Merge given schema dicts to new one with checking types."""
schemas = [ds.schema for ds in datasets]
new_schema = {}
if intersection:
new_schema = schemas[0]
for schema in schemas:
if intersection:
# Get keys which are in new_schema and adding_schema
common_headers = schema.keys() & new_schema.keys()
# Check type difference for a single header
for header in common_headers:
if new_schema[header] != schema[header]:
raise TypeError(f"There are dataset row schemas with different types "
f"«{new_schema[header].__name__}» and «{schema[header].__name__}» for "
f"single header «{header}».")
new_schema = dict(schema.items() & new_schema.items())
else:
for adding_schema in schemas:
# Get header->type pairs which not in new_schema
diff_pairs = adding_schema.items() - new_schema.items()
# Check type difference for a single header
for header, type_ in diff_pairs:
if exist_type := new_schema.get(header):
if type_ != exist_type:
raise TypeError(f"There are dataset row schemas with different types "
f"«{exist_type.__name__}» and «{type_.__name__}» for single header «{header}».")
new_schema |= diff_pairs
# Sort schema by headers
new_schema = dict(sorted(new_schema.items()))
return new_schema
def _by_headers(self, row: Row, headers: Iterable[str]) -> Iterator[RowValue]:
"""
Make iterator over row according to provided headers.
If the dataset hasn't required header None will be yielded.
"""
for header in headers:
index = self._indices.get(header)
if index is not None:
yield row[index]
else:
yield None
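# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hypothetical example of the Dataset API defined above; the schema,
# header names and sample values are invented. Requires Python 3.9+.
if __name__ == '__main__':
    sales = Dataset('sales', {'region': str, 'units': int, 'price': float})

    # Values are coerced to the schema types, so '10' and '2.5' are accepted.
    sales.add_row('north', '10', '2.5')
    sales.add_row(region='south', units=3, price=4.0)
    sales.add_row('north', 5, 1.5)

    sales.sort('region')                                   # order rows by region
    sales.group_by('region', operator=lambda a, b: a + b)  # sum the remaining columns per region

    print(sales)  # __repr__ renders the rows as a tab-separated table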
|
[
"MarkerViktor@outlook.com"
] |
MarkerViktor@outlook.com
|
c7420131506a4d2e390cb9d8df21223f481e8951
|
8a6ea916051ecdf3767e3d1f1e2b69d7d1db7315
|
/NLPwithML/DL4INdustry/protoBuf.py
|
b5e245f831feeb339769354dabccdfd8d654d7df
|
[] |
no_license
|
LokeshKD/MachineLearning
|
5d7b751dbcb5d08e938af24748d402d1da121fc7
|
f16d1b4d3ee35703a504f73c242c0ec02b0ba360
|
refs/heads/master
| 2023-06-04T22:07:56.903269
| 2021-06-22T06:46:57
| 2021-06-22T06:46:57
| 359,021,896
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,823
|
py
|
# Usage of ProtoBufs in TF.
## convert dict to tf.train.Example object.
## Features to Example.
import tensorflow as tf
features = tf.train.Features(feature=f_dict)  # f_dict maps feature names to tf.train.Feature objects (built further below)
ex = tf.train.Example(features=features)
print(repr(ex))
## output
'''
features {
feature {
key: "age"
value {
int64_list {
value: 12
}
}
}
feature {
key: "weight"
value {
float_list {
value: 88.19999694824219
}
}
}
}
'''
#######
## Feature
import tensorflow as tf
int_f = tf.train.Feature(
int64_list=tf.train.Int64List(value=[1, 2]))
print(repr(int_f) + '\n')
float_f = tf.train.Feature(
float_list=tf.train.FloatList(value=[-8.2, 5]))
print(repr(float_f) + '\n')
bytes_f = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b'\xff\xcc', b'\xac']))
print(repr(bytes_f) + '\n')
str_f = tf.train.Feature(
bytes_list=tf.train.BytesList(value=['joe'.encode()]))
print(repr(str_f) + '\n')
### output
'''
int64_list {
value: 1
value: 2
}
float_list {
value: -8.199999809265137
value: 5.0
}
bytes_list {
value: "\377\314"
value: "\254"
}
bytes_list {
value: "joe"
}
'''
###
import tensorflow as tf
f_dict = {
'int_vals': int_f,
'float_vals': float_f,
'bytes_vals': bytes_f,
'str_vals': str_f
}
features = tf.train.Features(feature=f_dict)
print(repr(features))
### output
'''
feature {
key: "bytes_vals"
value {
bytes_list {
value: "\377\314"
value: "\254"
}
}
}
feature {
key: "float_vals"
value {
float_list {
value: -8.199999809265137
value: 5.0
}
}
}
feature {
key: "int_vals"
value {
int64_list {
value: 1
value: 2
}
}
}
feature {
key: "str_vals"
value {
bytes_list {
value: "joe"
}
}
}
'''
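####### (illustrative addition, not part of the original notes)
## A sketch of round-tripping the Features built above through a TFRecord file.
## Assumes TensorFlow 2.x; the file name is arbitrary.
import tensorflow as tf

ex2 = tf.train.Example(features=features)  # `features` was built from f_dict above

with tf.io.TFRecordWriter('example.tfrecord') as writer:
    writer.write(ex2.SerializeToString())

# The parsing spec mirrors the feature shapes used above.
spec = {
    'int_vals': tf.io.FixedLenFeature([2], tf.int64),
    'float_vals': tf.io.FixedLenFeature([2], tf.float32),
    'bytes_vals': tf.io.FixedLenFeature([2], tf.string),
    'str_vals': tf.io.FixedLenFeature([1], tf.string),
}
for raw in tf.data.TFRecordDataset('example.tfrecord'):
    print(tf.io.parse_single_example(raw, spec))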
|
[
"i.lokesh@gmail.com"
] |
i.lokesh@gmail.com
|
30f869b3cf399036b70c382615c7c4de7189ea37
|
60d15185ec975d2beb6bf82b3c346c1083106c28
|
/pylibs/stress_tests/network_forming.py
|
858df10777880a3ab8250340e978484471c57304
|
[
"BSD-3-Clause"
] |
permissive
|
bee-mcc/ot-ns
|
d9dedb70f8946994a474c6dc883e581300625ef9
|
86df24e98811193540947608e1de663859f0a49a
|
refs/heads/main
| 2023-07-23T20:59:27.248025
| 2021-09-10T03:46:36
| 2021-09-10T03:46:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,723
|
py
|
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OTNS Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Network Forming Stress Test:
# Different number of nodes form networks (a single partition) and measure the network forming delay.
# Topology:
# 1x1 Routers ~ 7x7 Routers
# Fault Injections:
# None
# Pass Criteria:
# Network forming time is less than corresponding time limits
#
import os
from typing import Sequence
from BaseStressTest import BaseStressTest
XGAP = 100
YGAP = 100
RADIO_RANGE = int(XGAP * 1.5)
MIN_N = 1
MAX_N = 7
REPEAT = int(os.getenv('STRESS_LEVEL', '1')) * 3
EXPECTED_MERGE_TIME_MAX = [
None, 3, 6, 12, 20, 50, 100, 200
]
class StressTest(BaseStressTest):
SUITE = 'network-forming'
def __init__(self):
headers = ['Network Size', 'Formation Time 1']
for i in range(2, REPEAT + 1):
headers.append(f'FT {i}')
super(StressTest, self).__init__("Network Formation Test", headers)
def run(self):
# self.ns.config_visualization(broadcast_message=False)
for n in range(MIN_N, MAX_N + 1):
durations = []
for i in range(REPEAT):
secs = self.test_n(n)
durations.append(secs)
self.result.append_row(f'{n}x{n}', *['%ds' % d for d in durations])
avg_dura = self.avg_except_max(durations)
self.result.fail_if(avg_dura > EXPECTED_MERGE_TIME_MAX[n],
f"""{n}x{n} average formation time {avg_dura} > {
EXPECTED_MERGE_TIME_MAX[n]}""")
@staticmethod
def stdvar(nums: Sequence[float]):
ex = sum(nums) / len(nums)
s = 0
for i in nums:
s += (i - ex) ** 2
return float(s) / len(nums)
def test_n(self, n):
self.reset()
for r in range(n):
for c in range(n):
self.ns.add("router", 50 + XGAP * c, 50 + YGAP * r, radio_range=RADIO_RANGE)
secs = 0
while True:
self.ns.go(1)
secs += 1
pars = self.ns.partitions()
if len(pars) == 1 and 0 not in pars:
break
return secs
if __name__ == '__main__':
StressTest().run()
|
[
"noreply@github.com"
] |
bee-mcc.noreply@github.com
|
fd05e0da7a8d0805078cb0666536af63af4db1a5
|
5c1bfe1d55027789cb5a55b458a143bd4e32a142
|
/PlacementQuestions/EqualZeroOneSubSequence.py
|
bfce65dfc36bc8b05b29ac754f5b2eebde2a7626
|
[] |
no_license
|
alfa-abhi/Algorithms
|
e9487293b92162a2cf068137370c82574ebe3086
|
746bbfacebee57bbfa4883873a5413ed6aa02df1
|
refs/heads/master
| 2020-12-30T14:19:53.299617
| 2017-09-27T14:55:05
| 2017-09-27T14:55:05
| 91,309,057
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
# input: 11010011
# output: [1, 1, 0, 1, 0, 0]
seq = list(map(int, input()))  # Python 3: input() replaces raw_input(); list() materialises the map
length = len(seq)
zero = seq.count(0)
one = length - zero
equator = min(zero, one)
counter = 0
oneCount = 0
zeroCount = 0
subSequence = []
while counter < length:
if seq[counter] == 1:
if oneCount < equator:
subSequence.append(1)
oneCount += 1
else:
if zeroCount < equator:
subSequence.append(0)
zeroCount += 1
counter += 1
print(subSequence)
|
[
"alfa.abhi1996@gmail.com"
] |
alfa.abhi1996@gmail.com
|
78f4aace9123fe4006a521cee5e8250675624545
|
eec6ae08a76266e9535d40b9bb32dfca3d51d24f
|
/urllib_test2.py
|
372e7b9983c80d87a64b0527ea95368c6af23c60
|
[] |
no_license
|
jacklee19860111/Python_Test
|
9d045eee4b04f2345824c9829cafcb82e3d99462
|
0734f238efaebb90b81d8aeef5f2acf0bcbf3723
|
refs/heads/master
| 2021-01-11T22:18:16.432421
| 2017-01-15T16:04:58
| 2017-01-15T16:04:58
| 78,944,853
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 449
|
py
|
#-*-coding:utf-8-*-
import urllib
def callback(a,b,c):
"""
@a:是目前为止传递的数据块数量
@b:是每个数据块的大小,单位的byte
@c:是远程文件的大小
"""
download_progress = 100.0 * a * b / c
if download_progress > 100:
download_progress = 100
print "%.2f%%" % download_progress
url = "http://www.163.com"
urlpath="/home/git/home163.html"
urllib.urlretrieve(url,urlpath,callback)
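# --- Python 3 equivalent (illustrative addition; the script above targets Python 2) ---
# In Python 3, urlretrieve lives in urllib.request and the reporthook callback
# receives the same (blocks transferred, block size, total size) arguments.
import urllib.request

def py3_callback(block_num, block_size, total_size):
    progress = 100.0 * block_num * block_size / total_size
    print("%.2f%%" % min(progress, 100.0))

urllib.request.urlretrieve("http://www.163.com", "/home/git/home163.html", py3_callback)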
|
[
"lijihou_0@126.com"
] |
lijihou_0@126.com
|
0402b3a4ef0c8bb5d3c578f2d1051cf79251b9ad
|
2187ad01c46764b5642054ab1345f76ef8fd7f88
|
/Module05/DE-101_Labs/AWS - Python Workshop/LearnPython/lab_1_hello_world.py
|
61915fc31029b1b442f6f3ac69ecd92ece89487e
|
[] |
no_license
|
achudinova/DE-101
|
8132604a260a0e4ec2511cf4181617d8923b1971
|
26741acae39f10d318eb9e8a1f84879a6f9bf917
|
refs/heads/main
| 2023-06-23T23:18:55.915527
| 2021-07-27T14:23:24
| 2021-07-27T14:23:24
| 385,186,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 174
|
py
|
# A function that returns hello world
def hello_world():
return 'hello world'
# Assign the hello_world() function to a variable.
greeting = hello_world()
print(greeting)
|
[
"anastasiushka995@gmail.com"
] |
anastasiushka995@gmail.com
|
01c119add1d00161240988b19ce3d83e9cb8db21
|
c93a0b82627679a904f792668880cefffe801353
|
/ml/m05_iris2_keras.py
|
09623045b0c1128bd04d0bf40141f5571a53ea9e
|
[] |
no_license
|
Eunah-Kim/keras
|
b52f29b90c858263c606b61877678d7223821f59
|
40f58f5aeb6d38db1b6c81455d0911bb547239f5
|
refs/heads/master
| 2020-12-18T18:29:02.334063
| 2020-02-26T09:20:40
| 2020-02-26T09:20:40
| 235,483,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import tensorflow as tf
# 1. Load the iris dataset
iris_data = pd.read_csv('./data/iris.csv', encoding='utf-8',
names=['a','b','c','d','y']) #, header=None)
# Split the iris data into labels and input features
y = iris_data.loc[:, "y"]
x = iris_data.loc[:,["a","b","c","d"]]
encoder = LabelEncoder()
encoder.fit(y)
y = encoder.transform(y)
# y = y.replace("Iris-setosa",0)
# y = y.replace("Iris-virginica",1)
# y = y.replace("Iris-versicolor",2)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, train_size=0.8, shuffle=True )
from keras.utils import np_utils
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train)
# Define the model
model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(4,)))
model.add(Dense(5))
model.add(Dense(3, activation='softmax'))
# Compile and train the model
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['acc'])
model.fit(x_train, y_train, epochs=100, batch_size=10)
# Evaluate the model
y_pred = model.predict(x_test)
print(y_pred.shape)
# y_pred.reshape(30,3,1)
# y_pred = np.argmax(y_pred, axis=1)
print(y_pred)
loss, acc = model.evaluate(x_test, y_test)
print("\n 정답률: ", acc)
|
[
"keunah1016@naver.com"
] |
keunah1016@naver.com
|
faa557a97a5bf7715dcab61f03ac74715d01584c
|
a39e85f1e6fbf245d84737933f676791a6efbf86
|
/Scripts/twistd-script.py
|
3a389b3da2ec4fc28eb81e84fecc5bb557771444
|
[] |
no_license
|
harshivvp/PythonWebDev-VEnv
|
4b025af37c4903119b70a61bc15d26eab6c9ac9a
|
8ce13f3bc2af7a61922336b79aaeb6d23eae3aa2
|
refs/heads/master
| 2021-05-02T10:50:06.822469
| 2018-02-12T13:23:38
| 2018-02-12T13:23:38
| 120,762,014
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 412
|
py
|
#!e:\harshiv\projects\myvenv\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==17.9.0','console_scripts','twistd'
__requires__ = 'Twisted==17.9.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==17.9.0', 'console_scripts', 'twistd')()
)
|
[
"pandyaharshiv@gmail.com"
] |
pandyaharshiv@gmail.com
|
373c9b5a52acc56a36e4c67a851a21594b807d20
|
f67b12ea1fdd4c98a50bee602038e6490ff659d0
|
/types.py
|
45842d0394022d197c72ee2684e1e5e4e83993ba
|
[] |
no_license
|
puhachalex/lesson1
|
c58ffedf6f32a1eb6b41b047324654cbf0f865f5
|
cfdde2a475b45f37a00d3c2deeab82ed109afdfc
|
refs/heads/master
| 2020-09-19T20:07:11.233613
| 2019-12-08T17:53:23
| 2019-12-08T17:53:23
| 224,286,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 296
|
py
|
name = input('Enter your name: ').upper()
print(f'Hello, {name}! How are you?')
##########
a = float(1) # ???
b = int(2.5) # ???
c = bool(1) # ???
d = bool('') # ???
e = bool(0) # ???
print(type(a))
print(type(b))
print(type(c))
print(type(d))
print(type(e))
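# (Illustrative addition) Expected output of the checks above under CPython 3:
# a = 1.0    -> <class 'float'>
# b = 2      -> <class 'int'>
# c = True   -> <class 'bool'>
# d = False  -> <class 'bool'>
# e = False  -> <class 'bool'>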
|
[
"puhach.alex@gmail.com"
] |
puhach.alex@gmail.com
|