blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7870158fe9657141779e126e13dfd712b9ed454e | daf9821160240a508a604595b60dd7052198d1cb | /testProcessor.py | 026160f0c7d0bb97d8a1699452b243cafce60a27 | [] | no_license | emmet-gingles/Python-FileProcessor | f7a3669be5c736d279a2ab9ec55c118213607ea3 | f50e9fbf61a30cef387a64951c9c818e57396766 | refs/heads/master | 2020-03-23T13:30:21.119394 | 2017-07-12T18:57:59 | 2017-07-12T18:57:59 | 141,622,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,356 | py | from process_changes import *
import unittest
# class to test the data processor functionality
class TestDataProcessor(unittest.TestCase):
    """Unit tests for the DataProcessor log-analysis class."""

    def setUp(self):
        # Fresh processor with the sample log loaded before every test.
        self.dataprocessor = DataProcessor()
        self.dataprocessor.loadFile('changes_python.log')

    def testGetNumAuthors(self):
        # No lines have been parsed yet, so the author count starts at zero.
        self.assertEqual(self.dataprocessor.getNumAuthors(), 0)

    def testGetNumDays(self):
        # No lines have been parsed yet, so the day count starts at zero.
        self.assertEqual(self.dataprocessor.getNumDays(), 0)

    def testGetNumLines(self):
        # Loading the file alone is enough to know its line count.
        self.assertEqual(self.dataprocessor.getNumLines(), 5255)

    def testLoadFile(self):
        # An existing file loads cleanly (no return value).
        self.assertEqual(self.dataprocessor.loadFile('changes_python.log'), None)
        # A missing file is reported with -1.
        self.assertEqual(self.dataprocessor.loadFile('filename.log'), -1)

    def testReadLines(self):
        # Parsing the loaded file returns nothing ...
        self.assertEqual(self.dataprocessor.readLines(), None)
        # ... but populates the author and day counters.
        self.assertEqual(self.dataprocessor.getNumAuthors(), 10)
        self.assertEqual(self.dataprocessor.getNumDays(), 5)

    def testConvertDayName(self):
        # Known three-letter abbreviations map to full day names.
        self.assertEqual(self.dataprocessor.convertDayName('Mon'), 'Monday')
        self.assertEqual(self.dataprocessor.convertDayName('Wed'), 'Wednesday')
        self.assertEqual(self.dataprocessor.convertDayName('Fri'), 'Friday')
        # Anything unrecognised yields None.
        self.assertEqual(self.dataprocessor.convertDayName('Hello world'), None)

    def testMostCommitsByAuthor(self):
        self.dataprocessor.readLines()
        # Sentence layout: author is the first token, commit count the seventh.
        words = self.dataprocessor.mostCommitsByAuthor().split()
        self.assertEqual(words[0], 'viacheslav.vdovenko')
        self.assertEqual(int(words[6]), 191)

    def testMostCommitsByDay(self):
        self.dataprocessor.readLines()
        # Sentence layout: day is the first token, commit count the tenth.
        words = self.dataprocessor.mostCommitsByDay().split()
        self.assertEqual(words[0], 'Thursday')
        self.assertEqual(int(words[9]), 118)

    def testAverageCommits(self):
        self.dataprocessor.readLines()
        # The average appears as the sixth whitespace-separated token.
        words = self.dataprocessor.averageCommits().split()
        self.assertEqual(float(words[5]), 42.2)
# run only from command line
if __name__ == '__main__':
# calls any functions that are preceded by test
unittest.main() | [
"noreply@github.com"
] | noreply@github.com |
96ebd867811570532d8fc6a0934d0475f42f77e1 | db903a5e99712d1f45e1d45c4d77537f811ae569 | /src/python/pants/option/global_options_test.py | ede2086b69991da2a0ecc2330dd8015392456304 | [
"Apache-2.0"
] | permissive | Hirni-Meshram2/pants | 777db8ea67c1fc66de46f0ab374ba4fff8597357 | e802d62cc68176aa66947a939c771b01f47d5425 | refs/heads/main | 2023-05-01T09:23:10.973766 | 2021-05-19T08:24:50 | 2021-05-19T08:24:50 | 366,021,656 | 0 | 2 | Apache-2.0 | 2021-05-10T11:38:07 | 2021-05-10T11:38:06 | null | UTF-8 | Python | false | false | 6,764 | py | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import sys
from pathlib import Path
from textwrap import dedent
import pytest
from pants.base.build_environment import get_buildroot
from pants.engine.environment import CompleteEnvironment
from pants.engine.internals.scheduler import ExecutionError
from pants.init.options_initializer import OptionsInitializer
from pants.option.global_options import (
DynamicRemoteExecutionOptions,
ExecutionOptions,
GlobalOptions,
)
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.testutil.option_util import create_options_bootstrapper
from pants.util.contextutil import temporary_dir
def create_dynamic_execution_options(
    *,
    initial_headers: dict[str, str],
    token_path: str | None = None,
    plugin: str | None = None,
    local_only: bool = False,
) -> DynamicRemoteExecutionOptions:
    """Bootstrap Pants options for a fake remote setup and derive the dynamic options."""
    bootstrap_args = [
        "--remote-cache-read",
        "--remote-execution-address=grpc://fake.url:10",
        "--remote-store-address=grpc://fake.url:10",
        f"--remote-store-headers={initial_headers}",
        f"--remote-execution-headers={initial_headers}",
        "--remote-instance-name=main",
    ]
    # Optional auth mechanisms under test.
    if token_path:
        bootstrap_args.append(f"--remote-oauth-bearer-token-path={token_path}")
    if plugin:
        bootstrap_args.append(f"--remote-auth-plugin={plugin}")
    bootstrapper = create_options_bootstrapper(bootstrap_args)
    environment = CompleteEnvironment({})
    _, opts = OptionsInitializer(bootstrapper).build_config_and_options(
        bootstrapper, environment, raise_=False
    )
    return DynamicRemoteExecutionOptions.from_options(opts, environment, local_only=local_only)
def test_dynamic_execution_options_remote_oauth_bearer_token_path() -> None:
    """The token file's contents become a Bearer `authorization` header on both header sets."""
    with temporary_dir() as tempdir:
        token_file = Path(tempdir, "token.txt")
        token_file.touch()
        token_file.write_text("my-token")
        opts = create_dynamic_execution_options(
            initial_headers={"foo": "bar"}, token_path=str(token_file)
        )
    expected = {"authorization": "Bearer my-token", "foo": "bar"}
    assert opts.remote_store_headers == expected
    assert opts.remote_execution_headers == expected
def test_dynamic_execution_options_local_only() -> None:
    """With local_only set, the derived options must equal the fully disabled ones."""
    opts = create_dynamic_execution_options(initial_headers={}, local_only=True)
    assert opts == DynamicRemoteExecutionOptions.disabled()
def test_dynamic_execution_options_auth_plugin() -> None:
    # Verifies that a --remote-auth-plugin callback can rewrite the store and
    # execution headers, override the instance name, and toggle remote caching
    # via the AuthPluginState it returns.
    def compute_exec_options(state: str) -> DynamicRemoteExecutionOptions:
        with temporary_dir() as tempdir:
            # NB: For an unknown reason, if we use the same file name for multiple runs, the plugin
            # result gets memoized. So, we use a distinct file name.
            plugin_path = Path(tempdir, f"auth_plugin_{state}.py")
            plugin_path.touch()
            plugin_path.write_text(
                dedent(
                    f"""\
                    from pants.option.global_options import AuthPluginState, AuthPluginResult

                    def auth_func(initial_execution_headers, initial_store_headers, options, **kwargs):
                        return AuthPluginResult(
                            state=AuthPluginState.{state},
                            execution_headers={{
                                **{{k: "baz" for k in initial_execution_headers}},
                                "exec": "xyz",
                            }},
                            store_headers={{
                                **{{k: "baz" for k in initial_store_headers}},
                                "store": "abc",
                                "store_url": options.for_global_scope().remote_store_address,
                            }},
                            instance_name="custom_instance",
                        )
                    """
                )
            )
            # Make the temp module importable so --remote-auth-plugin can load it.
            sys.path.append(tempdir)
            result = create_dynamic_execution_options(
                initial_headers={"foo": "bar"}, plugin=f"auth_plugin_{state}:auth_func"
            )
            sys.path.pop()
            return result

    # An OK plugin merges its headers and overrides the instance name.
    exec_options = compute_exec_options("OK")
    assert exec_options.remote_store_headers == {
        "store": "abc",
        "foo": "baz",
        "store_url": "grpc://fake.url:10",
    }
    assert exec_options.remote_execution_headers == {"exec": "xyz", "foo": "baz"}
    assert exec_options.remote_cache_read is True
    assert exec_options.remote_instance_name == "custom_instance"

    # An UNAVAILABLE plugin disables remote cache reads and keeps the original name.
    exec_options = compute_exec_options("UNAVAILABLE")
    assert exec_options.remote_cache_read is False
    assert exec_options.remote_instance_name == "main"
def test_execution_options_remote_addresses() -> None:
    """Scheme handling: grpc(s):// is accepted and normalized to http(s)://; others raise."""

    def build(store_addr: str, exec_addr: str) -> ExecutionOptions:
        bootstrapper = create_options_bootstrapper(
            [
                f"--remote-store-address={store_addr}",
                f"--remote-execution-address={exec_addr}",
            ]
        )
        _, opts = OptionsInitializer(bootstrapper).build_config_and_options(
            bootstrapper, CompleteEnvironment({}), raise_=False
        )
        return ExecutionOptions.from_options(
            opts.for_global_scope(), DynamicRemoteExecutionOptions.disabled()
        )

    # The host deliberately contains "http" to exercise the scheme rewrite.
    host = "fake-with-http-in-url.com:10"
    plain = build(f"grpc://{host}", f"grpc://{host}")
    assert plain.remote_execution_address == f"http://{host}"
    assert plain.remote_store_address == f"http://{host}"
    secure = build(f"grpcs://{host}", f"grpcs://{host}")
    assert secure.remote_execution_address == f"https://{host}"
    assert secure.remote_store_address == f"https://{host}"
    # Non-grpc schemes are rejected outright.
    with pytest.raises(ExecutionError):
        build(f"http://{host}", f"grpc://{host}")
    with pytest.raises(ExecutionError):
        build(f"grpc://{host}", f"https:://{host}")
def test_invalidation_globs() -> None:
    """A relative, un-normalized pythonpath entry must not leak into pantsd's invalidation globs."""
    marker = "something-ridiculous"
    bootstrapper = OptionsBootstrapper.create(
        env={}, args=[f"--pythonpath=../{marker}"], allow_pantsrc=False
    )
    globs = GlobalOptions.compute_pantsd_invalidation_globs(
        get_buildroot(), bootstrapper.bootstrap_options.for_global_scope()
    )
    assert all(marker not in glob for glob in globs)
| [
"noreply@github.com"
] | noreply@github.com |
21b2263b0e0c833efceb0ba0874537b1f524c9f7 | d9165259ded57d68d8ad50b45b56bffed85e055c | /webscrape/views.py | c789ed2d29a0d138bd10f27ad55d855af1521559 | [] | no_license | developer-aj/WebScraping | 23b295ff4fa3eab4a72e5e0b7491672202198340 | dba52db736bffa5e93647fdead51169413c69a6b | refs/heads/master | 2020-05-19T14:02:15.688030 | 2015-03-27T09:56:05 | 2015-03-27T09:56:05 | 32,788,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | from django.shortcuts import render, get_object_or_404, redirect
from .models import web
from .forms import PostForm
from urllib import *
from re import *
from sets import Set
# Create your views here.
def post_new(request):
    """Handle the scrape form: on a valid POST, crawl the submitted site and show results."""
    if request.method == "POST":
        form = PostForm(request.POST)
        if form.is_valid():
            post = form.save(commit=False)
            # The form stores only the bare host name; prefix the scheme.
            ul = "http://"+post.website
            # Crawl the site for outgoing links and e-mail addresses before saving.
            post.links, post.email = crawler(ul)
            post.save()
            return render(request, 'webscrape/post_detail.html', {'post': post})
    else:
        form = PostForm()
    # NOTE(review): an invalid POST falls through to here with the bound form,
    # which re-renders it with validation errors — confirm this final return is
    # at function level (the indentation was ambiguous in the reviewed copy).
    return render(request, 'webscrape/post_new.html', {'form': form})
def post_detail(request, pk):
    """Render the stored scrape results for a single `web` record, or 404."""
    record = get_object_or_404(web, pk=pk)
    return render(request, 'webscrape/post_detail.html', {'post': record})
def crawler(addr):
    """Fetch ``addr`` and every page it links to, collecting links and e-mails.

    Returns a two-element list: newline-joined links and newline-joined e-mail
    addresses found on the start page and on each linked page.
    Fixes over the original: the joins now happen once after the crawl (the
    original joined inside the loop, so a start page with no links returned
    empty strings even when it contained e-mail addresses), and a single
    unreachable child page no longer aborts the whole crawl.
    """
    html = urlopen(addr).read()
    # Links and e-mails found on the start page itself.
    links = Set(findall(r'(http://[a-z0-9A-Z].*?)"',html))
    links = links.union(Set(findall(r'(https://[a-z0-9A-Z].*?)"',html)))
    emails = Set(findall(r'([a-z\._A-Z0-9]+@[a-z\._A-Z0-9]+)',html))
    links_list = links
    for link in links:
        # Best-effort: skip children that fail to load instead of crashing.
        try:
            html_child = urlopen(link).read()
        except Exception:
            continue
        emails = emails.union(Set(findall(r'([a-z\._A-Z0-9]+@[a-z\._A-Z0-9]+)',html_child)))
        links_list = links_list.union(Set(findall(r'(http://[a-z0-9A-Z].*?)"',html_child)))
        links_list = links_list.union(Set(findall(r'(https://[a-z0-9A-Z].*?)"',html_child)))
    # Join once, after all pages have been visited.
    links_data = '\n'.join(links_list)
    email_data = '\n'.join(emails)
    return [links_data, email_data]
| [
"ajju.kvmc@gmail.com"
] | ajju.kvmc@gmail.com |
f35a1255a58d91c36ed5bb36d3db683c8d4278c1 | c14d8d4e648fc6433ddb4cbef790e93e23c8bc8d | /BankApp/urls.py | 8168201860000704a2b372459e4c12045fb387e7 | [] | no_license | dilshamony/BankAppProject | f9b1f77713d6aaf2b3814886e775df5c45aabb52 | bd6b93b00aefe4440b6718cbd0134fd90b5c35dd | refs/heads/master | 2023-04-27T00:56:05.333750 | 2021-05-21T14:13:03 | 2021-05-21T14:13:03 | 369,554,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """BankApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Project-level routing: Django admin plus the banking app's own URL config.
urlpatterns = [
    path('admin/', admin.site.urls),
    # All application routes live under /bankapp/ and are defined in mybank.urls.
    path("bankapp/",include("mybank.urls"))
]
| [
"dilsham998@gmail.com"
] | dilsham998@gmail.com |
7998661e4b47331b7ada6e2e919fce2cdc61fcdc | a7860b1921e2ecb4d134fb9fd290551705edf90e | /build/universal_robot/ur10_moveit_config/catkin_generated/pkg.develspace.context.pc.py | 7d54442ee1dc4b46dd510f0e382b640cc900a3da | [] | no_license | WRuotolo/TRI_dish_hand | c586a9cc93fb69b583755cf7f468b02421434151 | 52f31fb3e6a6fba89ed4d25e0dc4770d0c191064 | refs/heads/master | 2020-04-17T01:54:14.026935 | 2019-01-17T00:23:48 | 2019-01-17T00:23:48 | 166,112,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "ur10_moveit_config"
PROJECT_SPACE_DIR = "/home/wruotolo/catkin_ws/devel"
PROJECT_VERSION = "1.1.9"
| [
"wruotolo@stanford.edu"
] | wruotolo@stanford.edu |
46d66199b07078ad113d2244608aa0f3dcff80bb | ed8cdcce521b8cab33c66f716c0886e17f035d21 | /.history/script/get_cpu_mem_info_20191222122843.py | 8d68937cd4d25850ae1c036ceb08d000a04b8098 | [] | no_license | deancsdfy/AndroidPerformanceTool_windows | 8ac35729bc651c3af551f090d6788b6ee3f17eb5 | c4906aa9347e8e5eca68dbb7cf2d66a327c70d1f | refs/heads/master | 2020-11-27T20:38:55.014228 | 2020-01-09T15:55:52 | 2020-01-09T15:55:52 | 229,593,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,591 | py | #! python3
#coding=utf-8
import sys,os,re
print(sys.path)
sys.path.append('.')
from public import publicfunction as util
# Shorthand: normalise a path to its absolute form.
PATH = lambda p: os.path.abspath(p)
# Package name of the app currently in the foreground (the app under test).
package_name = util.get_current_packagename()
# print('本次测试APP为:%s' %(package_name))
# Get the process's CPU and memory usage
def top():
    """Sample the target process once via `top` and return its (cpu, mem) columns.

    Returns:
        (cpu, mem) floats parsed from the last matching top line, or
        (0.0, 0.0) when the process does not appear in the output.
    """
    print('Starting get mem cpu information...')
    pid=get_pid()
    print(pid)
    # Fix: initialise both values so an empty grep result can no longer raise
    # UnboundLocalError on the return statement.
    cpu = 0.0
    mem = 0.0
    top_info = util.shell("top -n 1 | grep %d" %(int(pid))).stdout.readlines()
    for x in top_info:
        temp_list = x.split()
        # Columns 8/9 are assumed to hold %CPU / %MEM in this top layout
        # -- TODO confirm per device/Android version.
        cpu=float(temp_list[8])
        mem=float(temp_list[9])
    print(cpu)
    print(mem)
    return (cpu,mem)
def getCpuNums():
    """Return the number of logical CPU cores on the device."""
    # Each "processor" line in /proc/cpuinfo corresponds to one logical core.
    processor_lines = util.shell('cat /proc/cpuinfo|grep processor').stdout.readlines()
    return len(processor_lines)
def getCpuInfo():
    """Return the target process's CPU usage in percent, normalised per core.

    Parses a single `top` snapshot; returns 0.0 when the process is absent
    or the CPU column is blank.
    """
    # print('Starting get mem cpu information...')
    pid = get_pid()
    # print(pid)
    cpunums=getCpuNums()
    top_info = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
    if(len(top_info)!=0):
        for x in top_info:
            temp_list = x.split()
            # print(temp_list[8])
            if getSDKVersion() == '23':
                # Android 6.0 top layout: the slice strips the "b'" / "%'"
                # wrapping from the stringified bytes in column 2 -- assumes
                # output like b'25%'; TODO confirm on a real SDK-23 device.
                cpu = round(float(str(temp_list[2])[2:-2])/cpunums,2)
                print(cpu)
            elif (temp_list[8]!=" "):
                # Newer top layouts put %CPU in column 8; divide by the core
                # count so the figure is comparable across devices.
                print(float(temp_list[8]))
                cpu = round(float(temp_list[8])/cpunums,2)
                # print(cpu)
            else:
                cpu = 0.0
        # Value from the last matching line wins when grep returns several.
        return cpu
    else:
        return 0.0
def getMemInfo():
    """Return the target process's memory footprint in MB (one decimal place)."""
    # print('start get mem information....')
    pid=get_pid()
    # print(pid)
    if getSDKVersion() == '23':
        # Android 6.0: read the value from the `top` snapshot.
        # NOTE(review): temp_list here is a list of *lines* from readlines(),
        # not columns, so indexing [6] looks suspect -- verify on SDK 23.
        temp_list = util.shell('top -n 1 | grep %d' % (int(pid))).stdout.readlines()
        print(temp_list[6])
        mem=round(float(temp_list[6])/1024,1)
    else:
        # Other versions: TOTAL PSS (in KB) from dumpsys meminfo, converted to MB.
        mem_info = util.shell('dumpsys meminfo %d |grep TOTAL:' %(int(pid))).stdout.readlines()
        for x in mem_info:
            temp_list = x.split()
            mem=round(float(temp_list[1])/1024,1)
    print(mem)
    return mem
# Get the device model name
def getDevicesName():
    """Return the device model string reported by `getprop ro.product.model`."""
    return str(util.shell('getprop ro.product.model').stdout.read())
# Get the Android SDK version
def getSDKVersion():
    # Returns the SDK level as a string, e.g. '23'.
    # The [2:-7] slice strips the "b'" prefix and the trailing characters from
    # the stringified bytes output -- assumes a fixed-length CRLF tail from adb
    # on Windows; TODO confirm, this breaks if the line ending differs.
    SDKVersion = str(util.shell('getprop ro.build.version.sdk').stdout.read())[2:-7]
    return SDKVersion
# Get the pid of the foreground activity's process
def get_pid():
    """Return the pid (as a string) parsed from `dumpsys activity top` output."""
    # Regex matches "name=value" tokens (package/activity pid pairs).
    pattern = re.compile(r"[a-zA-Z0-9\.]+=.[0-9\.]+")
    package = util.shell('dumpsys activity top| grep ACTIVITY').stdout.read()
    # The last match belongs to the topmost ACTIVITY line; take the value
    # after '=' -- presumably the pid; verify against the dumpsys format.
    pid = pattern.findall(package.decode())[-1].split('=')[1]
    # pid_info = util.shell('ps| grep %s' %(package_name)).stdout.readlines()
    # print(pid_info)
    # pid = pid_info[0].split()[1]
    # print('pid为: %s' %(pid))
    return pid
# Get the uid of the target process
def get_uid():
    """Return the uid (as a string) read from /proc/<pid>/status."""
    cmd = 'cat /proc/'+ get_pid() + '/status'
    uid_info = util.shell(cmd).stdout.readlines()
    # Line index 6 is assumed to be the "Uid:" row -- TODO confirm; the line
    # position can vary across kernel versions.
    uid = uid_info[6].split()[1]
    print('uid为:%s' %(uid))
    return str(uid)
# Upload (tx) traffic -- currently not usable; another way of reading the
# per-uid upstream byte count still needs to be found.
def get_flow_send():
    """Dump per-uid traffic counters from xt_qtaguid (known not to work yet)."""
    # NOTE(review): the literal double quotes embedded in `cmd` look wrong for
    # util.shell -- confirm how the command string is ultimately executed.
    cmd = '"cat proc/net/xt_qtaguid/stats|grep '+'%s"'%get_uid()
    print(cmd)
    flow = util.shell(cmd).stdout.readlines()
    print(flow)
# Entry point: run one CPU + memory sample when invoked from the command line.
if __name__ == "__main__":
    print("Starting get top information...")
    #get_flow_send()
    #top()
    getSDKVersion()
    getCpuInfo()
    getMemInfo()
"denacsdfy@gmail.com"
] | denacsdfy@gmail.com |
d6ebf46473412fa246c591a471b445630dae2adb | df11a013917ffa0ef9a8e2f1ddcf4d12af65812f | /code_cm17/trainer/calc_jaccard_index.py | fcc1dba1f985e0ea18f8e157dff64596e7d72ada | [
"MIT"
] | permissive | micimize/DigitalHistoPath | 0534e6e7cecca73b299620be55c8bbaa1b39bf99 | f2a4dd03761e321c35b1b2e17de3aa4b3ba49511 | refs/heads/master | 2022-11-09T11:52:09.462891 | 2020-06-03T16:23:07 | 2020-06-03T16:23:07 | 272,494,643 | 0 | 0 | MIT | 2020-06-15T16:51:43 | 2020-06-15T16:51:43 | null | UTF-8 | Python | false | false | 23,774 | py | #Imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import glob
import random
import imgaug
from imgaug import augmenters as iaa
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import openslide
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, BatchNormalization, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, concatenate, Concatenate, UpSampling2D, Activation
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard
from tensorflow.keras import metrics
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms # noqa
import sklearn.metrics
import io
import itertools
from six.moves import range
import time
import cv2
from skimage.color import rgb2hsv
from skimage.filters import threshold_otsu
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.getcwd())))
from models.seg_models import unet_densenet121, get_inception_resnet_v2_unet_softmax
# Random Seeds
np.random.seed(0)
random.seed(0)
tf.set_random_seed(0)
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
import tifffile
import skimage.io as io
import pandas as pd
import json
# In[2]:
# Image Helper Functions
def imsave(*args, **kwargs):
    """
    Concatenate the images given in args and saves them as a single image in the specified output destination.
    Images should be numpy arrays and have same dimensions along the 0 axis.
    imsave(im1,im2,out="sample.png")

    Returns the PIL image when no "out" destination is given, and 0 on error.
    """
    args_list = list(args)
    for i in range(len(args_list)):
        # isinstance is the correct check (also accepts ndarray subclasses).
        if not isinstance(args_list[i], np.ndarray):
            print("Not a numpy array")
            return 0
        if len(args_list[i].shape) == 2:
            # Promote grayscale to 3 channels so it can sit beside RGB images.
            args_list[i] = np.dstack([args_list[i]]*3)
        if args_list[i].max() == 1:
            # Stretch binary masks to the displayable 0-255 range.
            args_list[i] = args_list[i]*255
    out_destination = kwargs.get("out",'')
    try:
        concatenated_arr = np.concatenate(args_list,axis=1)
        im = Image.fromarray(np.uint8(concatenated_arr))
    except Exception as e:
        # Fix: report and bail out instead of dropping into an interactive
        # debugger (the original left `import ipdb; ipdb.set_trace()` here).
        print(e)
        return 0
    if out_destination:
        print(f"Saving to {out_destination}")
        im.save(out_destination)
    else:
        return im
def imshow(*args,**kwargs):
    """ Handy function to show multiple plots in on row, possibly with different cmaps and titles
    Usage:
    imshow(img1, title="myPlot")
    imshow(img1,img2, title=['title1','title2'])
    imshow(img1,img2, cmap='hot')
    imshow(img1,img2,cmap=['gray','Blues']) """
    cmap = kwargs.get('cmap', 'gray')
    title= kwargs.get('title','')
    axis_off = kwargs.get('axis_off','')
    if len(args)==0:
        raise ValueError("No images given to imshow")
    elif len(args)==1:
        # Single image: plot directly on the current axes.
        plt.title(title)
        plt.imshow(args[0], interpolation='none')
    else:
        # Multiple images: broadcast a scalar cmap/title to every subplot.
        n=len(args)
        if type(cmap)==str:
            cmap = [cmap]*n
        if type(title)==str:
            title= [title]*n
        plt.figure(figsize=(n*5,10))
        for i in range(n):
            plt.subplot(1,n,i+1)
            plt.title(title[i])
            plt.imshow(args[i], cmap[i])
        if axis_off:
            plt.axis('off')
    plt.show()
def normalize_minmax(data):
    """
    Normalize contrast across volume

    Linearly rescales `data` to [0, 1]; a constant-valued input yields all
    zeros. Fix: uses the builtin float() -- the np.float alias was removed
    in NumPy 1.24 and raised AttributeError there.
    """
    _min = float(np.min(data))
    _max = float(np.max(data))
    if (_max-_min)!=0:
        img = (data - _min) / (_max-_min)
    else:
        img = np.zeros_like(data)
    return img
# Functions
def BinMorphoProcessMask(mask):
    """
    Binary operation performed on tissue mask

    Pipeline: close with a 20x20 kernel (fills small holes), open with a 5x5
    kernel (removes specks), then dilate with a 20x20 kernel so nearby tissue
    fragments merge into contiguous blobs.
    """
    close_kernel = np.ones((20, 20), dtype=np.uint8)
    image_close = cv2.morphologyEx(np.array(mask), cv2.MORPH_CLOSE, close_kernel)
    open_kernel = np.ones((5, 5), dtype=np.uint8)
    image_open = cv2.morphologyEx(np.array(image_close), cv2.MORPH_OPEN, open_kernel)
    kernel = np.ones((20, 20), dtype=np.uint8)
    image = cv2.dilate(image_open,kernel,iterations = 1)
    return image
def get_bbox(cont_img, rgb_image=None):
    """Return bounding boxes of all external contours in a binary image.

    Optionally draws the contours and their boxes on a copy of `rgb_image`.
    Fix: the box-drawing loop now only runs when an RGB image was supplied;
    previously cv2.rectangle was invoked on None whenever rgb_image was
    omitted. NOTE(review): assumes OpenCV 4.x, where findContours returns
    two values -- confirm against the installed cv2.
    """
    contours, _ = cv2.findContours(cont_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.boundingRect yields (x, y, w, h) tuples.
    bounding_boxes = [cv2.boundingRect(c) for c in contours]
    rgb_contour = None
    if rgb_image is not None:
        rgb_contour = rgb_image.copy()
        line_color = (0, 0, 255) # blue color code
        cv2.drawContours(rgb_contour, contours, -1, line_color, 2)
        # Variable names follow the original code; (h, w) actually bind the
        # rect's width/height, used consistently below.
        for x, y, h, w in bounding_boxes:
            rgb_contour = cv2.rectangle(rgb_contour,(x,y),(x+h,y+w),(0,255,0),2)
    return bounding_boxes, rgb_contour
def get_all_bbox_masks(mask, stride_factor):
    """
    Find the bbox and corresponding masks

    Returns a mask of the same shape with every contour's bounding box,
    padded by `stride_factor` on each side and clamped to the array, filled
    with 1. NOTE(review): boxes are indexed as [y_min:y_max, x_min:x_max],
    i.e. cv2's x (column) is used against axis 1 -- confirm the mask
    orientation matches what callers pass in.
    """
    bbox_mask = np.zeros_like(mask)
    bounding_boxes, _ = get_bbox(mask)
    y_size, x_size = bbox_mask.shape
    for x, y, h, w in bounding_boxes:
        # Pad the raw box by the sampling stride on every side.
        x_min = x - stride_factor
        x_max = x + h + stride_factor
        y_min = y - stride_factor
        y_max = y + w + stride_factor
        if x_min < 0:
            x_min = 0
        if y_min < 0:
            y_min = 0
        if x_max > x_size:
            x_max = x_size - 1
        if y_max > y_size:
            y_max = y_size - 1
        bbox_mask[y_min:y_max, x_min:x_max]=1
    return bbox_mask
def get_all_bbox_masks_with_stride(mask, stride_factor):
    """
    Find the bbox and corresponding masks

    Same as get_all_bbox_masks, but only every `stride_factor`-th pixel inside
    each padded bounding box is set, producing a sparse sampling grid.
    """
    bbox_mask = np.zeros_like(mask)
    bounding_boxes, _ = get_bbox(mask)
    y_size, x_size = bbox_mask.shape
    for x, y, h, w in bounding_boxes:
        # Pad the raw box by the sampling stride on every side, then clamp.
        x_min = x - stride_factor
        x_max = x + h + stride_factor
        y_min = y - stride_factor
        y_max = y + w + stride_factor
        if x_min < 0:
            x_min = 0
        if y_min < 0:
            y_min = 0
        if x_max > x_size:
            x_max = x_size - 1
        if y_max > y_size:
            y_max = y_size - 1
        # Strided assignment: keeps one pixel per stride_factor in each axis.
        bbox_mask[y_min:y_max:stride_factor, x_min:x_max:stride_factor]=1
    return bbox_mask
def find_largest_bbox(mask, stride_factor):
    """
    Find the largest bounding box encompassing all the blobs

    Returns a mask of the same shape with the padded bounding box of all
    pixels equal to 1 set to 1. `stride_factor` pads the box on every side.
    Note: np.where returns (row, col) indices, so `x` indexes axis 0 here.

    Fixes over the original: the y clamp tested `y_min > y_size` instead of
    `y_max`, leaving y_max unclamped; the x clamp compared axis-0 indices
    against shape[1]; and clamping to size-1 dropped the last row/column.
    Bounds are now clamped consistently to the array shape.
    """
    x_size, y_size = mask.shape
    x, y = np.where(mask==1)
    bbox_mask = np.zeros_like(mask)
    x_min = np.min(x) - stride_factor
    x_max = np.max(x) + stride_factor
    y_min = np.min(y) - stride_factor
    y_max = np.max(y) + stride_factor
    if x_min < 0:
        x_min = 0
    if y_min < 0:
        y_min = 0
    if x_max > x_size:
        x_max = x_size
    if y_max > y_size:
        y_max = y_size
    # Slice end is exclusive, so clamping to the full size keeps the edge
    # row/column inside the box.
    bbox_mask[x_min:x_max, y_min:y_max]=1
    return bbox_mask
def TissueMaskGeneration(slide_obj, level, RGB_min=50):
    """Generate a boolean tissue mask via Otsu thresholding in RGB and HSV space.

    Reads the whole slide at `level`, transposed to (x, y) order. A pixel is
    tissue when it is not bright in all three RGB channels (per-channel Otsu),
    its HSV saturation exceeds the Otsu threshold, and every channel exceeds
    RGB_min.
    """
    img_RGB = slide_obj.read_region((0, 0),level,slide_obj.level_dimensions[level])
    img_RGB = np.transpose((img_RGB.convert('RGB')),axes=[1,0,2])
    img_HSV = rgb2hsv(img_RGB)
    # Bright in all three channels at once == white background.
    background_R = img_RGB[:, :, 0] > threshold_otsu(img_RGB[:, :, 0])
    background_G = img_RGB[:, :, 1] > threshold_otsu(img_RGB[:, :, 1])
    background_B = img_RGB[:, :, 2] > threshold_otsu(img_RGB[:, :, 2])
    tissue_RGB = np.logical_not(background_R & background_G & background_B)
    # Saturated pixels are stained tissue rather than glass/background.
    tissue_S = img_HSV[:, :, 1] > threshold_otsu(img_HSV[:, :, 1])
    # Floor on每channel intensity to reject near-black artefacts.
    min_R = img_RGB[:, :, 0] > RGB_min
    min_G = img_RGB[:, :, 1] > RGB_min
    min_B = img_RGB[:, :, 2] > RGB_min
    tissue_mask = tissue_S & tissue_RGB & min_R & min_G & min_B
    # Alternative fixed-threshold (PAIP-style) rule, kept for reference:
    # r = img_RGB[:,:,0] < 235
    # g = img_RGB[:,:,1] < 210
    # b = img_RGB[:,:,2] < 235
    # tissue_mask = np.logical_or(r,np.logical_or(g,b))
    return tissue_mask
def TissueMaskGenerationPatch(patchRGB):
    '''
    Returns mask of tissue that obeys the threshold set by paip
    '''
    # A pixel is tissue when ANY channel falls below its cut-off
    # (R < 235, G < 210, B < 235); pure white background fails all three.
    below_r = patchRGB[:, :, 0] < 235
    below_g = patchRGB[:, :, 1] < 210
    below_b = patchRGB[:, :, 2] < 235
    return below_r | below_g | below_b
def TissueMaskGeneration_BIN(slide_obj, level):
    """Tissue mask from the HSV saturation channel with a fixed binary threshold.

    Any pixel with saturation > 0 becomes 255; returns a uint8-style array in
    (x, y) orientation.
    """
    img_RGB = np.transpose(np.array(slide_obj.read_region((0, 0),
                       level,
                       slide_obj.level_dimensions[level]).convert('RGB')),
                       axes=[1, 0, 2])
    # NOTE(review): the array is RGB but cv2.COLOR_BGR2HSV is applied -- the
    # channel order is swapped; saturation is order-insensitive enough that
    # this may be deliberate, but confirm.
    img_HSV = cv2.cvtColor(img_RGB, cv2.COLOR_BGR2HSV)
    img_S = img_HSV[:, :, 1]
    _,tissue_mask = cv2.threshold(img_S, 0, 255, cv2.THRESH_BINARY)
    return np.array(tissue_mask)
def TissueMaskGeneration_BIN_OTSU(slide_obj, level):
    """Tissue mask from the HSV saturation channel using Otsu's threshold.

    Same as TissueMaskGeneration_BIN, but the binarisation cut-off is chosen
    automatically by Otsu's method instead of a fixed 0.
    """
    img_RGB = np.transpose(np.array(slide_obj.read_region((0, 0),
                       level,
                       slide_obj.level_dimensions[level]).convert('RGB')),
                       axes=[1, 0, 2])
    img_HSV = cv2.cvtColor(img_RGB, cv2.COLOR_BGR2HSV)
    img_S = img_HSV[:, :, 1]
    _,tissue_mask = cv2.threshold(img_S, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    return np.array(tissue_mask)
def labelthreshold(image, threshold=0.5):
    """Binarize `image`: 1 where value >= threshold, else 0, returned as uint8."""
    return np.uint8(image >= threshold)
def calc_jacc_score(x,y,smoothing=1):
    """Smoothed Jaccard index between two binary masks.

    Accepts masks encoded with {0, 1} or {0, 255}; 255 is treated as 1.
    Fix: the original called np.place on the arguments, silently mutating the
    caller's arrays in place; this version leaves the inputs untouched.
    """
    x = np.where(x == 255, 1, x)
    y = np.where(y == 255, 1, y)
    numerator = np.sum(x*y)
    denominator = np.sum(np.logical_or(x,y))
    return (numerator+smoothing)/(denominator+smoothing)
def get_tumor_fraction(mask_image):
    """Return the fraction of pixels in `mask_image` that are non-zero."""
    total_pixels = np.prod(mask_image.shape)
    return np.count_nonzero(mask_image) / total_pixels
# In[3]:
# DataLoader Implementation
class WSIStridedPatchDataset(Dataset):
"""
Data producer that generate all the square grids, e.g. 3x3, of patches,
from a WSI and its tissue mask, and their corresponding indices with
respect to the tissue mask
"""
def __init__(self, wsi_path, mask_path, label_path=None, image_size=256,
normalize=True, flip='NONE', rotate='NONE',
level=5, sampling_stride=16, roi_masking=True):
"""
Initialize the data producer.
Arguments:
wsi_path: string, path to WSI file
mask_path: string, path to mask file in numpy format OR None
label_mask_path: string, path to ground-truth label mask path in tif file or
None (incase of Normal WSI or test-time)
image_size: int, size of the image before splitting into grid, e.g. 768
patch_size: int, size of the patch, e.g. 256
crop_size: int, size of the final crop that is feed into a CNN,
e.g. 224 for ResNet
normalize: bool, if normalize the [0, 255] pixel values to [-1, 1],
mostly False for debuging purpose
flip: string, 'NONE' or 'FLIP_LEFT_RIGHT' indicating the flip type
rotate: string, 'NONE' or 'ROTATE_90' or 'ROTATE_180' or
'ROTATE_270', indicating the rotate type
level: Level to extract the WSI tissue mask
roi_masking: True: Multiplies the strided WSI with tissue mask to eliminate white spaces,
False: Ensures inference is done on the entire WSI
sampling_stride: Number of pixels to skip in the tissue mask, basically it's the overlap
fraction when patches are extracted from WSI during inference.
stride=1 -> consecutive pixels are utilized
stride= image_size/pow(2, level) -> non-overalaping patches
"""
self._wsi_path = wsi_path
self._mask_path = mask_path
self._label_path = label_path
self._image_size = image_size
self._normalize = normalize
self._flip = flip
self._rotate = rotate
self._level = level
self._sampling_stride = sampling_stride
self._roi_masking = roi_masking
self._preprocess()
def _preprocess(self):
self._slide = openslide.OpenSlide(self._wsi_path)
if self._label_path is not None:
self._label_slide = openslide.OpenSlide(self._label_path)
X_slide, Y_slide = self._slide.level_dimensions[0]
print("Image dimensions: (%d,%d)" %(X_slide,Y_slide))
factor = self._sampling_stride
if self._mask_path is not None:
mask_file_name = os.path.basename(self._mask_path)
if mask_file_name.endswith('.tiff'):
mask_obj = openslide.OpenSlide(self._mask_path)
self._mask = np.array(mask_obj.read_region((0, 0),
self._level,
mask_obj.level_dimensions[self._level]).convert('L')).T
np.place(self._mask,self._mask>0,255)
else:
# Generate tissue mask on the fly
self._mask = TissueMaskGeneration(self._slide, self._level)
# morphological operations ensure the holes are filled in tissue mask
# and minor points are aggregated to form a larger chunk
self._mask = BinMorphoProcessMask(np.uint8(self._mask))
# self._all_bbox_mask = get_all_bbox_masks(self._mask, factor)
# self._largest_bbox_mask = find_largest_bbox(self._mask, factor)
# self._all_strided_bbox_mask = get_all_bbox_masks_with_stride(self._mask, factor)
X_mask, Y_mask = self._mask.shape
# print (self._mask.shape, np.where(self._mask>0))
# imshow(self._mask.T)
# cm17 dataset had issues with images being power's of 2 precisely
# if X_slide != X_mask or Y_slide != Y_mask:
print('Mask (%d,%d) and Slide(%d,%d) '%(X_mask,Y_mask,X_slide,Y_slide))
if X_slide // X_mask != Y_slide // Y_mask:
raise Exception('Slide/Mask dimension does not match ,'
' X_slide / X_mask : {} / {},'
' Y_slide / Y_mask : {} / {}'
.format(X_slide, X_mask, Y_slide, Y_mask))
self._resolution = np.round(X_slide * 1.0 / X_mask)
if not np.log2(self._resolution).is_integer():
raise Exception('Resolution (X_slide / X_mask) is not power of 2 :'
' {}'.format(self._resolution))
# all the idces for tissue region from the tissue mask
self._strided_mask = np.ones_like(self._mask)
ones_mask = np.zeros_like(self._mask)
ones_mask[::factor, ::factor] = self._strided_mask[::factor, ::factor]
if self._roi_masking:
self._strided_mask = ones_mask*self._mask
# self._strided_mask = ones_mask*self._largest_bbox_mask
# self._strided_mask = ones_mask*self._all_bbox_mask
# self._strided_mask = self._all_strided_bbox_mask
else:
self._strided_mask = ones_mask
# print (np.count_nonzero(self._strided_mask), np.count_nonzero(self._mask[::factor, ::factor]))
# imshow(self._strided_mask.T, self._mask[::factor, ::factor].T)
# imshow(self._mask.T, self._strided_mask.T)
self._X_idcs, self._Y_idcs = np.where(self._strided_mask)
self._idcs_num = len(self._X_idcs)
def __len__(self):
return self._idcs_num
def save_scaled_imgs(self):
scld_dms = self._slide.level_dimensions[2]
self._slide_scld = np.transpose(self._slide.read_region((0,0),2,scld_dms).convert('RGB'),[1,0,2])
if self._label_path is not None:
self._label_scld = np.array(self._label_slide.read_region((0,0),4,scld_dms).convert('L')).T
np.place(self._label_scld,self._label_scld>0,255)
def save_get_mask(self, save_path):
np.save(save_path, self._mask)
def get_mask(self):
return self._mask
def get_strided_mask(self):
return self._strided_mask
def __getitem__(self, idx):
    """Return one patch as (image, x, y, label_image).

    image       -- float32 array transposed to (W, H, C); scaled to [-1, 1]
                   when self._normalize is set.
    x, y        -- level-0 top-left corner of the patch (after clamping).
    label_image -- uint8 (W, H) array with positives forced to 255; all
                   zeros when no label slide is available.
    """
    x_coord, y_coord = self._X_idcs[idx], self._Y_idcs[idx]
    x_max_dim, y_max_dim = self._slide.level_dimensions[0]
    # Centre the patch on the sampled mask coordinate (mask coordinates are
    # at 1/self._resolution of level 0).
    x = int(x_coord * self._resolution - self._image_size // 2)
    y = int(y_coord * self._resolution - self._image_size // 2)
    # Clamp so the patch lies fully inside the slide.
    # (Fix: previously used the module-level `image_size` global here
    # instead of this dataset's own self._image_size.)
    if x > (x_max_dim - self._image_size):
        x = x_max_dim - self._image_size
    elif x < 0:
        x = 0
    if y > (y_max_dim - self._image_size):
        y = y_max_dim - self._image_size
    elif y < 0:
        y = 0
    img = self._slide.read_region(
        (x, y), 0, (self._image_size, self._image_size)).convert('RGB')
    if self._label_path is not None:
        label_img = self._label_slide.read_region(
            (x, y), 0, (self._image_size, self._image_size)).convert('L')
    else:
        #print('No label img')
        # No label slide available: use an all-zero (background) label.
        label_img = Image.fromarray(
            np.zeros((self._image_size, self._image_size), dtype=np.uint8))
    # Apply augmentations while the data are still PIL images.
    # (Fix: the originals were converted to numpy arrays first, so
    # ndarray.transpose() was being called with PIL constants and crashed
    # whenever flip/rotate was enabled.)
    if self._flip == 'FLIP_LEFT_RIGHT':
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
        label_img = label_img.transpose(Image.FLIP_LEFT_RIGHT)
    if self._rotate == 'ROTATE_90':
        img = img.transpose(Image.ROTATE_90)
        label_img = label_img.transpose(Image.ROTATE_90)
    if self._rotate == 'ROTATE_180':
        img = img.transpose(Image.ROTATE_180)
        label_img = label_img.transpose(Image.ROTATE_180)
    if self._rotate == 'ROTATE_270':
        img = img.transpose(Image.ROTATE_270)
        label_img = label_img.transpose(Image.ROTATE_270)
    # A PIL image converts to numpy as (H, W[, C]); transpose to the
    # (W, H[, C]) layout the rest of the pipeline expects.
    img = np.transpose(np.array(img, dtype=np.float32), [1, 0, 2])
    label_img = np.array(label_img, dtype=np.uint8).T
    np.place(label_img, label_img > 0, 255)
    if self._normalize:
        img = (img - 128.0) / 128.0
    return (img, x, y, label_img)
# In[5]:
#Model
# Build the segmentation network and load the fold-0 checkpoint weights.
model = get_inception_resnet_v2_unet_softmax((None, None), weights=None)
#model_path = glob.glob('../../results/saved_models/dense_80k/fold2/model.06*')[0]
model_root_path = '../../results/saved_models/incep_viable_200k/'
model_path = glob.glob(os.path.join(model_root_path,'5fold_0/model.10*'))[0]
core_config = tf.ConfigProto()
core_config.gpu_options.allow_growth = False
# core_config.gpu_options.per_process_gpu_memory_fraction=0.4
session =tf.Session(config=core_config)
K.set_session(session)
model.load_weights(model_path)
print ("Loaded Model Weights")
#Stitcher
start_time = time.time()
# Cross-validation / patch-sampling configuration for hard-example mining.
kfold_k = 5
fold = 0
mask_path = None
image_size = 1024
im_by2 = image_size//2  # half patch side (quadrant size)
im_by4 = image_size//4  # quarter patch side (quadrant centre offset)
sampling_stride = image_size #At the 0 level
batch_size = 4
mining_threshold = 0.6 #Lesser than which to mine points
mined_points_path = os.path.join(model_root_path, '%dfold_%d'%(kfold_k,fold),'mined_points')
try:
    os.makedirs(mined_points_path)
except FileExistsError:
    # Ask for confirmation before appending into a non-empty output folder.
    if os.listdir(mined_points_path) != []:
        print("Out folder exists and is non-empty, continue?")
        input()
def save_mined_points(x,y,tf):
    """Append one mined point (slide paths, level-0 coords, tumor fraction).

    Relies on the module globals `wsi_path`, `label_path` and
    `mined_points_path` set by the mining loop below.

    NOTE(review): points with tumor fraction > 0 go to 'normal.txt' and
    tumor-free points to 'tumor.txt' — this looks inverted; confirm the
    intended file naming.
    """
    if tf > 0.0:
        coord_type = 'normal'
    else:
        coord_type = 'tumor'
    with open(os.path.join(mined_points_path,'%s.txt' % coord_type),'a') as f:
        f.write('%s,%s,%d,%d,%f\n'%(wsi_path,label_path,x,y,tf))
#Get train ids from cv split file containing path to wsi images
sample_ids = [ x.split('/')[-2] for x in list(pd.read_csv('../../data/raw-data/cross_val_splits_%d_whole/training_fold_%d.csv'%(kfold_k,fold))['Image_Path'])]
for i,sample_id in enumerate(sample_ids):
    print(i,len(sample_id),sample_id)
    sample_dir = os.path.join('..','..','data','raw-data','train',sample_id)
    wsi_path = glob.glob(os.path.join(sample_dir,'*.svs'))[0]
    label_path = glob.glob(os.path.join(sample_dir,'*viable*.tiff'))[0]
    dataset_obj = WSIStridedPatchDataset(wsi_path,
                                         mask_path,
                                         label_path,
                                         image_size=image_size,
                                         normalize=True,
                                         flip=None, rotate=None,
                                         level=2, sampling_stride=sampling_stride//16, roi_masking=True)
    # Running Jaccard numerator/denominator and per-patch scores for this slide.
    meta_dict= {'num': 0, 'den': 0, 'jaccs':[]}
    dataloader = DataLoader(dataset_obj, batch_size=batch_size, num_workers=0, drop_last=True)
    dataset_obj.save_scaled_imgs()
    imsave(dataset_obj.get_mask(), dataset_obj.get_strided_mask(), dataset_obj._label_scld, dataset_obj._slide_scld, out=os.path.join(mined_points_path, sample_id+'.png'))
    print("Total iterations: %d and %d" % (dataloader.__len__(),dataloader.dataset.__len__()))
    # NOTE(review): the loop variable `i` below shadows the outer sample
    # index `i` — rename one of them to avoid confusion.
    for i,(data, x, y, label) in enumerate(dataloader):
        #print(i,x,y)
        image_patches = data.cpu().data.numpy()
        # NOTE(review): patch_mask is computed but never used afterwards.
        patch_mask = TissueMaskGenerationPatch(image_patches[0]*128+128)
        label_patches = label.cpu().data.numpy()
        pred_map = model.predict(image_patches,verbose=0,batch_size=1)
        for j in range(batch_size):
            jacc_score = calc_jacc_score(labelthreshold(pred_map[j,:,:,1],threshold=0.45),label_patches[j])
            meta_dict['jaccs'].append(jacc_score)
            meta_dict['num']+=int(np.sum(labelthreshold(pred_map[j,:,:,1],threshold=0.45)*label_patches[j].clip(max=1)))
            meta_dict['den']+=int(np.sum(np.logical_or(labelthreshold(pred_map[j,:,:,1],threshold=0.45),label_patches[j])))
            # Poorly predicted patches are mined: record the tumor fraction
            # of each quadrant centre as a new candidate sampling point.
            if jacc_score < mining_threshold:
                #top-left
                tf = get_tumor_fraction(label_patches[j,:im_by2,:im_by2])
                point_x = int(x[j]) + im_by4
                point_y = int(y[j]) + im_by4
                save_mined_points(point_x,point_y,tf)
                #top-right
                tf = get_tumor_fraction(label_patches[j,:im_by2,im_by2:image_size])
                point_y += im_by4
                save_mined_points(point_x,point_y,tf)
                #bottom-right
                tf = get_tumor_fraction(label_patches[j,im_by2:image_size,im_by2:image_size])
                point_x += im_by4
                save_mined_points(point_x,point_y,tf)
                #bottom-left
                tf = get_tumor_fraction(label_patches[j,im_by2:image_size,:im_by2])
                point_y -= im_by4
                save_mined_points(point_x,point_y,tf)
            #print("Jaccard: %.3f" %(jacc_score))
            #imshow(pred_map[j,:,:,1],label_patches[j],np.uint8(image_patches[j]*128+128))
            #imshow(pred_map[j,:,:,1],labelthreshold(pred_map[j,:,:,1],0.45),label_patches[j])
        if (i)%100==0:
            print("Completed %i Time elapsed %.2f min"%(i,(time.time()-start_time)/60))
            print(meta_dict['num'],meta_dict['den'])
    # Slide-level Jaccard index and score histogram, dumped as JSON.
    meta_dict['jaccs_index'] = meta_dict['num']/meta_dict['den']
    meta_dict['histogram'] = str(np.histogram(meta_dict['jaccs'], bins=11))
    with open(os.path.join(mined_points_path, sample_id+'.json'),'w') as f:
        json.dump(meta_dict,f)
    start_time = time.time()
| [
"haranrajkumar97@gmail.com"
] | haranrajkumar97@gmail.com |
30527b189207581978417b435e5e8bbf4dfc2c7a | 16402373633ceea610027b09111c407f8872cf4e | /code/files/1.py | d9f13d87571192c55fbc529556023c9b31d0ee43 | [] | no_license | rahulcs754/100daysofcode-Python | 0e1ae1f2bc0911a71c9ee15f2c57f7d3215aad01 | 555f03dc0414ac6ef6fd6db84de8245e5e93caa5 | refs/heads/master | 2020-04-19T16:18:13.871768 | 2019-03-07T15:33:13 | 2019-03-07T15:33:13 | 168,300,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 762 | py | #Define Node Class
class Node:
    """A single node of a singly linked list."""
    #Function to initialize the node object
    def __init__(self, data):
        self.data = data  # payload stored in this node
        self.next = None  # link to the following node (None = end of list)
#linked list class
class LinkedList:
    """Minimal singly linked list that only tracks its head node."""

    #function to initialize the linked
    def __init__(self):
        # Empty list: no head node yet.
        self.head = None

    def printList(self):
        """Print every node's data on one line, following `next` pointers."""
        temp = self.head
        while(temp):
            # Fix: `print temp.data,` is Python 2 syntax (SyntaxError under
            # Python 3); `end=' '` reproduces the trailing-space behaviour.
            print(temp.data, end=' ')
            temp = temp.next
#Code execution starts here
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 3 by chaining freshly created nodes,
    # then print its contents.
    llist = LinkedList()
    llist.head = Node(1)
    llist.head.next = Node(2)
    llist.head.next.next = Node(3)
    llist.printList()
| [
"rahul.shukla754@gmail.com"
] | rahul.shukla754@gmail.com |
22c8f6ba095782b5de2564ab577536fd248761db | 144d5addc2eaf61e3f0590b5e0cd3e70f26e3b28 | /ImageLoader.py | 23923133c5da70550664fd5db7022ddd58fd12b9 | [
"MIT"
] | permissive | Fmccline/ConnectFour | 985c8cc5d8a627f5f37a6d1e56922badd1a98f46 | c92b9b76c1fdbb567589484452eff2d64af4b456 | refs/heads/master | 2020-04-20T14:04:26.414009 | 2019-03-13T00:53:50 | 2019-03-13T00:53:50 | 168,887,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from PIL import Image, ImageTk
class ImageLoader:
    """Helper for loading an image file as a Tk-compatible photo object."""

    @staticmethod
    def load_image(path):
        """Open the image at `path` and wrap it in an ImageTk.PhotoImage."""
        return ImageTk.PhotoImage(Image.open(path))
| [
"frankmcline@gmail.com"
] | frankmcline@gmail.com |
e2b351f7ed30c8c47d2927dc41a950e09289b13b | 6f7df24b2e563b43c6f78d9b7b987d8a3abb980e | /aoc/aoc2020/days/day19/part_2.py | 019ab79f1b627cd4d854a28542f0cbdcdb5b6552 | [] | no_license | bendikjohansen/adventofcode | 1ca2d9a674c84faafffd2e087592eb4829c3fd25 | 8381c2e44937124779f10a0c8791761e2db36944 | refs/heads/main | 2023-02-05T17:45:34.973773 | 2020-12-28T20:10:22 | 2020-12-28T20:10:22 | 325,106,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,940 | py | from collections import defaultdict
from typing import List, Tuple
from aoc.utils import get_input
def parse_rules(rules_raw: str):
    """Parse raw rule lines into a dict of mutually referencing rule records.

    Each record has keys 'value' (literal character or None), 'ruleset'
    (list of alternatives, each a list of references to other records) and
    'key' (the rule's own index). Because the records reference each other
    directly, cyclic grammars are represented naturally.
    """
    rules = defaultdict(dict)
    for line in rules_raw.splitlines():
        index_part, body = line.split(': ')
        index = int(index_part)
        # A literal rule looks like `"a"`; its character sits at body[1].
        literal = body[1] if body[1] in 'ab' else None
        alternatives = []
        for alternative in body.split('|'):
            refs = [rules[int(token)] for token in alternative.split() if token.isnumeric()]
            alternatives.append(refs)
        record = rules[index]
        record['value'] = literal
        record['ruleset'] = alternatives
        record['key'] = index
    return rules
def matches(rule, message: str, depth: int = 0) -> List[Tuple[bool, str]]:
    """Return (matched, remaining_message) pairs for every way `rule` can
    consume a prefix of `message`.

    Recursion is capped at depth 16 so the cyclic part-two grammar cannot
    recurse forever; a capped branch simply fails to match.
    """
    if depth > 16:
        # Fix: this previously returned the flat pair [False, message];
        # callers then unpacked it as a list of tuples and crashed. Return
        # the same list-of-tuples shape as every other branch.
        return [(False, message)]
    if rule['value']:
        # Literal rule: consume one character.
        return [(message.startswith(rule['value']), message[1:])]
    match_list = []
    for rules in rule['ruleset']:
        # Thread the remaining message through each sub-rule in sequence,
        # keeping every still-viable (matched, rest) candidate.
        n_matches = [(True, message)]
        for n_rule in rules:
            n_temp = []
            for is_valid, n_message in n_matches:
                if is_valid:
                    n_temp.extend(matches(n_rule, n_message, depth + 1))
            n_matches = n_temp
        match_list.extend(n_matches)
    return list(filter(lambda x: x[0], match_list))
def strictly_matches(rule, message: str) -> bool:
    """True iff some match of `rule` consumes `message` entirely."""
    for valid, remainder in matches(rule, message):
        if valid and not remainder:
            return True
    return False
def solve_part_two(rules_and_messages: str) -> int:
    """Count messages fully matching rule 0 after patching rules 8 and 11
    with their looping (part two) definitions."""
    rules_raw, messages_raw = rules_and_messages.split('\n\n')
    rules = parse_rules(rules_raw + "\n8: 42 | 42 8\n11: 42 31 | 42 11 31")
    return sum(1 for msg in messages_raw.splitlines()
               if strictly_matches(rules[0], msg))
if __name__ == "__main__":
rules_and_messages = get_input(2020, 19)
result = solve_part_two(rules_and_messages)
print(result)
| [
"bendikj@gmail.com"
] | bendikj@gmail.com |
b93fb73cfb00f24fe840a59511f2030d637ccdd0 | f6297bc350a1fa4935bb19978e2c209fe46847f9 | /exercises/07_files/task_7_3b.py | 0a8748fc0b04d26b469d7c50935c07084a2c0a13 | [] | no_license | LenaPetrenko/web-python-prog | c02ef1e4195df4738824819c2696370731f515ba | dd14c5c9b8f6f600e3bc5ce28236042f569f44ad | refs/heads/master | 2023-01-06T06:03:18.643768 | 2020-11-03T23:39:10 | 2020-11-03T23:39:10 | 303,650,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 859 | py | # -*- coding: utf-8 -*-
"""
Задание 7.3b
Сделать копию скрипта задания 7.3a.
Переделать скрипт:
- Запросить у пользователя ввод номера VLAN.
- Выводить информацию только по указанному VLAN.
Ограничение: Все задания надо выполнять используя только пройденные темы.
"""
import re
# Ask the user which VLAN to display.
new_vlan = input('Введите номер vlan: ')
new_vlan = int(new_vlan)
# A CAM-table data line is recognised by a MAC address (xxxx.xxxx.xxxx).
mac_regex = re.compile(r'([0-9A-Fa-f]{4}[.]){2}([0-9A-Fa-f]{4})')
# Fix: the file handle was previously never closed; `with` guarantees it.
with open('/home/std/python-web/exercises/07_files/CAM_table.txt', 'r') as file_table:
    for line in file_table:
        # Fix: compare to None with `is not`, not `!=`.
        if mac_regex.search(line) is not None:
            vlan = int(line.split()[0])
            # Print only entries of the requested VLAN, dropping 'DYNAMIC '.
            if new_vlan == vlan:
                print(line.replace('DYNAMIC ', ''), end='')
| [
"noreply@github.com"
] | noreply@github.com |
1acd544bb22b1b4f7bf92645d30914fe1d19e910 | 31d4371ad53133da613fae0a129c1283ae7ee64d | /python/test_lyapunov.py | 22fa56e67fdaafc74083693b07c065982d5838e1 | [] | no_license | johanickla/n_body_sim_ws20201 | 637cb83177bbf6a036cdf93b3a7b79a95ba68050 | b0d5e426183f6d29720c4caec7ac35f09799bc18 | refs/heads/main | 2023-03-20T03:50:35.690809 | 2021-03-18T18:40:52 | 2021-03-18T18:40:52 | 310,010,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,027 | py | import rebound
import numpy as np
import warnings
import matplotlib.pyplot as plt
import visualize_orbit
h = 0.696
def test_1():
    """Compare the Lyapunov exponent after one integrate(10) call against
    the exponent after two successive integrate(5) calls; prints both for
    manual comparison."""
    sim1 = visualize_orbit.setup('Helga', h)
    print('Number of particles before "init_megno" :', sim1.N)
    sim1.init_megno()
    print('Number of particles after "init_megno" :', sim1.N)
    sim1.status()
    sim1.integrate(10)
    l1 = sim1.calculate_lyapunov()
    # sim1.status()
    sim2 = visualize_orbit.setup('Helga', h)
    sim2.init_megno()
    sim2.integrate(5)
    # sim2.status()
    sim2.integrate(5)
    l2 = sim2.calculate_lyapunov()
    # sim2.status()
    print('L_exp 1: ', round(l1,3), ' L_exp 2: ', round(l2,3))
def test_2():
    """Plot the running Lyapunov exponent for several sampling resolutions.

    For each entry of `a`, the simulation is integrated over [0, 100]
    sampled at that many points; the per-sample exponents are plotted and
    the figure is saved to test_lyapunov.png.
    """
    # a = [16,64,256,1024,4098]
    a = [2,4,8,16,32,64]
    Lyapunov=[]
    S=[]
    T = []
    for i in range(len(a)):
        times = np.linspace(0, 100, a[i])
        l = np.zeros(len(times))
        sim = visualize_orbit.setup('Helga', h)
        sim.integrator = "whfast"
        sim.dt = 0.01
        sim.init_megno()
        for k in range(a[i]) :
            time = times[k]
            sim.integrate(time)
            exp = round(sim.calculate_lyapunov(),4)
            l[k] = exp
            # sim.status()
        Lyapunov.append(l)
        T.append(times)
        # Sum of the sampled exponents for this resolution (not plotted).
        s = round(sum(Lyapunov[i]),3)
        S.append(s)
    # print('Lyapunov:' ,Lyapunov)
    # print('rowwise sum over Lyapunov array:', S)
    # print('Times: ', T)
    # sim.status()
    fig, ax1 = plt.subplots(1,1,figsize=(10,6))
    ax1.grid()
    for i in range(len(a)):
        ax1.plot(T[i],Lyapunov[i],'o-', label = '%d' %a[i])
    # ax1.set_xscale('log')
    # ax1.set_yscale('log')
    ax1.legend()
    fig.savefig('test_lyapunov.png')
def test_3():
    """Print the Lyapunov exponent at ten evenly spaced times in [0, 100]."""
    sim = visualize_orbit.setup('Helga', h)
    sim.init_megno()
    for time in np.linspace(0,100,10):
        sim.integrate(time)
        exp = round(sim.calculate_lyapunov(),4)
        print(exp)
    sim.status()
if __name__ == "__main__":
# test_1()
test_2()
# test_3()
| [
"49444603+johanickl@users.noreply.github.com"
] | 49444603+johanickl@users.noreply.github.com |
d01e4b67652540008cebe7e18a0e5deb0ff80ca9 | 38d8ae5f83c4ad29ded5a7de7e6f23f6fae041eb | /Chapter03/03_atari_gan.py | 4ea43f8d571f59d36f9f6f0a8e85a65143d2e5b7 | [] | no_license | andrelip/deep_reinforcement_learning_exercises | bff0ee950e730f3621fd2ba74dbbe20629205d4c | 362f90ebb85e589fea4d2bff04fac0a456819af4 | refs/heads/master | 2022-12-08T07:35:10.293593 | 2019-11-06T16:22:05 | 2019-11-06T16:22:05 | 220,040,450 | 0 | 0 | null | 2022-11-21T22:22:24 | 2019-11-06T16:23:22 | Python | UTF-8 | Python | false | false | 7,080 | py | #!/usr/bin/env python
import random
import argparse
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import torchvision.utils as vutils
import gym
import gym.spaces
import numpy as np
log = gym.logger
log.set_level(gym.logger.INFO)
# GAN hyperparameters.
LATENT_VECTOR_SIZE = 100  # size of the generator's input noise vector
DISCR_FILTERS = 64        # base number of discriminator conv filters
GENER_FILTERS = 64        # base number of generator conv filters
BATCH_SIZE = 16
# dimension input image will be rescaled
IMAGE_SIZE = 64
LEARNING_RATE = 0.0001
REPORT_EVERY_ITER = 100       # log mean losses every N iterations
SAVE_IMAGE_EVERY_ITER = 1000  # dump sample image grids every N iterations
class InputWrapper(gym.ObservationWrapper):
    """
    Preprocessing of input numpy array:
    1. resize image into predefined size
    2. move color channel axis to a first place
    """
    def __init__(self, *args):
        super(InputWrapper, self).__init__(*args)
        assert isinstance(self.observation_space, gym.spaces.Box)
        old_space = self.observation_space
        # Recompute the Box bounds by pushing the old bounds through the
        # same transform that is applied to observations.
        self.observation_space = gym.spaces.Box(self.observation(old_space.low), self.observation(old_space.high),
                                                dtype=np.float32)

    def observation(self, observation):
        # resize image
        new_obs = cv2.resize(observation, (IMAGE_SIZE, IMAGE_SIZE))
        # transform (210, 160, 3) -> (3, 210, 160)
        new_obs = np.moveaxis(new_obs, 2, 0)
        # scale uint8 pixel values to [0, 1] floats
        return new_obs.astype(np.float32) / 255.0
class Discriminator(nn.Module):
    """DCGAN-style discriminator: five strided convolutions mapping a
    (C, 64, 64) image to a single real/fake probability."""
    def __init__(self, input_shape):
        super(Discriminator, self).__init__()
        # this pipe converges image into the single number
        self.conv_pipe = nn.Sequential(
            nn.Conv2d(in_channels=input_shape[0], out_channels=DISCR_FILTERS,
                      kernel_size=4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS, out_channels=DISCR_FILTERS*2,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(DISCR_FILTERS*2),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS * 2, out_channels=DISCR_FILTERS * 4,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(DISCR_FILTERS * 4),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS * 4, out_channels=DISCR_FILTERS * 8,
                      kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(DISCR_FILTERS * 8),
            nn.ReLU(),
            nn.Conv2d(in_channels=DISCR_FILTERS * 8, out_channels=1,
                      kernel_size=4, stride=1, padding=0),
            nn.Sigmoid()
        )

    def forward(self, x):
        conv_out = self.conv_pipe(x)
        # Flatten the (N, 1, 1, 1) conv output into a (N,) probability vector.
        return conv_out.view(-1, 1).squeeze(dim=1)
class Generator(nn.Module):
    """DCGAN-style generator: transposed convolutions that deconvolve a
    latent vector into an output image, with a final Tanh activation."""
    def __init__(self, output_shape):
        super(Generator, self).__init__()
        # pipe deconvolves input vector into (3, 64, 64) image
        self.pipe = nn.Sequential(
            nn.ConvTranspose2d(in_channels=LATENT_VECTOR_SIZE, out_channels=GENER_FILTERS * 8,
                               kernel_size=4, stride=1, padding=0),
            nn.BatchNorm2d(GENER_FILTERS * 8),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS * 8, out_channels=GENER_FILTERS * 4,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(GENER_FILTERS * 4),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS * 4, out_channels=GENER_FILTERS * 2,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(GENER_FILTERS * 2),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS * 2, out_channels=GENER_FILTERS,
                               kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(GENER_FILTERS),
            nn.ReLU(),
            nn.ConvTranspose2d(in_channels=GENER_FILTERS, out_channels=output_shape[0],
                               kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )

    def forward(self, x):
        return self.pipe(x)
def iterate_batches(envs, batch_size=BATCH_SIZE):
    """Endlessly yield float32 tensors of stacked observations gathered by
    stepping randomly chosen environments with random actions."""
    samples = [env.reset() for env in envs]
    while True:
        env = random.choice(envs)
        obs, _reward, done, _info = env.step(env.action_space.sample())
        # Keep only frames with some signal (mean pixel value > 0.01).
        if np.mean(obs) > 0.01:
            samples.append(obs)
        if len(samples) == batch_size:
            yield torch.tensor(np.array(samples, dtype=np.float32))
            samples.clear()
        if done:
            env.reset()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action='store_true', help="Enable cuda computation")
args = parser.parse_args()
device = torch.device("cuda" if args.cuda else "cpu")
envs = [InputWrapper(gym.make(name)) for name in ('Breakout-v0', 'AirRaid-v0', 'Pong-v0')]
input_shape = envs[0].observation_space.shape
net_discr = Discriminator(input_shape=input_shape).to(device)
net_gener = Generator(output_shape=input_shape).to(device)
objective = nn.BCELoss()
gen_optimizer = optim.Adam(params=net_gener.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
dis_optimizer = optim.Adam(params=net_discr.parameters(), lr=LEARNING_RATE, betas=(0.5, 0.999))
writer = SummaryWriter()
gen_losses = []
dis_losses = []
iter_no = 0
true_labels_v = torch.ones(BATCH_SIZE, dtype=torch.float32, device=device)
fake_labels_v = torch.zeros(BATCH_SIZE, dtype=torch.float32, device=device)
for batch_v in iterate_batches(envs):
# generate extra fake samples, input is 4D: batch, filters, x, y
gen_input_v = torch.FloatTensor(BATCH_SIZE, LATENT_VECTOR_SIZE, 1, 1).normal_(0, 1).to(device)
batch_v = batch_v.to(device)
gen_output_v = net_gener(gen_input_v)
# train discriminator
dis_optimizer.zero_grad()
dis_output_true_v = net_discr(batch_v)
dis_output_fake_v = net_discr(gen_output_v.detach())
dis_loss = objective(dis_output_true_v, true_labels_v) + objective(dis_output_fake_v, fake_labels_v)
dis_loss.backward()
dis_optimizer.step()
dis_losses.append(dis_loss.item())
# train generator
gen_optimizer.zero_grad()
dis_output_v = net_discr(gen_output_v)
gen_loss_v = objective(dis_output_v, true_labels_v)
gen_loss_v.backward()
gen_optimizer.step()
gen_losses.append(gen_loss_v.item())
iter_no += 1
if iter_no % REPORT_EVERY_ITER == 0:
log.info("Iter %d: gen_loss=%.3e, dis_loss=%.3e", iter_no, np.mean(gen_losses), np.mean(dis_losses))
writer.add_scalar("gen_loss", np.mean(gen_losses), iter_no)
writer.add_scalar("dis_loss", np.mean(dis_losses), iter_no)
gen_losses = []
dis_losses = []
if iter_no % SAVE_IMAGE_EVERY_ITER == 0:
writer.add_image("fake", vutils.make_grid(gen_output_v.data[:64]), iter_no)
writer.add_image("real", vutils.make_grid(batch_v.data[:64]), iter_no)
| [
"andrestephano@gmail.com"
] | andrestephano@gmail.com |
113a6b7e1708f89fe1a8a5c92d7e15b29f340d23 | 7d738ec701db03e1ed9c2c97570d71692d5a783c | /models/club.py | 3703d73aec5aad737376519d7a3d79cfdaf516b2 | [] | no_license | CristianoYL/MY_TEAM_API | b7579d884f0d08874437bd4c51640ff9bb9d33c1 | c6940fb8b902bf690d6030d00bc4eef9a4559580 | refs/heads/master | 2022-12-10T02:36:28.912345 | 2018-01-01T08:26:11 | 2018-01-01T08:26:11 | 86,953,233 | 2 | 0 | null | 2022-12-07T23:56:17 | 2017-04-02T01:00:06 | Python | UTF-8 | Python | false | false | 1,106 | py | from db import db
class ClubModel(db.Model):
    """SQLAlchemy model representing a club, with lookup helpers."""
    __tablename__ = 'club'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50))
    info = db.Column(db.String(2000))

    def __init__(self,_id,name,info):
        # The id is supplied by the caller rather than left to the database.
        self.id = _id
        self.name = name
        self.info = info

    def json(self):
        """Return a JSON-serializable representation of the club."""
        return {
            "id" : self.id,
            "name" : self.name,
            "info" : self.info
        }

    @classmethod
    def find_all(cls):
        return cls.query.all()

    @classmethod
    def find_by_name(cls,name): # use case sensitive exact matching
        # NOTE(review): returns the Query object itself (not .first()/.all()),
        # unlike find_by_id — confirm callers expect an iterable query.
        return cls.query.filter_by(name=name)

    @classmethod
    def find_by_name_fuzzy(cls,name): # use case insensitive partial matching
        return cls.query.filter(cls.name.ilike("%"+name+"%"))

    @classmethod
    def find_by_id(cls,_id):
        return cls.query.filter_by(id=_id).first()

    def save_to_db(self): ## upsert
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self): ## delete
        db.session.delete(self)
        db.session.commit()
| [
"li.yin.cristiano@rutgers.edu"
] | li.yin.cristiano@rutgers.edu |
e472330d2bf93d69455f4f677fd0b05d340836ee | 1a8b9758a5663399b74a02d2be15cf50926c46c9 | /Python/Principles_of_Compiler/SLR(1)/SLR.py | bfc49faae5809bcf3097c19833d1eb164cff6046 | [] | no_license | paullinzipeng/School_Works | 37b2a0bc602f0730ffd32cd90aea619ad30921cb | 9d535dd8f705035eccca34ad3365edc7bb99baa5 | refs/heads/master | 2020-03-16T06:31:29.631783 | 2018-07-07T07:26:16 | 2018-07-07T07:26:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,983 | py | from Gramma2 import GRAMMA, VOL
from Generator import Generator
from copy import deepcopy
class SLR:
    """SLR(1) parser-table builder: from a grammar it derives productions,
    LR(0) items, the canonical item-set collection, and ACTION/GOTO tables.

    NOTE(review): the original file's indentation was lost; nesting below is
    reconstructed from control-flow keywords and should be verified against
    the upstream source.
    """
    def __init__(self, GRAMMA, VOL):
        self.GRAMMA = GRAMMA
        self.VOL = VOL
        #self.Generator = Generator(GRAMMA, VOL)
        #self.Generator.Free_Left_Recursive()
        self.Producer = []   # productions, as [left, right] pairs
        self.Projects = []   # LR(0) items ('@' marks the dot position)
        # NOTE(review): `[] * 100` is just an empty list — the 100 has no effect.
        self.C = [] * 100    # canonical collection of item sets
        self.ensure = 0
        # Augmented start production S'_ -> S.
        self.Extand_Seq = [list(self.GRAMMA)[0] + '_', [list(self.GRAMMA)[0]]]
        #self.Extand_Seq = ['E_', ['E']]
        #self.Extand_Seq = ['S_', ['S']]
        self.Has_Deri = 0
        #self.Generator.First_Gene()
        #self.FIRST = self.Generator.first
        #self.Generator.Follow_Gene()
        #self.FOLLOW = self.Generator.follow
        self.FOLLOW = { left : [] for left in self.GRAMMA.keys()}
        self.ACTION = {status : { not_end : {} for not_end in self.VOL['T'] } for status in range(0, 1)}
        self.GOTO = {status : { not_end : {} for not_end in self.VOL['N'] } for status in range(0, 1)}

    def Create_Producer_Sequence(self):
        """Flatten the grammar into self.Producer, starting with the
        augmented production; '|' separators are skipped."""
        self.Producer.append(self.Extand_Seq)
        for key in self.GRAMMA.items():
            for right in self.GRAMMA[key[0]]:
                if right != '|':
                    self.Producer.append([key[0], right])

    def insert(self, original, new, pos):
        # Return `original` with string `new` inserted at index `pos`.
        return original[:pos] + new + original[pos:]

    def Create_Project_Sequence(self):
        """Generate every LR(0) item by placing the dot '@' at each
        position of each production's right-hand side."""
        counter = 0
        for producer in self.Producer:
            for i in range(0, (len(producer[1]) + 1)):
                self.Projects.append(deepcopy((producer)))
                if isinstance(self.Projects[counter][1], list):
                    self.Projects[counter][1].insert(i, '@')
                else:
                    self.Projects[counter][1] = self.insert(self.Projects[counter][1], '@', i)
                counter = counter + 1

    def Is_Derivation(self, pos):
        """Return the distinct symbols that appear right after the dot in
        the item set self.C[pos]."""
        Derivation_Sequence = []
        for proj in self.C[pos]:
            if proj[1][-1] == '@':
                continue
            if proj[1][proj[1].index('@') + 1] in list(set(self.VOL['N'])^set(self.VOL['T'])):
                if proj[1][proj[1].index('@') + 1] in Derivation_Sequence:
                    continue
                Derivation_Sequence.append(proj[1][proj[1].index('@') + 1])
        return Derivation_Sequence

    def GO(self, C, Deri, pos):
        """Compute the GO(C, Deri) transition: advance the dot over `Deri`
        in every applicable item, filling ACTION/GOTO as a side effect.
        Returns 1 when new items were appended to self.C[pos]."""
        flag = 0
        for c in C:
            if c[1][-1] != '@':
                if Deri in c[1][c[1].index('@') + 1]:
                    # If the advanced item already exists in some state,
                    # just record the transition to that state.
                    flag2 = 0
                    for c2 in self.C:
                        if self.Projects[self.Projects.index(c) + 1] in c2:
                            if Deri in self.VOL['T']:
                                self.ACTION[self.Has_Deri][Deri] = self.C.index(c2)
                            flag2 = 1
                            break
                    if flag2 == 1:
                        continue
                    self.C[pos].append(self.Projects[self.Projects.index(c) + 1])
                    if Deri in self.VOL['N']:
                        self.GOTO[self.Has_Deri][Deri] = pos
                    if Deri in self.VOL['T']:
                        self.ACTION[self.Has_Deri][Deri] = pos
                    flag = 1
        if pos != self.C.index(self.C[pos]):
            self.C[pos] = []
        if flag == 1:
            return 1

    def Is_In_C(self, i):
        """Return the index of a duplicate of item set i, or False.
        NOTE(review): reconstructed as for/else — verify against upstream."""
        for c in self.C:
            if self.C[i] == c and self.C.index(c) != i:
                return self.C.index(c)
        else:
            return False

    def Closure(self, i):
        """Extend item set i with every item '@X...' for each symbol X that
        follows a dot in the set (one closure step)."""
        Derivation_Sequence = self.Is_Derivation(i)
        if len(Derivation_Sequence) != 0:
            #self.Has_Deri = i
            for deri in Derivation_Sequence:
                for proj in self.Projects:
                    if deri in proj and proj[1][0] == '@' and proj not in self.C[i]:
                        self.C[i].append(proj)

    def Create_C_Sequence(self):
        """Iteratively build the canonical item-set collection and, once it
        stabilises, fill in the reduce ('rN') and accept ('acc') actions."""
        i = 0
        ensure = 0
        temp = 0
        Derivation_Sequence = []
        self.C.append([self.Projects[0]])
        while True:
            old_C_all = deepcopy(self.C)
            if i == 1:
                self.C.append([])
            while True:
                old_C = deepcopy(self.C)
                if i == 0:
                    Derivation_Sequence = self.Is_Derivation(0)
                else:
                    temp = self.C.index(self.C[-1])
                    for Deri in Derivation_Sequence:
                        is_append = self.GO(self.C[self.Has_Deri], Deri, temp)
                        self.Closure(temp)
                        if is_append == 1:
                            self.C.append([])
                            self.GOTO_ADD(temp)
                            self.ACTION_ADD(temp)
                            temp = temp + 1
                Derivation_Sequence = self.Is_Derivation(i)
                if len(Derivation_Sequence) != 0:
                    self.Has_Deri = i
                    for deri in Derivation_Sequence:
                        for proj in self.Projects:
                            if deri in proj and proj[1][0] == '@' and proj not in self.C[i]:
                                self.C[i].append(proj)
                # Stop once a full pass makes no change twice in a row.
                if old_C == self.C :
                    if ensure == 0:
                        ensure = 1
                        continue
                    else:
                        ensure = 0
                        break
            # for temp in self.C[i]:
            #     print(temp)
            i = i + 1
            if old_C_all == self.C and self.C.index(self.C[-1]) == i:
                del self.C[-1]
                # Fill reduce/accept entries for every completed item
                # (dot at the end), using FOLLOW sets for SLR(1).
                for item, j in zip(self.C, range(0, len(self.C))):
                    for sub_item in item:
                        if sub_item[1][-1] == '@':
                            producer = deepcopy(sub_item)
                            producer[1] = producer[1][:-1]
                            recursive = 'r' + str(self.Producer.index(producer))
                            if sub_item[1][0] == list(self.GRAMMA)[0]:
                                #if item[0][1][-2] == '#':
                                self.ACTION[j]['#'] = 'acc'
                                continue
                            for Deri in self.VOL['T']:
                                if Deri not in self.FOLLOW[sub_item[0]]:
                                    continue
                                # Conflicting entries are joined with ','.
                                if self.ACTION[j][Deri] != {}:
                                    self.ACTION[j][Deri] = str(self.ACTION[j][Deri]) + ',' + recursive
                                    continue
                                self.ACTION[j][Deri] = recursive
                break

    def GOTO_ADD(self, status):
        # Initialise an empty GOTO row for a new state.
        self.GOTO[status] = { not_end : {} for not_end in self.VOL['N'] }

    def ACTION_ADD(self, status):
        # Initialise an empty ACTION row for a new state.
        self.ACTION[status] = { not_end : {} for not_end in self.VOL['T'] }

    def CREATE_FOLLOW(self):
        """Populate self.FOLLOW: '#' for the start symbol, terminals that
        directly follow a nonterminal, and the left side's FOLLOW for
        nonterminals ending a right-hand side."""
        self.FOLLOW[list(self.GRAMMA)[0]].append('#')
        for key in self.GRAMMA.keys():
            for key2 in self.GRAMMA.keys():
                for right in self.GRAMMA[key2]:
                    if key in right:
                        if key == right[-1]:
                            for foll in self.FOLLOW[key2]:
                                if foll not in self.FOLLOW[key]:
                                    self.FOLLOW[key].append(foll)
                            continue
                        if right[right.index(key) + 1] in self.VOL['T']:
                            self.FOLLOW[key].append(right[right.index(key) + 1])

    def Form_Display(self):
        # Placeholder: table pretty-printing is not implemented.
        print("nothing")
if __name__ == '__main__':
    # Build the SLR tables for the imported grammar and print the item sets.
    test = SLR(GRAMMA, VOL)
    test.CREATE_FOLLOW()
    test.Create_Producer_Sequence()
    test.Create_Project_Sequence()
    test.Create_C_Sequence()
    for c, i in zip(test.C, range(0, len(test.C))):
        print(str(i) + ' : ' + str(c))
| [
"paullinzipeng@gmail.com"
] | paullinzipeng@gmail.com |
8bfe91c55b0d6fb6146284652a0734296c3c8004 | 39ceaf62744febd3f108b98ae71b6c7f75e49cae | /Exemples cours 6/MeteoGUI/meteoGUI.py | 55f9a22ee9b7c61dafae18c7c72e006dbf964071 | [
"Apache-2.0"
] | permissive | mchoquette1984/coursPython | 68cf53367b1dbb0c1ad146d798623222bff06af9 | 74f634a96146c2244f859b9d449534330d9542fb | refs/heads/master | 2023-09-03T11:41:37.128690 | 2021-10-13T01:42:58 | 2021-10-13T01:42:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,299 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'meteoGUI.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import objMeteo
class Ui_MainWindow(object):
    """Qt Designer-generated main window, hand-modified to fetch and show a
    weather feed when the button is clicked."""
    def setupUi(self, MainWindow):
        # Weather feed source (Environment Canada RSS for city qc-133).
        self.objM = objMeteo.Meteo('https://meteo.gc.ca/rss/city/qc-133_f.xml')
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(400, 150)
        # Fixed-size window (min == max == 400x150).
        MainWindow.setMinimumSize(QtCore.QSize(400, 150))
        MainWindow.setMaximumSize(QtCore.QSize(400, 150))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(30, 30, 331, 31))
        self.label.setObjectName("label")
        self.btnMAJour = QtWidgets.QPushButton(self.centralwidget)
        self.btnMAJour.setGeometry(QtCore.QRect(160, 70, 75, 23))
        self.btnMAJour.setObjectName("btnMAJour")
        # Hand-added: clicking the button refreshes the weather label.
        self.btnMAJour.clicked.connect(self._afficheMeteo)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 400, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Set translatable UI strings.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Météo"))
        self.label.setText(_translate("MainWindow", ""))
        self.btnMAJour.setText(_translate("MainWindow", "Lire Météo"))

    def _afficheMeteo(self):
        # Fetch the current weather text and display it in the label.
        self.label.setText(self.objM.getMeteo())
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| [
"noreply@github.com"
] | noreply@github.com |
f629e04bde6f125a6757fd8ad212af9f11399f1e | 30da023a60f2b7721808a0d49075348d8c7a9ef7 | /activity_prediction/boosting/config.py | f389b5980149928f5964b8708fabce875f6d7b68 | [] | no_license | serp404/ML4PPM | 58da454a9735de9a195fee2a39fd04355412e190 | c3db974ef6a2979be5d2027ab275443f7cd45c1d | refs/heads/main | 2023-07-18T02:23:33.735338 | 2021-08-28T18:19:05 | 2021-08-28T18:19:05 | 392,088,804 | 1 | 0 | null | 2021-08-15T20:49:25 | 2021-08-02T20:42:48 | Jupyter Notebook | UTF-8 | Python | false | false | 403 | py | import numpy as np
from hyperopt import hp
# Hyperopt search space for CatBoost hyperparameter tuning.
catboost_space = {
    'depth': hp.quniform('depth', 2, 16, 1),
    'n_estimators': hp.quniform('n_estimators', 80, 124, 1),
    # Learning rate sampled log-uniformly over [0.001, 0.5].
    'learning_rate': hp.loguniform('learning_rate', np.log(0.001), np.log(0.5)),
    'l2_leaf_reg': hp.uniform('l2_leaf_reg', 1, 8),
}
# Name of the case-identifier column in the event logs.
CASE_ID_COLUMN = 'Case ID'
# NOTE(review): exact semantics depend on the feature builder — presumably
# the maximum history-window length; confirm against its usage.
MAX_WINDOW_SIZE = 5
FOLDER_PREFIX = '../data/preprocessed_datasets/'
"noreply@github.com"
] | noreply@github.com |
eed94a047c8ceace0d5f1642db2ffe1c7eb3bf0e | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Study/Keras/Chapter_03_Catching_Layer_Concept/sub_03_image_augmentation.py | 5af584ec17aef04328d39886bb785271c2918441 | [] | no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import numpy as np
# Fix the random seed so the generated augmentations are reproducible.
np.random.seed(5)
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# Build the image-augmentation generator.
data_aug_gen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=15,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.5,
    zoom_range=[0.8, 2.0],
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest'
)
img = load_img(path='./dataset/handwriting_shape/train/triangle/triangle001.png')
x = img_to_array(img)
# flow() expects a batch axis: reshape to (1, H, W, C).
x = x.reshape((1,) + x.shape)
i = 0
# Save augmented variants of the single input image, stopping after 31 batches.
for batch in data_aug_gen.flow(x, batch_size=1, save_to_dir='./dataset/handwriting_shape/preview', save_prefix='tri',
                               save_format='png'):
    i += 1
    if i > 30:
        break
"broodsky1122@hanmail.net"
] | broodsky1122@hanmail.net |
63aece5376d78fe1adf90813932e843283448f09 | 2b28f749fef34e566b685d520be7ed50f28b7bff | /bondhon_docx/convert_bangla.py | eec2df4268d0d8e6e6bd40811d001112db6fa54b | [
"MIT"
] | permissive | banglakit/bondhon-docx | cc58fea46fd9a50b4559ed26ba2142a5d708423e | a8f6a58995392f420d48f5fc8ec7a25dadeca30a | refs/heads/master | 2020-04-28T12:00:15.608727 | 2019-03-12T18:23:15 | 2019-03-12T18:23:15 | 175,262,079 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | #!/usr/bin/env python
import argparse
import os
from docx import Document
from bondhon_docx import conversion
def main():
    """Parse CLI arguments and convert a .docx file between Bengali encodings.

    Writes the result next to the input as ``<name>.converted.docx``.
    """
    arg_parser = argparse.ArgumentParser(description='Convert Bengali Documents between encodings.')
    arg_parser.add_argument('from_enc', help='Original Encoding of File')
    arg_parser.add_argument('to', help='The Encoding you want to convert to')
    arg_parser.add_argument('path', help='The path of the file')
    opts = arg_parser.parse_args()

    doc = Document(opts.path)
    conversion.convert_document(opts.from_enc, opts.to, doc)

    # Keep the original file; save the converted copy with a new suffix.
    base, _ = os.path.splitext(opts.path)
    doc.save(base + '.converted.docx')
| [
"aniruddha@adhikary.net"
] | aniruddha@adhikary.net |
e08d8817647d2f68bad4873c4900496351629105 | ff61af360de5f0ad3bc53495d110168076444c49 | /python_exercises/check_fermat.py | 2c2eb1faca71cef3d6338a7f3023a18ffa8e2701 | [] | no_license | andreanndrade10/python-algorithms | db8e06bc916ee10d9575c7be5bd5c6dd2a2b78b7 | 1a97d3e128d7c5d5ebb2693716ba773f420fc1cc | refs/heads/master | 2022-12-16T02:35:55.579073 | 2020-09-07T18:51:57 | 2020-09-07T18:51:57 | 270,482,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | def user_interface():
print("Check if Fermat is righ")
a = int(input("A: "))
b = int(input("B: "))
c = int(input("C: "))
n = int(input("n: "))
check_fermat(a,b,c,n)
def check_fermat(a, b, c, n):
    """Print whether (a, b, c, n) is a counterexample to Fermat's claim.

    Fermat's Last Theorem states a**n + b**n == c**n has no solution for
    n > 2; equality here therefore prints 'Fermat was wrong!'.
    """
    if a ** n + b ** n == c ** n:
        print('Fermat was wrong!')
    else:
        print('Fermat is right!')
# NOTE(review): user_interface() is invoked twice back to back, so the user
# is prompted for two rounds -- looks accidental; confirm one call suffices.
user_interface()
user_interface()
"andre.andrade@ish.com.br"
] | andre.andrade@ish.com.br |
50ebef76bc53f83502193a78637c474c65e7e058 | 73edfaefc413c3cc165576f92b57a55fdaa12e1d | /datadragon.py | f9cee0f14e601ea1207e31258340262cff2aa432 | [] | no_license | rafaelbeckel/ProcessDataChallenge | fe9e086b494007be60c828fa6f6ec395b5e05973 | 032b730af6a152dc810825980e8279ca7855203d | refs/heads/master | 2020-04-05T13:39:53.583256 | 2017-06-29T20:30:00 | 2017-06-29T20:30:00 | 94,951,810 | 0 | 0 | null | 2017-06-21T01:53:04 | 2017-06-21T01:53:04 | null | UTF-8 | Python | false | false | 3,549 | py | import ast
import click
import settings
import multiprocessing as mp
from data.db import DB
from pprint import pprint
from data.seeder import Seeder
from data.cruncher import Cruncher
db = DB()
# Root click command group; subcommands attach via @main.command().
# -h is aliased to --help for convenience.
@click.group(context_settings = dict(help_option_names = ['-h', '--help']))
def main():
    pass
@main.command()
@click.option( '--hard', is_flag=True,
               help = 'If passed, drops the db instead of its collections.' )
def reset(hard):
    """Drop all Collections so we can start again"""
    # Soft reset empties the collections; hard reset drops the whole db.
    if not hard:
        db.reset()
        print("Dropped all collections from the database!")
    else:
        db.hard_reset()
        print("Dropped the database!")
@main.command()
@click.option( '--users', default = settings.USERS_COUNT,
               help = 'Number of users to be inserted in the database.' )
@click.option( '--products', default = settings.PRODUCTS_COUNT,
               help = 'Number of products to be inserted in the database.' )
@click.option( '--batch', default = settings.BATCH_SIZE,
               help = 'Number of records to be created in each iteration.' )
@click.option( '--workers', default = mp.cpu_count(),
               help = 'Number of child processes to run insertion jobs.' )
@click.option( '--reset', is_flag=True,
               help = 'Drop all records before inserting.' )
def generate(users, products, batch, workers, reset):
    """Inserts fake users, products and shopping activity data in MongoDB"""
    if reset:
        # BUG FIX: the --reset flag binds the parameter ``reset``, which
        # shadows the reset() command defined above; the old code called
        # reset(hard=False) on a bool and raised TypeError.  Drop the
        # collections directly instead (same effect as the soft reset).
        db.reset()
        print("Dropped all collections from the database!")

    # Fan the insertion work out over ``workers`` child processes.
    seeder = Seeder(db.database,
                    total_products = products,
                    total_users = users,
                    batch_size = batch,
                    workers = workers)
    seeder.run()
    print('Finished in ' + seeder.elapsed_time + 's')
@main.command()
def crunch():
    """Crunches shopping activity and generates activity collection"""
    # All four source collections must be populated before crunching,
    # otherwise the aggregation would produce an empty/partial result.
    required = ('users', 'carts', 'orders', 'products')
    if all(db.database[name].count() for name in required):
        cruncher = Cruncher(db.database)
        cruncher.run()
        print('Finished in ' + cruncher.elapsed_time + 's')
    else:
        print('')
        print('You need to run "generate" command first')
@main.command()
@click.option( '--query',
               help = 'Queries the new table.')
def find(query):
    """Queries the activity table and print results"""
    # NOTE(review): if --query is omitted, ``query`` is None and
    # ast.literal_eval(None) raises -- consider defaulting to '{}'.
    if db.database['activity'].count():
        # literal_eval safely parses the query string into a dict/filter
        # without evaluating arbitrary code.
        cursor = db.database['activity'].find( ast.literal_eval(query) )
        for document in cursor:
            print('_________________________________________')
            print('id: ' + str(document.get('customer_id')))
            print('E-mail: ' + document.get('email'))
            print('Full Name: ' + document.get('full_name'))
            print('Average Expenses (last 3 months): ' + str(round(document.get('average_monthly_expenses', 0),2)))
            print('')
            print('Monthly Expenses:')
            pprint(document.get('monthly_expenses'))
            print('')
            print('Monthly expenses / category:')
            pprint(document.get('categorized_monthly_expenses'))
            print('')
            print('Average monthly expenses / category (last 3 months):')
            # NOTE(review): this reuses 'categorized_monthly_expenses' -- the
            # label suggests an averaged field was intended; verify against
            # the keys the Cruncher actually writes.
            pprint(document.get('categorized_monthly_expenses'))
            print('_________________________________________')
    else:
        print('')
        print('You need to run "generate" and "crunch" commands first')
| [
"rafaelbeckel@gmail.com"
] | rafaelbeckel@gmail.com |
995c7fb086f0b3ce3be2766dfa862208c3486b28 | d52f71cac1c10a8641a18b2b30e789744f3b3ef7 | /Experiments/Yellow_submarine/2019_01_30_ml_approach/src/qmlt/numerical/__init__.py | a1f226b035bbee8776fb49ed53650e6768d1eceb | [] | no_license | BOHRTECHNOLOGY/public_research | 89c67e583b2283f6c67ab33c7303c23bf18467df | d9209f20073d075ae7150250cb1a369f8cb215b7 | refs/heads/master | 2022-12-10T16:47:54.319350 | 2020-01-09T12:51:04 | 2020-01-09T12:51:04 | 143,842,978 | 17 | 5 | null | 2022-12-08T01:40:31 | 2018-08-07T08:26:05 | Python | UTF-8 | Python | false | false | 2,833 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Numerical Quantum Circuit Learner
========================================================
**Module name:** :mod:`qmlt.numerical`
.. currentmodule:: qmlt.numerical
.. codeauthor:: Maria Schuld <maria@xanadu.ai>
This module contains a class to train models for machine learning and optimization based on variational quantum circuits.
The optimization is executed by scipy's numerical optimisation library. The user defines a function that computes
the outputs of the variational circuit, as well as the training objective, and specifies the model and training
hyperparameters.
There are three basic functionalities. The circuit can be trained, run with the current parameters, and scored.
The numerical learner module has been designed for the training of continuous-variable circuits written in StrawberryFields or
BlackBird (using any backend), but is in principle able to train any user-provided model coded in python.
.. note::
Numerical differentiation is not robust, which means that some models fail to be trained. For example, the approximations
of gradients for gradient-based methods are not precise enough to find the steepest descent in plateaus of the
optimization landscape. This can sometimes be rectified by choosing good hyperparameters, but ultimately poses a limit
to training quantum circuits with numerical methods.
CircuitLearner class
---------------------
.. currentmodule:: qmlt.numerical.CircuitLearner
.. autosummary::
train_circuit
run_circuit
score_circuit
get_circuit_parameters
Helper methods
--------------
.. currentmodule:: qmlt.numerical
.. autosummary::
check
check_X
check_Y
check_steps
check_batch_size
check_logs
Code details
------------
"""
from .learner import (CircuitLearner,
_check as check,
_check_X as check_X,
_check_Y as check_Y,
_check_steps as check_steps,
_check_batch_size as check_batch_size,
_check_logs as check_logs)
__all__ = ['CircuitLearner', 'check', 'check_X', 'check_Y', 'check_steps', 'check_batch_size', 'check_logs']
| [
"michal.stechly@gmail.com"
] | michal.stechly@gmail.com |
b89f367ef49e172f226e5c58c00fd91c5901484f | 35eeece7d97eb672542ec69610d65426592c4dfd | /nojapan/tools/crawler.py | d371da5313e023e66afa8ee1d68a5c1b3584a607 | [] | no_license | yprite/PlayGround | 99adf566ca01fd2da9b06d5d59c9398645f9fa2c | d9194cdfa6def5ab578bb8e39483bc39fe290374 | refs/heads/master | 2022-12-08T08:41:16.404651 | 2020-05-08T01:04:04 | 2020-05-08T01:04:04 | 162,648,797 | 1 | 0 | null | 2022-12-04T04:05:53 | 2018-12-21T01:16:40 | C | UTF-8 | Python | false | false | 869 | py | import requests
from bs4 import BeautifulSoup
from nojapan import models
def get_korea_company_info():
    """Scrape the Korean-Wikipedia "list of South Korean companies" page
    and print each linked company's URL and name.

    Side effects only (one HTTP GET plus prints); returns None.
    """
    # NOTE(review): this helper is only referenced by the commented-out
    # persistence code below, so it is currently dead weight -- confirm
    # before removing.
    def clean_html_tag(raw_html):
        import re
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', raw_html)
        return cleantext

    session = requests.Session()
    url = "https://ko.wikipedia.org/wiki/%EB%8C%80%ED%95%9C%EB%AF%BC%EA%B5%AD%EC%9D%98_%EA%B8%B0%EC%97%85_%EB%AA%A9%EB%A1%9D"
    content = BeautifulSoup(session.get(url).text, 'html.parser')
    # NOTE(review): assumes the article keeps its mw-parser-output > ul
    # layout; a Wikipedia markup change silently breaks this selector chain.
    for element in content.find('div', id='mw-content-text').find('div', class_="mw-parser-output").find('ul').find_all('a') :
        print (element['href'])
        print (element.text)
        #product, is_created = models.product.objects.get_or_create(name=str(element.text))
        #print ("Save " + str(is_created))
    #print ("--------------------------------")
| [
"yprite@jjssm.org"
] | yprite@jjssm.org |
9b6caa6210adf537375fc9142ca2126edac15980 | 8304a08e1c1177f87f58ff708785eb3304219405 | /main.py | f60c5821a7e83b4e51cb98a003b927a15ef53545 | [] | no_license | yest/T5-QA | 93c464d27e0c718501403f8b175dde3f35db998d | 0913ac5a508df2306bd2d1272523fe73437be34a | refs/heads/main | 2023-06-14T09:54:56.403697 | 2021-07-08T23:12:13 | 2021-07-08T23:12:13 | 384,002,511 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | from argparse import ArgumentParser
import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import Trainer
from bioqamodel import BioQAModel
from bioqadatamodule import BioQADataModule
from transformers import (
AdamW,
T5ForConditionalGeneration,
T5Tokenizer,
get_linear_schedule_with_warmup
)
def main(hparams):
    """Train and evaluate a T5 question-answering model on the BioASQ TSVs.

    ``hparams`` must provide ``model``, ``batch_size`` and ``epochs``
    (see the argparse block below).
    """
    # Fixed seed for reproducible splits/initialization.
    pl.seed_everything(22)

    DATASET_PATH = 'data/bioasq/'
    MODEL_NAME = hparams.model
    TOKENIZER = T5Tokenizer.from_pretrained(MODEL_NAME)
    BATCH_SIZE = hparams.batch_size
    EPOCHS = hparams.epochs

    # Only the training split is cleaned of NaN rows.
    train_df = pd.read_csv(f"{DATASET_PATH}train.tsv", sep='\t').dropna()
    val_df = pd.read_csv(f"{DATASET_PATH}val.tsv", sep='\t')
    test_df = pd.read_csv(f"{DATASET_PATH}test.tsv", sep='\t')

    data_module = BioQADataModule(train_df, val_df, test_df, TOKENIZER, batch_size=BATCH_SIZE)

    model = BioQAModel(MODEL_NAME)

    # Keep only the single checkpoint with the lowest validation loss.
    checkpoint_callback = ModelCheckpoint(
        dirpath = "checkpoints",
        filename = "best-checkpoint",
        save_top_k = 1,
        verbose = True,
        monitor = "val_loss",
        mode = "min"
    )

    # NOTE(review): newer pytorch_lightning expects ``callbacks`` to be a
    # list; confirm the pinned version accepts a single callback here.
    trainer = Trainer(
        # logger=logger
        callbacks=checkpoint_callback,
        max_epochs=EPOCHS,
        gpus=1,
        progress_bar_refresh_rate=30
    )

    trainer.fit(model, data_module)
    # NOTE(review): test() without a ckpt_path evaluates the in-memory
    # (last-epoch) weights, not the best checkpoint saved above -- confirm
    # that is intended.
    trainer.test()
if __name__ == '__main__':
    # CLI: model name, batch size and epoch count, forwarded to main().
    parser = ArgumentParser()
    parser.add_argument('--model', type=str, default='t5-base')
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--epochs', type=int, default=6)
    args = parser.parse_args()
    main(args)
"yudianto.sujana@gmail.com"
] | yudianto.sujana@gmail.com |
0ec039b3a87e51359999205ac7cc87fccb2fb731 | cb6f4cebd92d611dda3c53621cb4b31ffe51105c | /pi.py | b7d639dba3e2709928e8691295ab6fc794d8b714 | [] | no_license | federicoarmata/mathematical-varieties-and-algos | cf528571faf14eca852f937e20b86cf11df46929 | a44e3f405bda48833130bde5c8e1fcf17c685725 | refs/heads/master | 2020-06-16T11:16:53.612930 | 2019-10-20T19:35:03 | 2019-10-20T19:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | from mpmath import *
def pi_generator1(N):
    """Approximate pi with N terms of the Bailey-Borwein-Plouffe series.

    Checked until the 1000-th decimal digit.
    """
    # Working precision: N decimal digits plus the leading "3".
    mp.dps = N + 1
    total = mpf(0)
    for k in range(N):
        # Numerator of the k-th BBP term; denominator is 16**k.
        term = (mpf(4) / mpf(8 * k + 1)
                - mpf(2) / mpf(8 * k + 4)
                - mpf(1) / mpf(8 * k + 5)
                - mpf(1) / mpf(8 * k + 6))
        total += term / mpf(16) ** k
    return total
def pi_generator2(N):
    """Approximate pi to N decimal digits with F. Bellard's faster variant
    of the Bailey-Borwein-Plouffe formula.

    See http://mathworld.wolfram.com/PiFormulas.html for the series.
    Checked until 1000-th decimal digit.
    """
    pi = mpf(0)
    # Working precision: N decimal digits plus the leading "3".
    mp.dps = N + 1
    # 350 terms cover ~1000 digits; beyond that, scale terms with N.
    x = 0
    if N > 1000:
        # BUG FIX: this line previously read ``x == N`` -- a comparison
        # used as a statement, i.e. a no-op -- so the term count never
        # grew and precision was silently capped at ~350 terms.
        x = N
    for i in range(0, x + 350):
        n1 = mpf((-1) ** i)
        n2 = (
            -mpf(2 ** 5) / mpf(4 * i + 1)
            - mpf(1) / mpf(4 * i + 3)
            + mpf(2 ** 8) / mpf(10 * i + 1)
            - mpf(2 ** 6) / mpf(10 * i + 3)
            - mpf(2 ** 2) / mpf(10 * i + 5)
            - mpf(2 ** 2) / mpf(10 * i + 7)
            + mpf(1) / mpf(10 * i + 9))
        d = mpf(2 ** (10 * i))
        pi += n1 * n2 / d
    return pi / mpf(2 ** 6)
def print_pi(N, generator):
    """Print pi rounded to the N-th decimal digit using *generator*."""
    approximation = generator(N)
    # N digits after the point plus the leading "3" = N + 1 significant.
    print(nstr(approximation, N + 1))
if __name__ == "__main__":
N = 100
print_pi(N, pi_generator1)
print_pi(N, pi_generator2)
| [
"federicoarmata@msn.com"
] | federicoarmata@msn.com |
f7c7a547727f1734a58d8396064a913242f0d901 | 10e0458c38f3e625311f032fb2ccbc304a0c0db3 | /romantest1.py | 6f199a8b7db14ba18097143a81943a78a5b8158b | [] | no_license | ramonbadillo/Tutorial_Python_Unity | d4509fc4005d9db28b39e96b2e4ed99b1c3fae2c | e2edcfe69eefa7f96bb0deac17856e833e7476b3 | refs/heads/master | 2021-01-01T15:59:54.132403 | 2014-09-10T00:19:58 | 2014-09-10T00:19:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,686 | py | import roman
import unittest
class KnownValues(unittest.TestCase):
    """Spot-check to_roman/from_roman against a table of known pairs."""

    # (integer, numeral) pairs: every single-symbol value plus samples
    # spread across the full 1..3999 range.
    known_values = ( (1, 'I'),
                     (2, 'II'),
                     (3, 'III'),
                     (4, 'IV'),
                     (5, 'V'),
                     (6, 'VI'),
                     (7, 'VII'),
                     (8, 'VIII'),
                     (9, 'IX'),
                     (10, 'X'),
                     (50, 'L'),
                     (100, 'C'),
                     (500, 'D'),
                     (1000, 'M'),
                     (31, 'XXXI'),
                     (148, 'CXLVIII'),
                     (294, 'CCXCIV'),
                     (312, 'CCCXII'),
                     (421, 'CDXXI'),
                     (528, 'DXXVIII'),
                     (621, 'DCXXI'),
                     (782, 'DCCLXXXII'),
                     (870, 'DCCCLXX'),
                     (941, 'CMXLI'),
                     (1043, 'MXLIII'),
                     (1110, 'MCX'),
                     (1226, 'MCCXXVI'),
                     (1301, 'MCCCI'),
                     (1485, 'MCDLXXXV'),
                     (1509, 'MDIX'),
                     (1607, 'MDCVII'),
                     (1754, 'MDCCLIV'),
                     (1832, 'MDCCCXXXII'),
                     (1993, 'MCMXCIII'),
                     (2074, 'MMLXXIV'),
                     (2152, 'MMCLII'),
                     (2212, 'MMCCXII'),
                     (2343, 'MMCCCXLIII'),
                     (2499, 'MMCDXCIX'),
                     (2574, 'MMDLXXIV'),
                     (2646, 'MMDCXLVI'),
                     (2723, 'MMDCCXXIII'),
                     (2892, 'MMDCCCXCII'),
                     (2975, 'MMCMLXXV'),
                     (3051, 'MMMLI'),
                     (3185, 'MMMCLXXXV'),
                     (3250, 'MMMCCL'),
                     (3313, 'MMMCCCXIII'),
                     (3408, 'MMMCDVIII'),
                     (3501, 'MMMDI'),
                     (3610, 'MMMDCX'),
                     (3743, 'MMMDCCXLIII'),
                     (3844, 'MMMDCCCXLIV'),
                     (3888, 'MMMDCCCLXXXVIII'),
                     (3940, 'MMMCMXL'),
                     (3999, 'MMMCMXCIX'))

    def test_to_roman_known_values(self):
        '''to_roman should give known result with known input'''
        for integer, numeral in self.known_values:
            result = roman.to_roman(integer)
            self.assertEqual(numeral, result)

    def test_from_roman_known_values(self):
        '''from_roman should give known result with known input'''
        for integer, numeral in self.known_values:
            result = roman.from_roman(numeral)
            self.assertEqual(integer, result)
class ToRomanBadInput(unittest.TestCase):
    """to_roman must reject values outside 1..3999 and non-integers."""

    def test_too_large(self):
        '''to_roman should fail with large input'''
        with self.assertRaises(roman.OutOfRangeError):
            roman.to_roman(4000)

    def test_zero(self):
        '''to_roman should fail with 0 input'''
        with self.assertRaises(roman.OutOfRangeError):
            roman.to_roman(0)

    def test_negative(self):
        '''to_roman should fail with negative input'''
        with self.assertRaises(roman.OutOfRangeError):
            roman.to_roman(-1)

    def test_non_integer(self):
        '''to_roman should fail with non-integer input'''
        with self.assertRaises(roman.NotIntegerError):
            roman.to_roman(0.5)
class RoundtripCheck(unittest.TestCase):
    """Converting to Roman and back must reproduce every value in range."""

    def test_roundtrip(self):
        '''from_roman(to_roman(n))==n for all n'''
        for value in range(1, 4000):
            self.assertEqual(value, roman.from_roman(roman.to_roman(value)))
class FromRomanBadInput(unittest.TestCase):
    """from_roman must reject strings that are not well-formed numerals."""

    def test_too_many_repeated_numerals(self):
        '''from_roman should fail with too many repeated numerals'''
        for candidate in ('MMMM', 'DD', 'CCCC', 'LL', 'XXXX', 'VV', 'IIII'):
            with self.assertRaises(roman.InvalidRomanNumeralError):
                roman.from_roman(candidate)

    def test_repeated_pairs(self):
        '''from_roman should fail with repeated pairs of numerals'''
        for candidate in ('CMCM', 'CDCD', 'XCXC', 'XLXL', 'IXIX', 'IVIV'):
            with self.assertRaises(roman.InvalidRomanNumeralError):
                roman.from_roman(candidate)

    def test_malformed_antecedents(self):
        '''from_roman should fail with malformed antecedents'''
        for candidate in ('IIMXCC', 'VX', 'DCM', 'CMM', 'IXIV',
                          'MCMC', 'XCX', 'IVI', 'LM', 'LD', 'LC'):
            with self.assertRaises(roman.InvalidRomanNumeralError):
                roman.from_roman(candidate)
# Run the whole suite when executed directly: python romantest1.py
if __name__ == '__main__':
    unittest.main()
"robr8@hotmail.com"
] | robr8@hotmail.com |
a525b11efc8f9cad4fe2b462aa96b35638be7b1b | fc169e7ba08b43647d22375c1bfd58636265c852 | /day21/solution.py | e770086bee26ed0c851a1500d9f82258ffa5dc70 | [] | no_license | simonhessner/adventofcode-2017 | b67da94900f20130730b380e18696fd64edb4eb1 | 1eb8293b8dbf6f0ba491e968c3285137489432a1 | refs/heads/master | 2021-09-01T10:57:23.255160 | 2017-12-26T16:38:55 | 2017-12-26T16:38:55 | 112,945,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,017 | py | #!/usr/bin/python3
# http://adventofcode.com/2017/day/21
import numpy as np
# numpy 2D array to string representation
def get_strrep(patch):
    """Serialize a 2D character array into the puzzle's "row/row/row" form."""
    return "/".join("".join(row) for row in patch)
# string representation to 2D numpy array
def get_arrrep(strrep):
    """Parse a "row/row/row" pattern string into a 2D numpy char array."""
    rows = [list(part) for part in strrep.split("/")]
    return np.array(rows)
# Iterates over the grid, splits it into local patches of
# given size and applies the replacement rules to each patch
def do_replacement(grid, rules, amount, size):
    """Split *grid* into amount x amount square patches of side *size*,
    replace each patch through *rules* (pattern string -> pattern string),
    and reassemble the enlarged grid.
    """
    bands = []
    for row in range(amount):
        replaced = []
        for col in range(amount):
            patch = grid[row * size:(row + 1) * size, col * size:(col + 1) * size]
            # Serialize the patch to its "row/row" key form.
            key = "/".join("".join(r) for r in patch)
            assert key in rules, "No rule found for '%s'" % key
            # Parse the replacement pattern back into a 2D char array.
            replaced.append(np.array([list(part) for part in rules[key].split("/")]))
        # Stitch the row of patches horizontally, then all rows vertically.
        bands.append(np.concatenate(replaced, 1))
    return np.concatenate(bands, 0)
def run(iterations):
    """Evolve the fractal art grid *iterations* times and return the number
    of '#' pixels (Advent of Code 2017, day 21).

    Reads the enhancement rules from a file named "input" in the CWD,
    one "pattern => replacement" per line.
    """
    with open("input") as inputfile:
        lines = inputfile.read().splitlines()
    rules = {x[0] : x[1] for x in (x.split(" => ") for x in lines)}
    # Augment the rule book with every symmetry of each source pattern:
    # rotate by 0 / 90 / 180 / 270 degrees, and also add the horizontal
    # flip of every rotation.  Existing keys are never overwritten.
    for src in list(rules):
        dst = rules[src]
        srcparts = get_arrrep(src)
        for k in range(4):
            rotatedarr = np.rot90(srcparts, k, (0,1))
            rotatedstr = get_strrep(rotatedarr)
            if rotatedstr not in rules:
                rules[rotatedstr] = dst
            flippedarr = np.flip(rotatedarr, 1)
            flippedstr = get_strrep(flippedarr)
            if flippedstr not in rules:
                rules[flippedstr] = dst
    # Fixed starting pattern defined by the puzzle.
    grid = np.array([[".", "#", "."],
                     [".", ".", "#"],
                     ["#", "#", "#"]])
    for it in range(iterations):
        size = grid.shape[0]
        # Even sides are split into 2x2 patches, otherwise 3x3 patches.
        if size % 2 == 0:
            grid = do_replacement(grid, rules, size//2, 2)
        elif size % 3 == 0:
            grid = do_replacement(grid, rules, size//3, 3)
        else:
            print("size %2 and %3 not 0")
    # Count lit ('#') cells in the final grid.
    return sum((sum(c == "#" for c in r)) for r in grid)
print("part 1", run(5))
print("part 1", run(18)) | [
"uldci@student.kit.edu"
] | uldci@student.kit.edu |
277cf726bc39c8aeaa1975b9063a1ca2bc1ccf7c | 181bf2809c259c77a1019103e6d15db29a968e48 | /rate_counter/manage.py | 7629f3a5e7475a0e6439febffd051b65fbb1f454 | [] | no_license | wolendranh/rate_counter | 22992099cadc549c8ac860532348fa05c860833e | db19120a029b5e194e2ac7df395ff4027b021c0d | refs/heads/master | 2021-01-21T04:55:26.189889 | 2016-04-01T08:21:12 | 2016-04-01T08:21:12 | 54,800,066 | 2 | 1 | null | 2016-05-15T02:55:13 | 2016-03-26T21:18:55 | Python | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rate_counter.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"hulpa.yura@gmail.com"
] | hulpa.yura@gmail.com |
e95d87395e03d312fee41a2854417872ff4ca942 | fb9196c79a526ab14b97131343db8582900c8e48 | /TP/TP2/Code/Python/1C.py | ff6dce4c8626c7999cebd4efb703c053dfcecf7a | [
"MIT"
] | permissive | suyundukov/LIFAP1 | 47a1f97275e96aa49fa024e793bc880d32270f83 | b5da0395de9775cd8b319b56af7a6d0257d542db | refs/heads/master | 2022-02-09T16:37:29.788188 | 2019-08-05T12:57:49 | 2019-08-05T12:57:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #!/usr/bin/python
# Calculer la somme des N premières puissances de 2
from math import pow
def sum_pow(val):
    """Return the sum of the first *val* powers of two: 2**0 + ... + 2**(val - 1).

    Uses exact integer arithmetic.  The original used ``math.pow``, which
    returns floats and loses exactness once the running sum exceeds 2**53
    (e.g. for val = 64).  For val <= 0 the sum is empty, hence 0.
    """
    total = 0
    for i in range(val):
        total += 1 << i  # exact 2**i
    return total
# Interactive driver: prompt (in French) for N, then print the sum of the
# first N powers of two.
print('Donne moi une valeur : ', end='')
foo = int(input())  # raises ValueError on non-numeric input
print('La somme de', foo, 'première puissance de 2 est', sum_pow(foo))
| [
"nurlansu@users.noreply.github.com"
] | nurlansu@users.noreply.github.com |
f2cdd7cfeb9f4995ded2abbab206cfba0a41d618 | b63a90f9a0a9afb586b496dcd8d9176a4ce8a0ea | /src/main.py | 8556790dc76d7767e5a134358db4706292cc0510 | [] | no_license | romanav/signal_temporal_logic | 6e957255b383a8c72a81e4695bca897999ab46ba | 9442fb44c7ad84947029d5edadbd1b1c59088e35 | refs/heads/master | 2022-12-01T22:13:11.550699 | 2020-08-14T09:41:48 | 2020-08-14T09:41:48 | 279,386,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import numpy as np
from src.robustness import feature_, not_, sat_
import matplotlib.pyplot as plt
# Build a test signal: one sine wave sampled every 0.1 over two periods.
t = np.arange(0, 4 * np.pi, 0.1)
print(f'signal length: {len(t)}')
sig = np.sin(t)
# exp = list(not_(feature_(sig - 0.25, (1, 2))))
# Evaluate satisfaction of the predicate sig > 0.25 over the signal.
# NOTE(review): sat_ semantics come from src.robustness -- confirm it
# yields one value per sample as assumed by the plot below.
exp = list(sat_(sig-0.25))
# NOTE(review): the third positional argument to plt.plot is normally a
# format string; passing the array ``sig`` here looks unintended -- verify
# against a two-series call like plot(x, exp, x, sig).
plt.plot(range(len(exp)), exp, sig)
# plt.plot(range(472), exp, sig)
# plt.ylim(-2, 2)
plt.show()
#
| [
"romanaverkin@gmail.com"
] | romanaverkin@gmail.com |
4968df6c80dc6e6b890a82f2d92dde5867592ea9 | b480bd20366ccb23e32cb07bcc73579cf33ba96b | /idea4u/views.py | 6d0239545ace0600d2d69d278ed6245192d0f853 | [
"MIT"
] | permissive | TangZhongham/Idea4U | b35a00673a8636d9b46bbed181ccb15ecb8f4d83 | c19093b1e7a8aa60d80a028f0495cc5a4721e8bb | refs/heads/master | 2020-06-27T12:58:52.606819 | 2019-08-02T08:53:41 | 2019-08-02T08:53:41 | 199,960,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | from flask import flash, redirect, url_for, render_template
from idea4u.models import Idea
from idea4u.form import IdeaForm
from idea4u import app, db
# Application Routes
@app.route('/', methods=['GET', 'POST'])
def index():
    """Home page: list ideas newest-first and accept new submissions."""
    messages = Idea.query.order_by(Idea.timestamp.desc()).all()
    form = IdeaForm()
    if form.validate_on_submit():
        # Persist the submitted idea, then redirect (POST/redirect/GET).
        new_idea = Idea(
            topic=form.topic.data,
            idea=form.idea.data,
            writer=form.writer.data,
        )
        db.session.add(new_idea)
        db.session.commit()
        flash("你的好点子已经提交!")
        return redirect(url_for('index'))
    return render_template('index.html', form=form, messages=messages)
| [
"13122260573@163.com"
] | 13122260573@163.com |
4013fcc598254b2c31c9a6d62683192317037477 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/regressiontests/test_utils/models.py | 514cf46f7b562720f2894129b24d62b046c1f3c1 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/regressiontests/test_utils/models.py | [
"ron.y.kagan@gmail.com"
] | ron.y.kagan@gmail.com |
786e15a926f9ea9ba51dff0e7cfd6b90ea532743 | cf14b6ee602bff94d3fc2d7e712b06458540eed7 | /gs24/enroll/urls.py | c58191feb75d1b077f6411cb53f93548cd76ff79 | [] | no_license | ManishShah120/Learning-Django | 8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27 | 8fe70723d18884e103359c745fb0de5498b8d594 | refs/heads/master | 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | from django.urls import path
from .views import studentinfo
# URL routes for the enroll app: /stu/ renders the student-details view.
urlpatterns = [
    path('stu/', studentinfo, name='studetails'),
]
| [
"mkshah141@gmail.com"
] | mkshah141@gmail.com |
1f8aca95192f02d0291addff26d41d499dee3b82 | 885241cd98a7798cd982a209e334fc71f9c390e5 | /streamfieldblocks/models.py | 9d18073a779d5636a4684f81a262a5fe2d75518b | [] | no_license | mcwaage1/wagtail-devportfolio | 364faa6b6789f640ea74db445fd300fd5a6e3629 | 1b105d0e5d781d7358083fa602958deaad1c9305 | refs/heads/main | 2023-09-06T04:46:21.722717 | 2021-11-22T01:13:05 | 2021-11-22T01:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,497 | py | from django.db import models
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class ResponsiveImageBlock(ImageChooserBlock):
    """Image chooser rendered through a responsive-image template."""

    class Meta:
        icon = "image"
        template = "streamfieldblocks/responsive_image_block.html"
class CardBlock(blocks.StructBlock):
    """Card with an optional image, title, body text and an internal page link."""

    image = ImageChooserBlock(required=False)
    title = blocks.CharBlock()
    body = blocks.TextBlock()
    page_link = blocks.PageChooserBlock()

    class Meta:
        icon = "placeholder"
        template = "streamfieldblocks/card_block.html"
class SimpleRichTextBlock(blocks.StructBlock):
    """Rich text restricted to headings, bold/italic, links and lists."""

    richtext = blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul'])

    class Meta:
        icon = "pilcrow"
        template = "streamfieldblocks/simple_richtext_block.html"
class CarouselBlock(blocks.StreamBlock):
    """Stream of images rendered as a carousel."""

    image = ImageChooserBlock()

    class Meta:
        icon = "cog"
        template = "streamfieldblocks/carousel_block.html"
class FlushListBlock(blocks.StructBlock):
    """Flush (unstyled) list whose items are free-form text blocks."""

    items = blocks.ListBlock(
        blocks.TextBlock(help_text="List item's body text.")
    )

    class Meta:
        icon = "list-ul"
        template = "streamfieldblocks/flush_list_block.html"
class TimelineBlock(blocks.StructBlock):
    """Dated timeline entry: title, body text and a date."""

    title = blocks.CharBlock(max_length=100)
    text = blocks.TextBlock()
    date = blocks.DateBlock()

    class Meta:
        icon = "placeholder"
        template = "streamfieldblocks/timeline_block.html"
"semperindoctrina19@gmail.com"
] | semperindoctrina19@gmail.com |
1b749635440a04b0bac6fafd4c193ccf950c50cf | 74f6c5b2fbc1c868c7fabda7486eabfdb4466f1b | /3.py | cfb74d4aefa5c2f88ae2f231da701c52336d0fce | [] | no_license | jcstoltzfus/project-euler | 0eec3e3275f1d4d0a2cca2cc1df7426fcac0a89d | 50439a22d65eb029794c25346dc62702558728fb | refs/heads/master | 2020-03-30T11:19:52.795796 | 2018-10-01T22:33:56 | 2018-10-01T22:33:56 | 151,167,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | '''Question: The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?'''
#DOESN'T WORK
def find_prime_factors(n, lst):
    """Append the prime factors of *n* (with multiplicity, ascending) to
    *lst*, printing each factor and finally the whole list; returns *lst*.

    Fixes the original algorithm, which (a) recomputed its loop bound from
    the shrinking *n*, (b) used float division, and (c) dropped the last
    prime factor whenever it exceeded sqrt(n) -- e.g. it missed 29 for
    13195 and would miss 6857 for 600851475143.
    """
    i = 2
    while i * i <= n:
        if n % i == 0:
            lst.append(i)
            print(i)
            n //= i  # integer division keeps n exact
        else:
            i = i + 1
    if n > 1:
        # Whatever remains after dividing out all factors <= sqrt(n)
        # is itself prime (possibly the largest factor).
        lst.append(n)
        print(n)
    print(lst)
    return lst
# Ad-hoc smoke-test invocation at import time.
find_prime_factors(12, [])
#find_prime_factors(13195, [])
"stoltzfus.eelman@gmail.com"
] | stoltzfus.eelman@gmail.com |
79f7f6729bc086b71b77bf9c3640d6cbb329aef3 | a67e6f0314e707e38382c64c3a7eec958463a79d | /Ihome/migrations/versions/cb32832dd761_init_tables.py | b9cf9249bc2c8470172eb096398c9394314ee972 | [] | no_license | shiyi-11/learngit | dc23b8a69c6b0139cd5d281d659d52448694bca6 | acd12e1c1154333a2afb7222cae01be51bd85bca | refs/heads/master | 2022-12-12T10:39:00.882833 | 2020-04-25T06:09:56 | 2020-04-25T06:09:56 | 247,857,863 | 0 | 0 | null | 2022-12-08T01:23:40 | 2020-03-17T02:07:13 | HTML | UTF-8 | Python | false | false | 5,295 | py | """init tables
Revision ID: cb32832dd761
Revises:
Create Date: 2020-02-23 12:25:52.278535
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cb32832dd761'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ihome schema: area/facility lookups, user profiles,
    houses (with facility and image link tables) and orders, plus the
    index on order status.  Auto-generated by Alembic from the models.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('ih_area_info',
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('update_time', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('ih_facility_info',
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('update_time', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('ih_user_profile',
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('update_time', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=32), nullable=False),
    sa.Column('password', sa.String(length=128), nullable=False),
    sa.Column('phone_num', sa.String(length=11), nullable=False),
    sa.Column('real_name', sa.String(length=32), nullable=True),
    sa.Column('id_card', sa.String(length=20), nullable=True),
    sa.Column('avatar_url', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name'),
    sa.UniqueConstraint('phone_num')
    )
    op.create_table('ih_house_info',
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('update_time', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('area_id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=64), nullable=False),
    sa.Column('price', sa.Integer(), nullable=True),
    sa.Column('address', sa.String(length=512), nullable=True),
    sa.Column('room_count', sa.Integer(), nullable=True),
    sa.Column('acreage', sa.Integer(), nullable=True),
    sa.Column('unit', sa.String(length=32), nullable=True),
    sa.Column('capacity', sa.Integer(), nullable=True),
    sa.Column('beds', sa.String(length=64), nullable=True),
    sa.Column('deposit', sa.Integer(), nullable=True),
    sa.Column('min_days', sa.Integer(), nullable=True),
    sa.Column('max_days', sa.Integer(), nullable=True),
    sa.Column('order_count', sa.Integer(), nullable=True),
    sa.Column('index_image_url', sa.String(length=256), nullable=True),
    sa.ForeignKeyConstraint(['area_id'], ['ih_area_info.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['ih_user_profile.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Many-to-many link between houses and facilities (composite PK).
    op.create_table('ih_house_facility',
    sa.Column('house_id', sa.Integer(), nullable=False),
    sa.Column('facility_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['facility_id'], ['ih_facility_info.id'], ),
    sa.ForeignKeyConstraint(['house_id'], ['ih_house_info.id'], ),
    sa.PrimaryKeyConstraint('house_id', 'facility_id')
    )
    op.create_table('ih_house_image',
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('update_time', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('house_id', sa.Integer(), nullable=False),
    sa.Column('url', sa.String(length=256), nullable=False),
    sa.ForeignKeyConstraint(['house_id'], ['ih_house_info.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('ih_order_info',
    sa.Column('create_time', sa.DateTime(), nullable=True),
    sa.Column('update_time', sa.DateTime(), nullable=True),
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('house_id', sa.Integer(), nullable=False),
    sa.Column('begin_date', sa.DateTime(), nullable=False),
    sa.Column('end_date', sa.DateTime(), nullable=False),
    sa.Column('days', sa.Integer(), nullable=False),
    sa.Column('house_price', sa.Integer(), nullable=False),
    sa.Column('amount', sa.Integer(), nullable=False),
    sa.Column('status', sa.Enum('WAIT_ACCEPT', 'WAIT_PAYMENT', 'PAID', 'WAIT_COMMENT', 'COMPLETE', 'CANCELED', 'REJECTED'), nullable=True),
    sa.Column('comment', sa.Text(), nullable=True),
    sa.ForeignKeyConstraint(['house_id'], ['ih_house_info.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['ih_user_profile.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # Speed up the frequent "orders by status" queries.
    op.create_index(op.f('ix_ih_order_info_status'), 'ih_order_info', ['status'], unique=False)
    # ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_ih_order_info_status'), table_name='ih_order_info')
op.drop_table('ih_order_info')
op.drop_table('ih_house_image')
op.drop_table('ih_house_facility')
op.drop_table('ih_house_info')
op.drop_table('ih_user_profile')
op.drop_table('ih_facility_info')
op.drop_table('ih_area_info')
# ### end Alembic commands ###
| [
"shiyixuan_11@163.com"
] | shiyixuan_11@163.com |
7f678d20cd9ccb76a755129b262da9ec03d23e1b | e144cd77fdf1911fc771ff019e7aa1c41e5da0a1 | /setup.py | d18a75df333cb0a9563054658adf0121536c5d80 | [] | no_license | alekam/django-simplepages | 68d0fb7025c38ccba0b29910f58367788594c78b | 242e01e1f1d259b329ea0fdb7c470c1448e7c500 | refs/heads/master | 2021-01-16T02:45:45.281535 | 2010-11-27T14:51:10 | 2010-11-27T14:51:10 | 1,116,991 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | from setuptools import setup, find_packages
setup(
name = "simplepages",
version = '0.54.0',
description = "A basic site structure and pages app",
keywords = "django, cms, pages, contentmanagement",
author = "Alex Kamedov",
author_email = "alex@kamedov.ru",
url = "git@3128.ru:repos/django-snippets.git",
license = "New BSD License",
platforms = ["any"],
classifiers = ["Development Status :: stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Utilities"],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
package_data = {
'linker': [
'locale/*/LC_MESSAGES/*.*',
'templates/*',
'media/*',
],
},
)
| [
"alex@kamedov.ru"
] | alex@kamedov.ru |
f0487401391b4b4aa4c678313efbe02c01927f17 | 3140daf42a083f126e770fbffa2f20319f35cc8d | /dapg/examples/dapg.py | cc49f39ed641fc749766b94445078a12e17dbae4 | [
"Apache-2.0"
] | permissive | hyzcn/hand_dapg | c7714408ade17189cd581ca27418b374a62c50a3 | 9cd081dd1147e7a23f13cca1bb818ab13c38407a | refs/heads/master | 2020-06-25T00:09:28.722724 | 2019-07-19T22:24:59 | 2019-07-19T22:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,147 | py | """
This script illustrates initializing with behavior cloning
and finetuning policy with DAPG.
"""
from mjrl.utils.gym_env import GymEnv
from mjrl.policies.gaussian_mlp import MLP
from mjrl.baselines.mlp_baseline import MLPBaseline
from mjrl.algos.dapg import DAPG
from mjrl.algos.behavior_cloning_2 import BC
from mjrl.utils.train_agent import train_agent
import mj_envs
import time as timer
import pickle
SEED = 100
# ------------------------------
# Get demonstrations
print("========================================")
print("Collecting expert demonstrations")
print("========================================")
demo_paths = pickle.load(open('../demonstrations/relocate-v0_demos.pickle', 'rb'))
# ------------------------------
# Train BC
e = GymEnv('relocate-v0')
policy = MLP(e.spec, hidden_sizes=(32,32), seed=SEED)
bc_agent = BC(demo_paths, policy=policy, epochs=5, batch_size=32, lr=1e-3)
ts = timer.time()
print("========================================")
print("Running BC with expert demonstrations")
print("========================================")
bc_agent.train()
print("========================================")
print("BC training complete !!!")
print("time taken = %f" % (timer.time()-ts))
print("========================================")
score = e.evaluate_policy(policy, num_episodes=10, mean_action=True)
print("Score with behavior cloning = %f" % score[0][0])
# ------------------------------
# Finetune with DAPG
print("========================================")
print("Finetuning with DAPG")
baseline = MLPBaseline(e.spec, reg_coef=1e-3, batch_size=64, epochs=2, learn_rate=1e-3)
agent = DAPG(e, policy, baseline, demo_paths=demo_paths, normalized_step_size=0.1,
seed=SEED, lam_0=1e-2, lam_1=0.99, save_logs=True)
ts = timer.time()
train_agent(job_name='relocate_demo_init_dapg',
agent=agent,
seed=SEED,
niter=100,
gamma=0.995,
gae_lambda=0.97,
num_cpu=5,
sample_mode='trajectories',
num_traj=200,
save_freq=25,
evaluation_rollouts=20)
print("time taken = %f" % (timer.time()-ts))
| [
"rajeswaran.aravind@gmail.com"
] | rajeswaran.aravind@gmail.com |
5d53415e6a69d775523d61d30add80344da10e0c | f74032ab5e361c3f528d8b4d16f0b425ce85b8eb | /helloworld_project/settings.py | 6dec3660759a4e342b6ede5b477a0b5235e871f9 | [] | no_license | shweta1207/django-helloworld | 47f0062d69cc032cc0cc5da6d15604a0a5d9d736 | b52279f26af281aaa6d9698a74b56b0b91e392b8 | refs/heads/master | 2023-04-30T14:41:17.068772 | 2019-11-09T09:02:52 | 2019-11-09T09:02:52 | 220,617,485 | 0 | 0 | null | 2023-04-21T20:40:51 | 2019-11-09T09:06:50 | Python | UTF-8 | Python | false | false | 3,152 | py | """
Django settings for helloworld_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!f^b77p$%l-9hio-fahj4#m4t%s^9jj&mg*wy37pg%1##f3jck'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pages.apps.PagesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'helloworld_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'helloworld_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"shweta1.singh@orange.com"
] | shweta1.singh@orange.com |
38fd11aa7c506efa49b6de2a5c4c9d8db6977752 | 0667af1539008f9c6c0dcde2d3f50e8bbccf97f3 | /source/rttov_test/profile-datasets-py/div83/070.py | 2f10763fdc3f1b7acb4813b3e935a503943ac821 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bucricket/projectMAScorrection | bc6b90f07c34bf3e922225b2c7bd680955f901ed | 89489026c8e247ec7c364e537798e766331fe569 | refs/heads/master | 2021-01-22T03:54:21.557485 | 2019-03-10T01:47:32 | 2019-03-10T01:47:32 | 81,468,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,446 | py | """
Profile ../profile-datasets-py/div83/070.py
file automaticaly created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/070.py"
self["Q"] = numpy.array([ 2.96115100e+00, 3.39443800e+00, 4.23145200e+00,
5.32973200e+00, 5.94539500e+00, 5.61806800e+00,
5.33932100e+00, 5.83540600e+00, 6.51451800e+00,
6.65043600e+00, 6.55529700e+00, 6.44838800e+00,
6.31400000e+00, 6.12939200e+00, 5.89224500e+00,
5.61127900e+00, 5.25048200e+00, 4.95066500e+00,
4.67371800e+00, 4.41607000e+00, 4.19023200e+00,
4.03347400e+00, 3.90907500e+00, 3.80890500e+00,
3.72809600e+00, 3.66258700e+00, 3.59588700e+00,
3.52695800e+00, 3.46347800e+00, 3.40734800e+00,
3.35437900e+00, 3.29386900e+00, 3.23485000e+00,
3.14360000e+00, 3.02604100e+00, 2.91984100e+00,
2.84811200e+00, 2.80580200e+00, 2.77955200e+00,
2.76484200e+00, 2.75856200e+00, 2.76287200e+00,
2.77901200e+00, 2.78894200e+00, 2.78647200e+00,
2.77288200e+00, 2.76076200e+00, 2.76365200e+00,
2.74351200e+00, 2.72718300e+00, 2.80359200e+00,
3.31174900e+00, 4.34185100e+00, 6.46307800e+00,
1.06424900e+01, 1.60878400e+01, 1.63576300e+01,
1.71113100e+01, 1.90418400e+01, 2.31223700e+01,
3.08604500e+01, 4.50171700e+01, 6.65996600e+01,
8.22580300e+01, 9.53938000e+01, 1.13173200e+02,
1.44493100e+02, 1.71137700e+02, 1.90849600e+02,
2.00276900e+02, 2.16941900e+02, 2.48781100e+02,
3.30323900e+02, 4.11388700e+02, 4.60154200e+02,
3.54358400e+02, 3.09988900e+02, 3.09470200e+02,
3.72736000e+02, 4.40020300e+02, 5.15807800e+02,
6.12176000e+02, 6.69989800e+02, 6.82229200e+02,
6.88011300e+02, 8.48419600e+02, 1.07039300e+03,
9.75860800e+02, 6.97745800e+02, 6.90916300e+02,
1.42792800e+03, 2.93211700e+03, 4.33631400e+03,
4.80827900e+03, 5.10875700e+03, 5.57655800e+03,
7.39840600e+03, 7.53264900e+03, 8.02216400e+03,
7.80660700e+03, 7.59956400e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 377.2399, 377.2387, 377.2354, 377.231 , 377.2238, 377.2129,
377.196 , 377.1698, 377.1305, 377.0945, 377.0805, 377.1046,
377.1886, 377.4377, 377.7968, 378.1419, 378.454 , 378.7141,
378.9302, 379.1213, 379.3134, 379.4245, 379.4775, 379.4286,
379.3536, 379.2596, 379.1476, 379.0247, 378.8627, 378.6927,
378.5567, 378.4108, 378.4178, 378.4328, 378.5689, 378.7689,
378.9979, 379.2789, 379.5759, 379.743 , 379.903 , 380.013 ,
380.0599, 380.1129, 380.3039, 380.5039, 380.6779, 380.8329,
380.985 , 381.068 , 381.1539, 381.3877, 381.6943, 381.9965,
382.2619, 382.5358, 382.7957, 383.0594, 383.2827, 383.4611,
383.6232, 383.6757, 383.7264, 383.7334, 383.7334, 383.7236,
383.7005, 383.6753, 383.6428, 383.6142, 383.5838, 383.5456,
383.5003, 383.4562, 383.4345, 383.4761, 383.5031, 383.5173,
383.509 , 383.5022, 383.4781, 383.4401, 383.395 , 383.3623,
383.3181, 383.2226, 383.1175, 382.9749, 382.9216, 382.8663,
382.5689, 381.9757, 381.4218, 381.2271, 381.0961, 380.902 ,
380.1902, 380.1289, 379.9335, 380.0121, 380.0894])
self["CO"] = numpy.array([ 0.02209863, 0.02343462, 0.02636759, 0.03232103, 0.04441274,
0.0705057 , 0.1377673 , 0.3534269 , 0.461836 , 0.448828 ,
0.30839 , 0.1443591 , 0.0477044 , 0.02142597, 0.02094568,
0.02113658, 0.02141849, 0.0209528 , 0.02028611, 0.01988771,
0.01996482, 0.01988972, 0.01966442, 0.01913733, 0.01849003,
0.01775643, 0.01725304, 0.01676274, 0.01639724, 0.01602755,
0.01581625, 0.01559305, 0.01559965, 0.01561665, 0.01591045,
0.01635915, 0.01689455, 0.01758455, 0.01834365, 0.01884835,
0.01935865, 0.01995024, 0.02063974, 0.02140204, 0.02298004,
0.02475493, 0.02631743, 0.02770002, 0.02908402, 0.02962222,
0.03019272, 0.0316051 , 0.03353225, 0.03579607, 0.03871269,
0.04198372, 0.04633544, 0.05141782, 0.05625363, 0.0606512 ,
0.06506769, 0.06728707, 0.06965246, 0.07065419, 0.07140839,
0.07176868, 0.07178553, 0.07174982, 0.07157794, 0.0714007 ,
0.07118445, 0.07096244, 0.07084199, 0.07073449, 0.07069745,
0.07069484, 0.07069948, 0.07070691, 0.07073942, 0.07078744,
0.07078517, 0.07076295, 0.07065773, 0.07053265, 0.07032628,
0.07011017, 0.06989271, 0.06894036, 0.06801451, 0.06735533,
0.06722317, 0.0670592 , 0.06686429, 0.06673387, 0.06665921,
0.06658212, 0.06643731, 0.06641224, 0.06656013, 0.06681143,
0.06706642])
self["T"] = numpy.array([ 196.811, 204.304, 217.854, 233.389, 248.33 , 260.214,
266.226, 265.259, 257.043, 242.866, 228.387, 223.547,
221.781, 221.163, 220.489, 219.198, 217.572, 214.833,
211.843, 209.215, 207.518, 207.486, 208.183, 209.263,
210.647, 212.484, 214.098, 215.184, 215.87 , 216.185,
216.149, 215.884, 215.792, 215.469, 214.984, 214.746,
215.066, 215.903, 216.949, 217.9 , 218.537, 218.612,
218.158, 217.61 , 217.538, 218.073, 218.871, 219.311,
219.09 , 218.297, 217.175, 216.108, 214.708, 212.93 ,
210.93 , 209.05 , 208.321, 209.076, 210.395, 212.004,
213.744, 215.541, 217.436, 219.439, 221.535, 223.702,
225.918, 228.187, 230.5 , 232.779, 235.027, 237.244,
239.332, 241.432, 243.59 , 245.912, 248.205, 250.491,
252.742, 254.943, 257.096, 259.143, 261.125, 263.048,
264.839, 266.281, 267.58 , 269.085, 270.573, 271.785,
272.39 , 272.482, 272.773, 273.871, 275.725, 277.517,
279.019, 281.268, 282.488, 282.488, 282.488])
self["N2O"] = numpy.array([ 0.01098997, 0.00728997, 0.00453998, 0.00265999, 0.00247999,
0.00200999, 0.00114999, 0.00069 , 0.00117999, 0.00162999,
0.00288998, 0.00513997, 0.00804995, 0.01392991, 0.02119988,
0.03283982, 0.04253978, 0.04902976, 0.05510974, 0.06037973,
0.06539973, 0.07965968, 0.09801962, 0.1156096 , 0.1445195 ,
0.1738694 , 0.2021193 , 0.2240692 , 0.2434892 , 0.2622491 ,
0.2783291 , 0.2858791 , 0.2931891 , 0.3002791 , 0.3046291 ,
0.3074991 , 0.3102091 , 0.3113491 , 0.3123391 , 0.3132791 ,
0.3141391 , 0.3149891 , 0.3158391 , 0.3166791 , 0.3175091 ,
0.3183191 , 0.3190891 , 0.3198291 , 0.3205191 , 0.3211491 ,
0.3217191 , 0.3222089 , 0.3224086 , 0.3225979 , 0.3227566 ,
0.3228948 , 0.3230147 , 0.3231045 , 0.3231538 , 0.3231725 ,
0.32317 , 0.3231655 , 0.3231585 , 0.3231534 , 0.3231492 ,
0.3231434 , 0.3231333 , 0.3231247 , 0.3231183 , 0.3231153 ,
0.3231099 , 0.3230996 , 0.3230732 , 0.323047 , 0.3230313 ,
0.3230655 , 0.3230798 , 0.32308 , 0.3230595 , 0.3230378 ,
0.3230133 , 0.3229822 , 0.3229635 , 0.3229595 , 0.3229576 ,
0.3229058 , 0.3228341 , 0.3228646 , 0.3229545 , 0.3229567 ,
0.3227185 , 0.3222324 , 0.3217786 , 0.3216261 , 0.321529 ,
0.3213778 , 0.320789 , 0.3207456 , 0.3205874 , 0.3206571 ,
0.320724 ])
self["O3"] = numpy.array([ 0.1602895 , 0.1751264 , 0.2174751 , 0.3374902 , 0.5335188 ,
0.7633317 , 1.037814 , 1.460131 , 2.162996 , 3.224179 ,
4.53031 , 5.466185 , 6.217261 , 6.769869 , 7.127218 ,
7.306899 , 7.309212 , 7.264534 , 7.173686 , 7.048029 ,
6.909961 , 6.743313 , 6.517205 , 6.247726 , 5.956838 ,
5.674419 , 5.417091 , 5.175832 , 4.917243 , 4.617564 ,
4.272526 , 3.918947 , 3.711748 , 3.457549 , 3.109261 ,
2.729652 , 2.387403 , 2.102434 , 1.848435 , 1.578356 ,
1.283646 , 1.063227 , 0.9494254 , 0.8682006 , 0.7912998 ,
0.729591 , 0.71388 , 0.6975461 , 0.726947 , 0.7552989 ,
0.6827211 , 0.5344572 , 0.4157442 , 0.3371368 , 0.2526923 ,
0.1435567 , 0.08720537, 0.06677076, 0.05481476, 0.04785379,
0.04413724, 0.04291547, 0.04383858, 0.04436565, 0.04414069,
0.04283575, 0.04163488, 0.04011853, 0.03831519, 0.03676863,
0.03569176, 0.03506188, 0.03532053, 0.03546251, 0.03550216,
0.03530918, 0.03537293, 0.03567126, 0.03650509, 0.0372565 ,
0.0377927 , 0.03774168, 0.0365436 , 0.03488379, 0.03320204,
0.03254936, 0.03240398, 0.03257758, 0.03319672, 0.03371569,
0.03283395, 0.03113155, 0.03098734, 0.03117398, 0.03105016,
0.03142975, 0.03257728, 0.03247998, 0.02796762, 0.0279737 ,
0.02797954])
self["CH4"] = numpy.array([ 0.01317666, 0.01317666, 0.08705683, 0.1460802 , 0.1961588 ,
0.2331317 , 0.2815615 , 0.3160522 , 0.3272379 , 0.3720405 ,
0.451968 , 0.5704313 , 0.7142055 , 0.8724697 , 1.009724 ,
1.109014 , 1.179674 , 1.198244 , 1.214824 , 1.223625 ,
1.232015 , 1.266925 , 1.313765 , 1.358645 , 1.425535 ,
1.492905 , 1.557734 , 1.603984 , 1.643204 , 1.652804 ,
1.663104 , 1.674134 , 1.685915 , 1.687485 , 1.688995 ,
1.690425 , 1.691735 , 1.692905 , 1.696045 , 1.699355 ,
1.702845 , 1.706505 , 1.710355 , 1.726075 , 1.739925 ,
1.754405 , 1.766195 , 1.775945 , 1.784715 , 1.784065 ,
1.783385 , 1.783484 , 1.783962 , 1.784378 , 1.784571 ,
1.784771 , 1.784851 , 1.784919 , 1.784596 , 1.783859 ,
1.782935 , 1.78108 , 1.779142 , 1.777374 , 1.775601 ,
1.774139 , 1.772954 , 1.771877 , 1.771152 , 1.770435 ,
1.770026 , 1.76958 , 1.769015 , 1.768432 , 1.767866 ,
1.767543 , 1.767622 , 1.767913 , 1.768471 , 1.769251 ,
1.769707 , 1.769996 , 1.769724 , 1.769372 , 1.768402 ,
1.76658 , 1.76361 , 1.759861 , 1.756264 , 1.752808 ,
1.749678 , 1.746125 , 1.743467 , 1.74281 , 1.742632 ,
1.741991 , 1.738949 , 1.738823 , 1.738054 , 1.738482 ,
1.738864 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 282.488
self["S2M"]["Q"] = 7599.56436003
self["S2M"]["O"] = 0.0279795394023
self["S2M"]["P"] = 1029.69702
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 1
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 282.488
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = -45.309
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2006, 7, 10])
self["TIME"] = numpy.array([0, 0, 0])
| [
"bucricket@gmail.com"
] | bucricket@gmail.com |
bdff99867244f35c2cca367095e00638f4182ed5 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-vod/huaweicloudsdkvod/v1/model/publish_assets_response.py | 981baa1a542fb9aab9b16d4419a291edee3eff10 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class PublishAssetsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'asset_info_array': 'list[AssetInfo]'
}
attribute_map = {
'asset_info_array': 'asset_info_array'
}
def __init__(self, asset_info_array=None):
"""PublishAssetsResponse - a model defined in huaweicloud sdk"""
super(PublishAssetsResponse, self).__init__()
self._asset_info_array = None
self.discriminator = None
if asset_info_array is not None:
self.asset_info_array = asset_info_array
@property
def asset_info_array(self):
"""Gets the asset_info_array of this PublishAssetsResponse.
发布的媒资信息。
:return: The asset_info_array of this PublishAssetsResponse.
:rtype: list[AssetInfo]
"""
return self._asset_info_array
@asset_info_array.setter
def asset_info_array(self, asset_info_array):
"""Sets the asset_info_array of this PublishAssetsResponse.
发布的媒资信息。
:param asset_info_array: The asset_info_array of this PublishAssetsResponse.
:type: list[AssetInfo]
"""
self._asset_info_array = asset_info_array
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PublishAssetsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
d08baa74c958543c08f257524f13f413c0c5f5b3 | ad5a84d32c867f8fc4a6faa46eb74f6df3d32998 | /src/manage.py | 821d500afe119df543154ec72028d2caf8696c1b | [] | no_license | cdbullones/How-start-off-a-project-Django-Rest-Framework | be9e9441870b8f6a8226695e03fcfd7910f8689f | b87f5f786c20d3b7f553a8f3e846caa5c5db755c | refs/heads/master | 2020-04-16T05:11:32.604314 | 2019-01-11T22:42:42 | 2019-01-11T22:42:42 | 165,295,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'api_base.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"cbullones@arawato.co"
] | cbullones@arawato.co |
f6839412a0ad934cf2b91b2707a7ce0d41f7e5be | 7e637a8bf79688aafaf9a02b6ed2bef6d22eefc7 | /aispace/models/base_pretrained.py | a9cdf9b4f3a20db6bd68f2326e7b0a8bd6a9d226 | [
"Apache-2.0"
] | permissive | yingyuankai/AiSpace | ebda1e9b2edda4bd7bcd5e41f4b6a49e9085003e | 54e8886e4dad2a6b5b45c48d8a98a42eb77d2406 | refs/heads/master | 2023-03-10T18:16:35.418289 | 2023-02-16T04:13:15 | 2023-02-16T04:13:15 | 232,947,595 | 32 | 4 | Apache-2.0 | 2022-11-22T02:45:33 | 2020-01-10T02:21:26 | Python | UTF-8 | Python | false | false | 3,270 | py | # -*- coding: utf-8 -*-
# @Time : 2019-11-04 19:35
# @Author : yingyuankai
# @Email : yingyuankai@aliyun.com
# @File : pretrained_base.py
import logging
from pathlib import Path
from abc import ABCMeta, abstractmethod
import tensorflow as tf
from aispace.utils.hparams import Hparams
from aispace.utils.file_utils import *
logger = logging.getLogger(__name__)
class BasePretrained(BaseModel):
__metaclass__ = ABCMeta
def __init__(self, hparams: Hparams, **kwargs):
super(BasePretrained, self).__init__(hparams, **kwargs)
self._MODEL2URL = hparams.family
self._MODEL_NAME = hparams.name
self.cache_dir = hparams.cache_dir
# self.pretrained_model_path = self.download_checkpoint(self._MODEL_NAME, self.cache_dir)
def download_checkpoint(self, pretrained_model_name, cache_dir=None):
r"""Download the specified pre-trained checkpoint, and return the
directory in which the checkpoint is cached.
Args:
pretrained_model_name (str): Name of the model checkpoint.
cache_dir (str, optional): Path to the cache directory. If `None`,
uses the default directory (user's home directory).
Returns:
Path to the cache directory.
"""
if pretrained_model_name in self._MODEL2URL:
download_path = self._MODEL2URL[pretrained_model_name]
else:
raise ValueError(
"Pre-trained model not found: {}".format(pretrained_model_name))
if cache_dir is None:
cache_path = default_download_dir(self._MODEL_NAME)
else:
cache_path = Path(cache_dir)
cache_path = cache_path / pretrained_model_name
if not cache_path.exists():
if isinstance(download_path, list):
for path in download_path:
maybe_download(path, str(cache_path))
else:
filename = download_path.split('/')[-1]
maybe_download(download_path, str(cache_path), extract=True)
folder = None
for file in cache_path.iterdir():
if file.is_dir():
folder = file
assert folder is not None
(cache_path / filename).unlink()
for file in folder.iterdir():
file.rename(file.parents[1] / file.name)
folder.rmdir()
print("Pre-trained {} checkpoint {} cached to {}".format(
self._MODEL_NAME, pretrained_model_name, cache_path))
else:
print("Using cached pre-trained {} checkpoint from {}.".format(
self._MODEL_NAME, cache_path))
return str(cache_path)
@abstractmethod
def _init_from_checkpoint(self, pretrained_model_name, cache_dir, scope_name, **kwargs):
r"""Initialize model parameters from weights stored in the pre-trained
checkpoint.
Args:
pretrained_model_name (str): Name of the pre-trained model.
cache_dir (str): Path to the cache directory.
scope_name: Variable scope.
**kwargs: Additional arguments for specific models.
"""
raise NotImplementedError | [
"yingyuankai@sogou-inc.com"
] | yingyuankai@sogou-inc.com |
0ff5e17d60c3598706105acdc7d288e45e737d05 | 8a34f80d6c646adf6af674a5ff3743fa4111a492 | /blog/settings.py | b29d1483ff5f5391eadd4da3e8d525606f4f2adc | [] | no_license | darevalo8/blog_pastor | 04a41db618b5291d86b777ff741997e2cb8fcf2f | aaea4197247e87077aa1cfe2247b47579acffd36 | refs/heads/main | 2023-02-17T10:18:27.763918 | 2021-01-09T17:03:01 | 2021-01-09T17:03:01 | 328,203,331 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,257 | py | """
Django settings for blog project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z0f7ethy1^5s15e9r^pd0dor)943!xczb3i1+b!h@fp1*3aojx'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts.apps.PostsConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')], # Esto lo toque
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Bogota'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"), # Yo lo toque
'/var/www/static/',
]
| [
"dr@todoo.co"
] | dr@todoo.co |
e3af4d6ab9808460198837d63b6e0f5553b57bbb | 04b1803adb6653ecb7cb827c4f4aa616afacf629 | /content/browser/frame_host/DEPS | 3da57e57e31e41791a995a6a1205c9dbf9929048 | [
"BSD-3-Clause"
] | permissive | Samsung/Castanets | 240d9338e097b75b3f669604315b06f7cf129d64 | 4896f732fc747dfdcfcbac3d442f2d2d42df264a | refs/heads/castanets_76_dev | 2023-08-31T09:01:04.744346 | 2021-07-30T04:56:25 | 2021-08-11T05:45:21 | 125,484,161 | 58 | 49 | BSD-3-Clause | 2022-10-16T19:31:26 | 2018-03-16T08:07:37 | null | UTF-8 | Python | false | false | 902 | include_rules = [
# The frame_host files should only call upwards in the layering via the
# delegate interfaces.
"-content/browser/web_contents",
"-content/public/browser/web_contents.h",
"-content/public/browser/web_contents_delegate.h",
"-content/public/browser/web_contents_view.h",
]
specific_include_rules = {
".*_(unit|browser)test\.cc": [
"+content/browser/web_contents",
"+content/public/browser/web_contents.h",
"+content/public/browser/web_contents_delegate.h",
],
".*interstitial_page_impl\.cc": [
# TODO(nasko): This should be removed once we remove
# WebContentsObserver as the method of telling interstitial pages to
# clean themselves up.
"+content/browser/web_contents",
"+content/public/browser/web_contents_delegate.h",
],
"popup_menu_helper_mac.mm": [
"+content/app_shim_remote_cocoa/render_widget_host_view_cocoa.h",
]
}
| [
"sunny.nam@samsung.com"
] | sunny.nam@samsung.com | |
5b538b31e4ee590ae593c774f133f990e00d53bf | cfaaad4b552af93f9c5b3cb8e6c7902c4910b343 | /evaluation/analysis.py | 402b0278dfb1018cd9cc6b0bb85a634b029ec716 | [] | no_license | panzhang666/Gibbs_sampling_motif_finding | 205b6834980b1f8c1a22cdf06d5b9c1b9d417051 | eb8ec558a9d5ab18b3454836ad528949eb4002ea | refs/heads/master | 2020-09-29T13:36:45.983638 | 2019-12-10T06:46:44 | 2019-12-10T06:46:44 | 227,048,120 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,477 | py | from __future__ import division
import math
import numpy as np
from matplotlib import pyplot as plt
def read_motif(filename, nLines):
    """Read a motif count-matrix file and return per-position base frequencies.

    The file's first line is a header and is skipped; each following line
    holds four integer counts (A C G T) for one motif position.

    Args:
        filename: path to the motif file.
        nLines: number of motif positions (rows) to read.

    Returns:
        dict mapping '<pos>_<base>' -> frequency, plus 'nLines' and 'total'
        (the count sum of the FIRST row, used as the normaliser for every row).
    """
    bases = ('A', 'C', 'G', 'T')
    motif = {'nLines': nLines}
    with open(filename, 'r') as f:
        f.readline()  # skip header line
        for i in range(nLines):
            counts = [int(v) for v in f.readline().strip().split()[:4]]
            if i == 0:
                # NOTE: every row is normalised by the first row's total,
                # matching the original parser's behaviour.
                motif['total'] = sum(counts)
            for base, count in zip(bases, counts):
                motif[str(i) + '_' + base] = count / motif['total']
    return motif
def open_files(folder_num):
    """Load true/predicted motifs, sites and the runtime for one data set.

    Reads from the numbered 'data_set copy N' / 'predicted copy N' folders.
    """
    ds_dir = "../CS466_mini/motif_finding/data_set copy " + str(folder_num) + "/"
    pred_dir = "../CS466_mini/motif_finding/predicted copy " + str(folder_num) + "/"
    # The declared motif length drives how many matrix rows get parsed.
    with open(ds_dir + "motiflength.txt", 'r') as f:
        motif_len = int(f.readline().strip())
    motif = read_motif(ds_dir + "motif.txt", motif_len)
    pred_motif = read_motif(pred_dir + "predictedmotif.txt", motif_len)
    with open(ds_dir + "sites.txt", 'r') as f:
        sites = [int(line) for line in f.readlines()]
    with open(pred_dir + "predictedsites.txt", 'r') as f:
        pred_sites = [int(line) for line in f.readlines()]
    with open(pred_dir + "time.txt", 'r') as f:
        time = [float(line) for line in f.readlines()][0]
    return motif, pred_motif, sites, pred_sites, time
def open_files_better_filename(icpc, ml, sl, sc, vers):
    """Load true/predicted motifs, sites and runtime for a parameterised run.

    The directory suffix encodes the generation parameters:
    <icpc:.6f>_<ml>_<sl>_<sc>_<vers>.
    """
    suffix = "_".join(["{0:.6f}".format(icpc), str(ml), str(sl), str(sc), str(vers)])
    dir_ds = "../CS466_mini/motif_finding/data_set_" + suffix
    dir_pred = "../CS466_mini/motif_finding/predicted_data_set_" + suffix
    with open(dir_ds + "/motiflength.txt", 'r') as f:
        motif_len = int(f.readline().strip())
    motif = read_motif(dir_ds + "/motif.txt", motif_len)
    pred_motif = read_motif(dir_pred + "/predictedmotif.txt", motif_len)
    with open(dir_ds + "/sites.txt", 'r') as f:
        sites = [int(line) for line in f.readlines()]
    with open(dir_pred + "/predictedsites.txt", 'r') as f:
        pred_sites = [int(line) for line in f.readlines()]
    # NOTE(review): time.txt is read from the data-set dir here, whereas
    # open_files() reads it from the predicted dir -- confirm this layout
    # difference is intentional.
    with open(dir_ds + "/time.txt", 'r') as f:
        time = [float(line) for line in f.readlines()][0]
    return motif, pred_motif, sites, pred_sites, time
def rel_entropy(motif, pred_motif):
    """
    Relative entropy of pred_motif with respect to motif.

    At positions where the true motif assigns zero probability, the
    predicted probability mass is accumulated separately instead of a log
    term; the absolute value of the combined sum is returned.
    """
    entropy_sum = 0
    zero_mass = 0
    for pos in range(motif['nLines']):
        # Base order (A, T, C, G) mirrors the original accumulation order.
        for base in ('A', 'T', 'C', 'G'):
            key = str(pos) + '_' + base
            if motif[key] == 0:
                # True motif forbids this base: count the stray predicted mass.
                zero_mass += pred_motif[key]
            elif pred_motif[key] != 0:
                entropy_sum += pred_motif[key] * \
                    math.log(pred_motif[key] / motif[key])
    return abs(entropy_sum + zero_mass)
def site_overlap(sites, pred_sites, ml):
    """
    Mean fraction of the motif window (length ml) shared by each true site
    and its predicted counterpart; non-overlapping pairs contribute 0.
    """
    total = 0
    for i, true_start in enumerate(sites):
        pred_start = pred_sites[i]
        # Overlap of two windows of length ml starting at the two positions.
        overlap = (min(true_start, pred_start) + ml) - max(true_start, pred_start)
        if overlap > 0:
            total += overlap / ml
    return total / len(sites)
class ParamSet:
    """Metrics collected over repeated runs of one parameter combination.

    icpc / ml / sl / sc are the generation parameters (information content
    per column, motif length, sequence length, sequence count).  Each run
    contributes one relative-entropy value, one site-overlap value and one
    running time.
    """
    def __init__(self, icpc, ml, sl, sc):
        self.icpc = icpc
        self.ml = ml
        self.sl = sl
        self.sc = sc
        self.rel_ent = []
        self.sites_overlap = []
        self.time = []
    def add_rel_ent(self, rel_ent):
        """Record one relative-entropy measurement."""
        self.rel_ent.append(rel_ent)
    def min_rel_ent(self):
        """Smallest recorded relative entropy."""
        return min(self.rel_ent)
    def max_rel_ent(self):
        """Largest recorded relative entropy."""
        return max(self.rel_ent)
    def avg_rel_ent(self):
        """Mean of the recorded relative entropies."""
        values = self.rel_ent
        return sum(values) / len(values)
    def std_rel_ent(self):
        """Population standard deviation of the relative entropies."""
        return np.std(self.rel_ent)
    def add_sites(self, sites, pred_sites):
        """Compute and record the site overlap for one run."""
        self.sites_overlap.append(site_overlap(sites, pred_sites, self.ml))
    def min_sites(self):
        """Smallest recorded site overlap."""
        return min(self.sites_overlap)
    def max_sites(self):
        """Largest recorded site overlap."""
        return max(self.sites_overlap)
    def avg_sites(self):
        """Mean of the recorded site overlaps."""
        values = self.sites_overlap
        return sum(values) / len(values)
    def std_sites(self):
        """Population standard deviation of the site overlaps."""
        return np.std(self.sites_overlap)
    def add_time(self, time):
        """Record one running time."""
        self.time.append(time)
    def min_time(self):
        """Smallest recorded running time."""
        return min(self.time)
    def max_time(self):
        """Largest recorded running time."""
        return max(self.time)
    def avg_time(self):
        """Mean of the recorded running times."""
        values = self.time
        return sum(values) / len(values)
    def std_time(self):
        """Population standard deviation of the running times."""
        return np.std(self.time)
    def __str__(self):
        """Tab-separated dump: parameter header plus one row per run."""
        pieces = ['ICPC: {} ML: {} SL: {} SC: {}\n'.format(
            self.icpc, self.ml, self.sl, self.sc)]
        pieces.append('Rel_Ent\tSite_Overlap\tTime\n')
        for idx in range(len(self.rel_ent)):
            pieces.append('{0:.6f}\t{1:.6f}\t{2:.6f}\n'.format(
                self.rel_ent[idx], self.sites_overlap[idx], self.time[idx]))
        return ''.join(pieces)
def _collect_runs(icpc, ml, sl, sc, folder_nums):
    """Build a ParamSet and fill it from the numbered result folders."""
    ps = ParamSet(icpc, ml, sl, sc)
    for folder_num in folder_nums:
        motif, pred_motif, sites, pred_sites, time = open_files(folder_num)
        ps.add_rel_ent(rel_entropy(motif, pred_motif))
        ps.add_sites(sites, pred_sites)
        ps.add_time(time)
    return ps

# One ParamSet per generation-parameter combination; folders 1-70 hold ten
# replicate runs for each of the seven combinations.
ps_2_8_500_10 = _collect_runs(2, 8, 500, 10, range(1, 11))
ps_1_8_500_10 = _collect_runs(1, 8, 500, 10, range(11, 21))
ps_15_8_500_10 = _collect_runs(1.5, 8, 500, 10, range(21, 31))
ps_2_7_500_10 = _collect_runs(2, 7, 500, 10, range(31, 41))
ps_2_6_500_10 = _collect_runs(2, 6, 500, 10, range(41, 51))
ps_2_8_500_20 = _collect_runs(2, 8, 500, 20, range(51, 61))
ps_2_8_500_5 = _collect_runs(2, 8, 500, 5, range(61, 71))

# Dump every raw measurement to a tab-separated report.
with open('data.txt', 'w') as f:
    for ps in (ps_2_8_500_10, ps_1_8_500_10, ps_15_8_500_10, ps_2_7_500_10,
               ps_2_6_500_10, ps_2_8_500_20, ps_2_8_500_5):
        f.write(str(ps))

# Finer-grained sequence-count sweep (sc = 6..20, ten versions each),
# stored in better-named directories.
sc_data = []
for sc in range(6, 21):
    ps = ParamSet(2, 8, 500, sc)
    for vers in range(1, 11):
        motif, pred_motif, sites, pred_sites, time = open_files_better_filename(2, 8, 500, sc, vers)
        ps.add_rel_ent(rel_entropy(motif, pred_motif))
        ps.add_sites(sites, pred_sites)
        ps.add_time(time)
    sc_data.append(ps)
def rel_icpc():
    """Plot mean relative entropy vs. ICPC with one-std-dev error bars."""
    x = [1, 1.5, 2]
    d_icpc = [ps_1_8_500_10, ps_15_8_500_10, ps_2_8_500_10]
    y = [i.avg_rel_ent() for i in d_icpc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_rel_ent() for i in d_icpc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('ICPC')
    ax.set_ylabel('Relative Entropy')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Relative Entropy changes with changes in ICPC")
    plt.show()
def rel_ml():
    """Plot mean relative entropy vs. motif length with std-dev error bars."""
    x = [6, 7, 8]
    d_ml = [ps_2_6_500_10, ps_2_7_500_10, ps_2_8_500_10]
    y = [i.avg_rel_ent() for i in d_ml]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_rel_ent() for i in d_ml]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Motif Length')
    ax.set_ylabel('Relative Entropy')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Relative Entropy changes with changes in motif length")
    plt.show()
def rel_sc():
    """Plot mean relative entropy vs. sequence count with std-dev error bars."""
    x = [5, 10, 20]
    d_sc = [ps_2_8_500_5, ps_2_8_500_10, ps_2_8_500_20]
    y = [i.avg_rel_ent() for i in d_sc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_rel_ent() for i in d_sc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Sequence Count')
    ax.set_ylabel('Relative Entropy')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Relative Entropy changes with changes in sequence count")
    plt.show()
def sites_icpc():
    """Plot mean site overlap vs. ICPC with one-std-dev error bars."""
    x = [1, 1.5, 2]
    d_icpc = [ps_1_8_500_10, ps_15_8_500_10, ps_2_8_500_10]
    y = [i.avg_sites() for i in d_icpc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_sites() for i in d_icpc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('ICPC')
    ax.set_ylabel('Sites Overlap')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Sites overlap with changes in ICPC")
    plt.show()
def sites_ml():
    """Plot mean site overlap vs. motif length with one-std-dev error bars."""
    x = [6, 7, 8]
    d_ml = [ps_2_6_500_10, ps_2_7_500_10, ps_2_8_500_10]
    y = [i.avg_sites() for i in d_ml]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_sites() for i in d_ml]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Motif Length')
    ax.set_ylabel('Sites Overlap')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Sites overlap with changes in motif length")
    plt.show()
def sites_sc():
    """Plot mean site overlap vs. sequence count with one-std-dev error bars."""
    x = [5, 10, 20]
    d_sc = [ps_2_8_500_5, ps_2_8_500_10, ps_2_8_500_20]
    y = [i.avg_sites() for i in d_sc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_sites() for i in d_sc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Sequence Count')
    ax.set_ylabel('Sites Overlap')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Sites overlap with changes in sequence count")
    plt.show()
def time_icpc():
    """Plot mean running time vs. ICPC with one-std-dev error bars."""
    x = [1, 1.5, 2]
    d_icpc = [ps_1_8_500_10, ps_15_8_500_10, ps_2_8_500_10]
    y = [i.avg_time() for i in d_icpc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_time() for i in d_icpc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('ICPC')
    ax.set_ylabel('Time taken')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Time taken with changes in ICPC")
    plt.show()
def time_ml():
    """Plot mean running time vs. motif length with one-std-dev error bars."""
    x = [6, 7, 8]
    d_ml = [ps_2_6_500_10, ps_2_7_500_10, ps_2_8_500_10]
    y = [i.avg_time() for i in d_ml]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_time() for i in d_ml]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Motif Length')
    ax.set_ylabel('Time taken')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Time taken with changes in motif length")
    plt.show()
def time_sc():
    """Plot mean running time vs. sequence count with one-std-dev error bars."""
    x = [5, 10, 20]
    d_sc = [ps_2_8_500_5, ps_2_8_500_10, ps_2_8_500_20]
    y = [i.avg_time() for i in d_sc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_time() for i in d_sc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Sequence Count')
    ax.set_ylabel('Time taken')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Time taken with changes in sequence count")
    plt.show()
def rel_sc_better_filename():
    """Plot mean relative entropy over the fine sc sweep (6..20)."""
    x = list(range(6, 21))
    d_sc = sc_data
    y = [i.avg_rel_ent() for i in d_sc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_rel_ent() for i in d_sc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Sequence Count')
    ax.set_ylabel('Relative Entropy')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Relative Entropy changes with changes in sequence count")
    plt.show()
def sites_sc_better_filename():
    """Plot mean site overlap over the fine sc sweep (6..20)."""
    x = list(range(6, 21))
    d_sc = sc_data
    y = [i.avg_sites() for i in d_sc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_sites() for i in d_sc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Sequence Count')
    ax.set_ylabel('Sites Overlap')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Sites overlap with changes in sequence count")
    plt.show()
def time_sc_better_filename():
    """Plot mean running time over the fine sc sweep (6..20)."""
    x = list(range(6, 21))
    d_sc = sc_data
    y = [i.avg_time() for i in d_sc]
    # Error bars show one standard deviation (min/max bounds were unused).
    yerr_std = [i.std_time() for i in d_sc]
    plt.figure()
    ax = plt.gca()
    ax.margins(0.05)
    ax.set_xlabel('Sequence Count')
    ax.set_ylabel('Time taken')
    plt.errorbar(x, y, yerr=yerr_std, ecolor='r')
    plt.title("Time taken with changes in sequence count")
    plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
8c677a448294359eddc72929c681abd438b90e80 | 385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28 | /source/difang/src/difang/majiang2/table_state/state_xueliu.py | 8aa6d33479d6ef5c2163185c743230768621fe2e | [] | no_license | csirui/hall37 | 17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db | 5c4eb4b2bf57bbbee4731470c830d8d81915d603 | refs/heads/master | 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # -*- coding=utf-8
'''
Created on 2016年9月23日
@author: zhaol
'''
from difang.majiang2.table_state.state import MTableState
class MTableStateXueliu(MTableState):
    """Table-state set for the 'xueliu' (blood-flows-into-river) mahjong mode."""
    def __init__(self):
        super(MTableStateXueliu, self).__init__()
        # Enable, in order: discarding a tile, peng (pung), gang (kong),
        # declaring a void suit (ding que), winning (hu), and continuing
        # play after a win (xueliu-cheng-he).
        for flag in (MTableState.TABLE_STATE_DROP,
                     MTableState.TABLE_STATE_PENG,
                     MTableState.TABLE_STATE_GANG,
                     MTableState.TABLE_STATE_ABSENCE,
                     MTableState.TABLE_STATE_HU,
                     MTableState.TABLE_STATE_XUELIU):
            self.setState(flag)
| [
"cg@ibenxi.com"
] | cg@ibenxi.com |
2147be05b184d926f084212271de3ca388340ec6 | 815cbe079ae7a0bc705dbc61e28ae2a6db67fb40 | /HW1/consumer/consume.py | 37c370073cfc76503f5ed72dd34f642954a912d8 | [] | no_license | Avens8/prom-prog | d81a79a81e4f3eef0383321b16aa007ac3a0c2c1 | 865aa3fea92328167d97a2355ee1c49b1cc47d0e | refs/heads/master | 2020-04-27T00:00:52.505525 | 2019-03-11T14:45:33 | 2019-03-11T14:45:33 | 173,921,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import pika
import random
import time
rabbit_params = pika.ConnectionParameters('rabbit', 5672)
print(f"Params: {rabbit_params}")
while True:
try:
connection = pika.BlockingConnection(rabbit_params)
break
except Exception:
time.sleep(2)
channel = connection.channel()
channel.queue_declare(queue='random_queue')
consume = lambda ch, meth, head, body: print("Got number", int(body.decode()))
channel.basic_consume(consume, 'random_queue')
channel.start_consuming()
| [
"tsayipad@gmail.com"
] | tsayipad@gmail.com |
602f08b17eaf38c3e18c8e2e5eae219ff5d7062c | b3a6c99ac54db4af1e56d01e77d6dc7442fd4eb0 | /iamminji/leetcode/medium/my_calendar_II.py | b885dbcf701d47a7a5bf916f9f60222382587023 | [] | no_license | algorizum/problem-solving | d2981bc3256ba322cb37c8678b165eb1d51232d5 | dacc0b5ccda149f1c25f73649ee41ea595604f46 | refs/heads/master | 2021-01-19T16:43:40.773066 | 2018-05-17T15:28:06 | 2018-05-17T15:28:06 | 101,023,963 | 6 | 3 | null | 2018-05-17T15:28:07 | 2017-08-22T05:18:48 | Java | UTF-8 | Python | false | false | 673 | py | # 731. My Calendar II
# https://leetcode.com/problems/my-calendar-ii/description/
class MyCalendarTwo:
def __init__(self):
self.double_booked = list()
self.schedule = list()
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: bool
"""
for s, e in self.double_booked:
if start < e and end > s:
return False
# check if already booked
for s, e in self.schedule:
if start < e and end > s:
self.double_booked.append((max(start, s), min(end, e)))
self.schedule.append((start, end))
return True
| [
"sberryyogurt@gmail.com"
] | sberryyogurt@gmail.com |
4c56b7b942d3642808308b257c1f7a97574dee8c | 002e67eae7a2cffe90bc71955ea8ca22a9b54599 | /module_planner/module_planner/wsgi.py | 93a53468f756950765d3caf66a28d2b6cb635e47 | [] | no_license | alcarney/moduleplanner-django | 2711486590d83fc2d59e9535bbcfcb9b124fba74 | bf82eb02085a24b35c26f852bf0df76212664ba6 | refs/heads/master | 2021-01-02T23:13:57.728867 | 2014-11-13T16:29:20 | 2014-11-13T16:29:20 | 25,308,743 | 3 | 0 | null | 2014-10-30T18:07:22 | 2014-10-16T15:31:14 | Python | UTF-8 | Python | false | false | 403 | py | """
WSGI config for module_planner project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Must be set before get_wsgi_application() runs so Django can locate the
# project's settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "module_planner.settings")
from django.core.wsgi import get_wsgi_application
# WSGI callable handed to the application server (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"alex.rugby101@gmail.com"
] | alex.rugby101@gmail.com |
676f0f869065ed32c492d9de95bbf5207f3642af | 096571f7ae7cc729530aa06bc0ce9b7c252557e3 | /music_controller/frontend/urls.py | 13f3e33540d632f9615e05e3a5aec1d1bdb16712 | [
"MIT"
] | permissive | ParthThakur/spotify-shared-room | bd92a5200a113608952a21515ce0e5d231a84979 | 89e115c837e898a3afc822079c42eb22befa1b32 | refs/heads/main | 2023-04-10T06:43:08.532090 | 2021-04-19T22:46:29 | 2021-04-19T22:46:29 | 357,845,497 | 0 | 0 | null | 2021-04-19T16:30:02 | 2021-04-14T09:18:35 | Python | UTF-8 | Python | false | false | 218 | py | from django.urls import path
from .views import index
app_name = 'frontend'
urlpatterns = [
path('', index, name=''),
path('room/<str:roomCode>', index, name='current_room'),
path('<str:page>', index),
]
| [
"parththakur@gmail.com"
] | parththakur@gmail.com |
0cfd8b34c4c0105a50bc0096032ea57377428d01 | bb8321a62fa5f46b75c13f52a42bfb83a7a87ced | /src/tools/submission alogn.py | b25b66e672b1908aa99102281c6cb79354146e14 | [] | no_license | GuysBarash/Omission-attack-code | 51e45ac7f2ad188a93a4a5095012be6997930ee0 | 36188248c7b60728b9e94e74150936b05a05540b | refs/heads/master | 2023-06-29T13:00:40.029342 | 2021-08-05T12:46:46 | 2021-08-05T12:46:46 | 265,535,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,710 | py | bb = '''
Dataset clss Surrogate victim method Results
CIFAR10 10 Googlenet Resnet18 KNN 0.25
CIFAR10 10 Googlenet MobleNetV2 KNN 0.15
CIFAR10 10 Googlenet VGG11 KNN 0.14
CIFAR10 10 Googlenet AlexNet KNN 0.08
MNIST 2 X KNN5 KNN 0.85
MNIST 2 X GNB KNN 0.80
MNIST 3 X ANN KNN 0.69
MNIST 2 ANN ANN Greedy 0.46
MNIST 2 X ANN KNN 0.45
MNIST 3 GNB Dtree Genetic 0.44
MNIST 2 GNB GNB Genetic 0.43
MNIST 2 GNB GNB Greedy 0.43
MNIST 2 ANN ANN Genetic 0.27
MNIST 2 X SVM KNN 0.18
MNIST 3 X Dtree KNN 0.16
MNIST 2 SVM SVM Genetic 0.15
MNIST 3 X KNN5 KNN 0.15
MNIST 3 X SVM KNN 0.10
MNIST 2 SVM SVM Greedy 0.07
Synthetic 2 X KNN5 KNN 1.00
Synthetic 2 X Dtree KNN 0.90
Synthetic 2 X ANN KNN 0.65
Synthetic 2 X SVM KNN 0.48
Synthetic 2 X GNB KNN 0.17
'''
wb = '''
Dataset classes victim method Results
IMDB 2 1DConvNet Genetic 0.80
MNIST 2 ANN Genetic 1.00
MNIST 2 GNB Genetic 1.00
MNIST 2 GNB Greedy 1.00
MNIST 3 ANN Genetic 1.00
MNIST 3 SVM Genetic 1.00
MNIST 2 KNN5 Genetic 0.90
MNIST 2 SVM Genetic 0.82
MNIST 3 KNN5 Genetic 0.55
MNIST 2 ANN Greedy 0.54
MNIST 2 KNN5 Greedy 0.25
MNIST 2 SVM Greedy 0.05
Synthetic 2 KNN5 Genetic 0.99
Synthetic 2 ANN Genetic 0.88
Synthetic 2 SVM Genetic 0.87
Synthetic 2 ANN Greedy 0.86
Synthetic 2 Dtree Genetic 0.85
Synthetic 2 GNB Genetic 0.58
Synthetic 2 Dtree Greedy 0.55
Synthetic 2 GNB Greedy 0.52
Synthetic 2 KNN5 Greedy 0.36
Synthetic 2 SVM Greedy 0.17
'''
def format_table(s):
    """Print the tab-separated table *s* as the body of a LaTeX tabular.

    The first non-empty line is treated as the header row; every row is
    emitted as 'a & b & ...\\\\' with \\hline separators around the header
    and after the body.
    """
    rows = [line.split('\t') for line in s.split('\n') if line != '']
    header, body = rows[0], rows[1:]
    print(r'\hline')
    print(' & '.join(header) + '\t' + r'\\')
    print(r'\hline')
    for row in body:
        print(' & '.join(row) + '\t' + r'\\')
    print(r'\hline')
format_table(wb) | [
"guysbarash@gmail.com"
] | guysbarash@gmail.com |
b0f9d17a9f5562e351378ebc5872b0ee1c592875 | 778896ead2f15578b7bb6bb4b664070289145abf | /dataset.py | 7dd0fc92edaf27f3c19e57a1fab020b5159db27b | [] | no_license | bognarpeter/satellite-image-analysis | 4e412e08765fd2317c33ca36737637c28da1cfac | 79ef085026a9c2c4e7898ef86e63c57d779b7b2d | refs/heads/master | 2023-08-01T04:19:55.932424 | 2021-09-29T15:24:08 | 2021-09-29T15:24:08 | 411,719,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,675 | py | import os
from PIL import Image
from torch.utils.data import Dataset
import numpy as np
SPLIT_CHARACTER = "_"  # token separator used in image/mask file names
MASK_COLOR_MAP = "L"  # PIL mode "L": 8-bit single-channel grayscale
MASK_BASE_NAME = "mask"  # file-name stem shared by all mask files
def normalize_array(array):
    """Min-max scale *array* into [0, 1]; constant arrays are returned as-is."""
    lo = np.min(array)
    span = np.max(array) - lo
    if span == 0:
        # All values equal: avoid division by zero, keep the input unchanged.
        return array
    return (array - lo) / span
def get_mask_name(image_name):
    """Derive the mask file name matching *image_name*.

    Keeps the last two '_'-separated tokens of the image name and prefixes
    them with the mask base name, e.g. 'img_03_07.png' -> 'mask_03_07.png'.
    """
    tokens = image_name.split(SPLIT_CHARACTER)
    return SPLIT_CHARACTER.join([MASK_BASE_NAME, tokens[-2], tokens[-1]])
class SARDataset(Dataset):
    """Paired SAR image / urban-area mask dataset for segmentation.

    Every file in *image_dir* is expected to have a matching mask in
    *mask_dir* whose name is derived via get_mask_name().
    """
    def __init__(self, image_dir, mask_dir, transform=None):
        # transform: optional albumentations-style callable taking
        # image=/mask= keyword arguments -- assumed, confirm with callers.
        self._image_dir = image_dir
        self._mask_dir = mask_dir
        self._transform = transform
        self._images = os.listdir(image_dir)
    def __len__(self):
        # One sample per file found in image_dir.
        return len(self._images)
    def __getitem__(self, index):
        """Return one (image, mask) pair, normalised and optionally transformed."""
        img_path = os.path.join(self._image_dir, self._images[index])
        mask_name = get_mask_name(self._images[index])
        mask_path = os.path.join(self._mask_dir, mask_name)
        image = np.array(Image.open(img_path))
        # Mask loaded as float32 grayscale so it can hold {0.0, 1.0} labels.
        mask = np.array(Image.open(mask_path).convert(MASK_COLOR_MAP), dtype=np.float32)
        image = normalize_array(image)
        # urban areas should be white
        mask[mask == 255.0] = 1.0
        # where the satellite image is blank, the mask should be black
        # NOTE(review): this runs AFTER normalisation, so it zeroes the mask
        # wherever the image equals its minimum value; that matches "blank"
        # only if the raw image minimum is 0 -- confirm.
        mask[image == 0.0] = 0.0
        if self._transform is not None:
            augmentations = self._transform(image=image, mask=mask)
            image = augmentations["image"]
            mask = augmentations["mask"]
        return image, mask
| [
"bognarpaeter@gmail.com"
] | bognarpaeter@gmail.com |
6d00a47bea0f21c205fe7a23b59e03cab7808ad9 | 93c10825c0442dbbdccc3fe79c321dfa4b716e64 | /article/prepro.py | e1c5124ce98d50b7c25584058aff3d842cc8519e | [] | no_license | hzhang131/research | 37c37478d8ce740b19566c008cecff4989c22c9f | 0f04a5986d9dee4b268cbfbb647209b525362265 | refs/heads/master | 2023-01-01T19:40:33.653542 | 2020-10-24T11:52:20 | 2020-10-24T11:52:20 | 305,125,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,675 | py | import pandas
import wikipediaapi
import wikipedia
import csv
import re
from collections import Counter
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, HashingVectorizer
from sklearn.svm import OneClassSVM
import json
import re
import nltk
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from nltk.stem.porter import *
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from termcolor import colored
from scipy import spatial
from numpy import dot
from numpy.linalg import norm
import sys
import time
import wikipediaapi
import wikipedia
from termcolor import colored
from scipy import spatial
from numpy import dot
from numpy.linalg import norm
import sys
import time
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import Index, Search
from py2neo import Graph
import os
def prepro():
id_abstract_pair = {}
title_abstract_pair = {}
# print(os.path.exists('../wikipedia/id_title_abstract_updated.txt'))
# with open('../wikipedia/id_title_abstract_updated.txt', 'r') as file:
#
# for row in file:
# lst = row.split('\t')
# id_ = lst[0]
# abstract = lst[2]
# title = lst[1]
# id_abstract_pair[id_] = abstract
# title_abstract_pair[title] = abstract
cis = {}
# with open('../wikipedia/category_ids.txt', 'r') as file:
# print('3')
# for idx, row in enumerate(file):
# if not idx % 100000:
# print('3', idx)
# lst = row.split('\t')
# cat = lst[0]
# ids = lst[1:]
# ids = [i for i in ids if i in id_abstract_pair]
# if len(ids) < 500 and 'All' not in cat and 'Wikipedia' not in cat and 'Articles' not in cat and 'articles' not in cat \
# and 'dmy' not in cat and 'Pages' not in cat and 'mdy' not in cat and 'different' not in cat \
# and 'CS1' not in cat and 'Webarchive' not in cat and 'wikidata' not in cat and 'link' not in cat\
# and 'Wikidata' not in cat and 'Vague' not in cat and 'Use' not in cat and 'List' not in cat:
# # if 'Performing_arts' in cat:
# # print(cat)
# cis[cat] = ids
# print(list(cis.keys())[:50])
ics = {}
# with open('../wikipedia/id_categories.txt', 'r') as file:
# for idx, row in enumerate(file):
# if not idx % 1000000:
# print('2', idx)
# lst = row.split('\t')
# id_ = lst[0]
# if id_ in id_abstract_pair:
# id_ = lst[0]
# cats = [i for i in lst[1:] if i in cis]
# ics[id_] = cats
title_id_pair = {}
id_title_pair = {}
# with open('../wikipedia/id_title_abstract_updated.txt', 'r') as file:
# for row in file:
# lst = row.split('\t')
# id_ = lst[0]
# title = lst[1]
# title_id_pair[title] = id_
# id_title_pair[id_] = title
sw = ["a", "about", "above", "after", "again", "against", "ain", "all", "am", "an", "and", "any", "are", "aren", "aren't", "as", "at", "be", "because", "been", "before", "being", "below", "between", "both", "but", "by", "can", "couldn", "couldn't", "d", "did", "didn", "didn't", "do", "does", "doesn", "doesn't", "doing", "don", "don't", "down", "during", "each", "few", "for", "from", "further", "had", "hadn", "hadn't", "has", "hasn", "hasn't", "have", "haven", "haven't", "having", "he", "her", "here", "hers", "herself", "him", "himself", "his", "how", "i", "if", "in", "into", "is", "isn", "isn't", "it", "it's", "its", "itself", "just", "ll", "m", "ma", "me", "mightn", "mightn't", "more", "most", "mustn", "mustn't", "my", "myself", "needn", "needn't", "no", "nor", "not", "now", "o", "of", "off", "on", "once", "only", "or", "other", "our", "ours", "ourselves", "out", "over", "own", "re", "s", "same", "shan", "shan't", "she", "she's", "should", "should've", "shouldn", "shouldn't", "so", "some", "such", "t", "than", "that", "that'll", "the", "their", "theirs", "them", "themselves", "then", "there", "these", "they", "this", "those", "through", "to", "too", "under", "until", "up", "ve", "very", "was", "wasn", "wasn't", "we", "were", "weren", "weren't", "what", "when", "where", "which", "while", "who", "whom", "why", "will", "with", "won", "won't", "wouldn", "wouldn't", "y", "you", "you'd", "you'll", "you're", "you've", "your", "yours", "yourself", "yourselves", "could", "he'd", "he'll", "he's", "here's", "how's", "i'd", "i'll", "i'm", "i've", "let's", "ought", "she'd", "she'll", "that's", "there's", "they'd", "they'll", "they're", "they've", "we'd", "we'll", "we're", "we've", "what's", "when's", "where's", "who's", "why's", "would", "able", "abst", "accordance", "according", "accordingly", "across", "act", "actually", "added", "adj", "affected", "affecting", "affects", "afterwards", "ah", "almost", "alone", "along", "already", "also", "although", "always", "among", 
"amongst", "announce", "another", "anybody", "anyhow", "anymore", "anyone", "anything", "anyway", "anyways", "anywhere", "apparently", "approximately", "arent", "arise", "around", "aside", "ask", "asking", "auth", "available", "away", "awfully", "b", "back", "became", "become", "becomes", "becoming", "beforehand", "begin", "beginning", "beginnings", "begins", "behind", "believe", "beside", "besides", "beyond", "biol", "brief", "briefly", "c", "ca", "came", "cannot", "can't", "cause", "causes", "certain", "certainly", "co", "com", "come", "comes", "contain", "containing", "contains", "couldnt", "date", "different", "done", "downwards", "due", "e", "ed", "edu", "effect", "eg", "eight", "eighty", "either", "else", "elsewhere", "end", "ending", "enough", "especially", "et", "etc", "even", "ever", "every", "everybody", "everyone", "everything", "everywhere", "ex", "except", "f", "far", "ff", "fifth", "first", "five", "fix", "followed", "following", "follows", "former", "formerly", "forth", "found", "four", "furthermore", "g", "gave", "get", "gets", "getting", "give", "given", "gives", "giving", "go", "goes", "gone", "got", "gotten", "h", "happens", "hardly", "hed", "hence", "hereafter", "hereby", "herein", "heres", "hereupon", "hes", "hi", "hid", "hither", "home", "howbeit", "however", "hundred", "id", "ie", "im", "immediate", "immediately", "importance", "important", "inc", "indeed", "index", "information", "instead", "invention", "inward", "itd", "it'll", "j", "k", "keep", "keeps", "kept", "kg", "km", "know", "known", "knows", "l", "largely", "last", "lately", "later", "latter", "latterly", "least", "less", "lest", "let", "lets", "like", "liked", "likely", "line", "little", "'ll", "look", "looking", "looks", "ltd", "made", "mainly", "make", "makes", "many", "may", "maybe", "mean", "means", "meantime", "meanwhile", "merely", "mg", "might", "million", "miss", "ml", "moreover", "mostly", "mr", "mrs", "much", "mug", "must", "n", "na", "name", "namely", "nay", "nd", 
"near", "nearly", "necessarily", "necessary", "need", "needs", "neither", "never", "nevertheless", "new", "next", "nine", "ninety", "nobody", "non", "none", "nonetheless", "noone", "normally", "nos", "noted", "nothing", "nowhere", "obtain", "obtained", "obviously", "often", "oh", "ok", "okay", "old", "omitted", "one", "ones", "onto", "ord", "others", "otherwise", "outside", "overall", "owing", "p", "page", "pages", "part", "particular", "particularly", "past", "per", "perhaps", "placed", "please", "plus", "poorly", "possible", "possibly", "potentially", "pp", "predominantly", "present", "previously", "primarily", "probably", "promptly", "proud", "provides", "put", "q", "que", "quickly", "quite", "qv", "r", "ran", "rather", "rd", "readily", "really", "recent", "recently", "ref", "refs", "regarding", "regardless", "regards", "related", "relatively", "research", "respectively", "resulted", "resulting", "results", "right", "run", "said", "saw", "say", "saying", "says", "sec", "section", "see", "seeing", "seem", "seemed", "seeming", "seems", "seen", "self", "selves", "sent", "seven", "several", "shall", "shed", "shes", "show", "showed", "shown", "showns", "shows", "significant", "significantly", "similar", "similarly", "since", "six", "slightly", "somebody", "somehow", "someone", "somethan", "something", "sometime", "sometimes", "somewhat", "somewhere", "soon", "sorry", "specifically", "specified", "specify", "specifying", "still", "stop", "strongly", "sub", "substantially", "successfully", "sufficiently", "suggest", "sup", "sure", "take", "taken", "taking", "tell", "tends", "th", "thank", "thanks", "thanx", "thats", "that've", "thence", "thereafter", "thereby", "thered", "therefore", "therein", "there'll", "thereof", "therere", "theres", "thereto", "thereupon", "there've", "theyd", "theyre", "think", "thou", "though", "thoughh", "thousand", "throug", "throughout", "thru", "thus", "til", "tip", "together", "took", "toward", "towards", "tried", "tries", "truly", "try", 
"trying", "ts", "twice", "two", "u", "un", "unfortunately", "unless", "unlike", "unlikely", "unto", "upon", "ups", "us", "use", "used", "useful", "usefully", "usefulness", "uses", "using", "usually", "v", "value", "various", "'ve", "via", "viz", "vol", "vols", "vs", "w", "want", "wants", "wasnt", "way", "wed", "welcome", "went", "werent", "whatever", "what'll", "whats", "whence", "whenever", "whereafter", "whereas", "whereby", "wherein", "wheres", "whereupon", "wherever", "whether", "whim", "whither", "whod", "whoever", "whole", "who'll", "whomever", "whos", "whose", "widely", "willing", "wish", "within", "without", "wont", "words", "world", "wouldnt", "www", "x", "yes", "yet", "youd", "youre", "z", "zero", "a's", "ain't", "allow", "allows", "apart", "appear", "appreciate", "appropriate", "associated", "best", "better", "c'mon", "c's", "cant", "changes", "clearly", "concerning", "consequently", "consider", "considering", "corresponding", "course", "currently", "definitely", "described", "despite", "entirely", "exactly", "example", "going", "greetings", "hello", "help", "hopefully", "ignored", "inasmuch", "indicate", "indicated", "indicates", "inner", "insofar", "it'd", "keep", "keeps", "novel", "presumably", "reasonably", "second", "secondly", "sensible", "serious", "seriously", "sure", "t's", "third", "thorough", "thoroughly", "three", "well", "wonder", "a", "about", "above", "above", "across", "after", "afterwards", "again", "against", "all", "almost", "alone", "along", "already", "also", "although", "always", "am", "among", "amongst", "amoungst", "amount", "an", "and", "another", "any", "anyhow", "anyone", "anything", "anyway", "anywhere", "are", "around", "as", "at", "back", "be", "became", "because", "become", "becomes", "becoming", "been", "before", "beforehand", "behind", "being", "below", "beside", "besides", "between", "beyond", "bill", "both", "bottom", "but", "by", "call", "can", "cannot", "cant", "co", "con", "could", "couldnt", "cry", "de", "describe", 
"detail", "do", "done", "down", "due", "during", "each", "eg", "eight", "either", "eleven", "else", "elsewhere", "empty", "enough", "etc", "even", "ever", "every", "everyone", "everything", "everywhere", "except", "few", "fifteen", "fify", "fill", "find", "fire", "first", "five", "for", "former", "formerly", "forty", "found", "four", "from", "front", "full", "further", "get", "give", "go", "had", "has", "hasnt", "have", "he", "hence", "her", "here", "hereafter", "hereby", "herein", "hereupon", "hers", "herself", "him", "himself", "his", "how", "however", "hundred", "ie", "if", "in", "inc", "indeed", "interest", "into", "is", "it", "its", "itself", "keep", "last", "latter", "latterly", "least", "less", "ltd", "made", "many", "may", "me", "meanwhile", "might", "mill", "mine", "more", "moreover", "most", "mostly", "move", "much", "must", "my", "myself", "name", "namely", "neither", "never", "nevertheless", "next", "nine", "no", "nobody", "none", "noone", "nor", "not", "nothing", "now", "nowhere", "of", "off", "often", "on", "once", "one", "only", "onto", "or", "other", "others", "otherwise", "our", "ours", "ourselves", "out", "over", "own", "part", "per", "perhaps", "please", "put", "rather", "re", "same", "see", "seem", "seemed", "seeming", "seems", "serious", "several", "she", "should", "show", "side", "since", "sincere", "six", "sixty", "so", "some", "somehow", "someone", "something", "sometime", "sometimes", "somewhere", "still", "such", "system", "take", "ten", "than", "that", "the", "their", "them", "themselves", "then", "thence", "there", "thereafter", "thereby", "therefore", "therein", "thereupon", "these", "they", "thickv", "thin", "third", "this", "those", "though", "three", "through", "throughout", "thru", "thus", "to", "together", "too", "top", "toward", "towards", "twelve", "twenty", "two", "un", "under", "until", "up", "upon", "us", "very", "via", "was", "we", "well", "were", "what", "whatever", "when", "whence", "whenever", "where", "whereafter", 
"whereas", "whereby", "wherein", "whereupon", "wherever", "whether", "which", "while", "whither", "who", "whoever", "whole", "whom", "whose", "why", "will", "with", "within", "without", "would", "yet", "you", "your", "yours", "yourself", "yourselves", "the", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "co", "op", "research-articl", "pagecount", "cit", "ibid", "les", "le", "au", "que", "est", "pas", "vol", "el", "los", "pp", "u201d", "well-b", "http", "volumtype", "par", "0o", "0s", "3a", "3b", "3d", "6b", "6o", "a1", "a2", "a3", "a4", "ab", "ac", "ad", "ae", "af", "ag", "aj", "al", "an", "ao", "ap", "ar", "av", "aw", "ax", "ay", "az", "b1", "b2", "b3", "ba", "bc", "bd", "be", "bi", "bj", "bk", "bl", "bn", "bp", "br", "bs", "bt", "bu", "bx", "c1", "c2", "c3", "cc", "cd", "ce", "cf", "cg", "ch", "ci", "cj", "cl", "cm", "cn", "cp", "cq", "cr", "cs", "ct", "cu", "cv", "cx", "cy", "cz", "d2", "da", "dc", "dd", "de", "df", "di", "dj", "dk", "dl", "do", "dp", "dr", "ds", "dt", "du", "dx", "dy", "e2", "e3", "ea", "ec", "ed", "ee", "ef", "ei", "ej", "el", "em", "en", "eo", "ep", "eq", "er", "es", "et", "eu", "ev", "ex", "ey", "f2", "fa", "fc", "ff", "fi", "fj", "fl", "fn", "fo", "fr", "fs", "ft", "fu", "fy", "ga", "ge", "gi", "gj", "gl", "go", "gr", "gs", "gy", "h2", "h3", "hh", "hi", "hj", "ho", "hr", "hs", "hu", "hy", "i", "i2", "i3", "i4", "i6", "i7", "i8", "ia", "ib", "ic", "ie", "ig", "ih", "ii", "ij", "il", "in", "io", "ip", "iq", "ir", "iv", "ix", "iy", "iz", "jj", "jr", "js", "jt", "ju", "ke", "kg", "kj", "km", "ko", "l2", "la", "lb", "lc", "lf", "lj", "ln", "lo", "lr", "ls", "lt", "m2", "ml", "mn", "mo", "ms", "mt", "mu", "n2", "nc", "nd", "ne", "ng", "ni", "nj", "nl", "nn", "nr", "ns", "nt", "ny", "oa", "ob", "oc", "od", "of", "og", "oi", "oj", "ol", 
"om", "on", "oo", "oq", "or", "os", "ot", "ou", "ow", "ox", "oz", "p1", "p2", "p3", "pc", "pd", "pe", "pf", "ph", "pi", "pj", "pk", "pl", "pm", "pn", "po", "pq", "pr", "ps", "pt", "pu", "py", "qj", "qu", "r2", "ra", "rc", "rd", "rf", "rh", "ri", "rj", "rl", "rm", "rn", "ro", "rq", "rr", "rs", "rt", "ru", "rv", "ry", "s2", "sa", "sc", "sd", "se", "sf", "si", "sj", "sl", "sm", "sn", "sp", "sq", "sr", "ss", "st", "sy", "sz", "t1", "t2", "t3", "tb", "tc", "td", "te", "tf", "th", "ti", "tj", "tl", "tm", "tn", "tp", "tq", "tr", "ts", "tt", "tv", "tx", "ue", "ui", "uj", "uk", "um", "un", "uo", "ur", "ut", "va", "wa", "vd", "wi", "vj", "vo", "wo", "vq", "vt", "vu", "x1", "x2", "x3", "xf", "xi", "xj", "xk", "xl", "xn", "xo", "xs", "xt", "xv", "xx", "y2", "yj", "yl", "yr", "ys", "yt", "zi", "zz"]
graph = Graph("bolt://localhost:11005", auth=("neo4j", "230804"))
# sw = stopwords.words("english")
swd = Counter(sw)
# process the training dataset.
# topic distribution: 50% CS, 50% non-CS.
# More automotives, politicans and non-CS scientists.
# Sports events, history, zoology. flora and fauna.
# Divided_regions, Social institutions, Music and musicians
noncs_list = []
topics = ['Performing_arts', 'Visual_arts', 'Natural_sciences', 'Home_economics', 'Linguistics', 'Literature',
'Law-related_lists', 'Philosophy', 'Theology', 'Anthropology', 'Archaeology', 'Archaeology', 'Geography', 'Political_science'
,'Cognitive_science', 'Sociology', 'Social_work', 'Biology', 'Chemistry', 'Earth_sciences', 'Space_science', 'Physics', 'Mathematics', 'Statistics', 'Business',
'Chemical_engineering', 'Civil_engineering', 'Materials_science', 'Mechanical_engineering', 'Systems_science', 'Advertising', 'Music_genres', 'Car_ classifications'
,'21st-century_politicians', 'Sports_terminology', 'Zoology', 'Plant_subfamilies', '21st-century_social_scientists', 'Countries_in_Asia', 'Car_brands',
'Military_technology', 'Journalism', 'Geography_of_the_United_States', 'Music', 'Types_of_university_or_college', 'Political_scandals', 'Finance',
'Public_safety_ministries', 'Banking', 'Foods', 'Mountains', 'Performance_art_venues', 'Rivers', 'Acting', 'Current_shows', 'Ammunition', 'Film_genres', 'Naval_ warfare_tactics',
'Snake_genera']
# topics = ['\'' + i + '\'' for i in topics]
topics = ['\"\'' + i + '\'\"' for i in topics]
query = '[' + ','.join(topics) + ']'
tbl = graph.run('match (a:Article) -[:in] -> (b:Category_name) where b.name in %s return a.title, a.abstract' % query).to_table()
tbl = np.array(tbl)
out_noncs = []
for row in tbl:
if 'Wikipedia' in row[0] or 'Template' in row[0] or 'Category' in row[0] or not row[1]:
continue
else:
string = row[1]
string = string.replace(re.escape(row[0]), '***')
string = re.sub(r'\(.*?\)', '', string)
if not string:
continue
string = string.lower()
string = ' '.join([word for word in string.split() if word not in swd])
out_noncs.append(string)
# for i in topics:
# # string = 'Category:'+ i
# # wiki_wiki = wikipediaapi.Wikipedia('en')
# # dic = wiki_wiki.page(string).categorymembers
# print(i, i in cis)
# if i in cis:
# dic = cis[i]
# dic = [id_title_pair.get(j) for j in dic if j in id_title_pair]
# noncs_list += [key for key in dic if 'Wikipedia' not in key and 'Template' not in key and 'Category' not in key]
# else:
# continue
# # noncs_list += [key for key in dic if 'Wikipedia' not in key and 'Template' not in key and 'Category' not in key]
#
# out_noncs = {}
# for idx, i in enumerate(noncs_list):
# if not idx % 100:
# print(i)
# if i in noncs_list and i not in out_noncs and i in title_abstract_pair:
# out_noncs[i] = title_abstract_pair.get(i).replace(i, '***')
# out_noncs[i] = re.sub(r'\(.*?\)', '', out_noncs[i])
# out_noncs[i] = out_noncs[i].lower()
# out_noncs[i] = ' '.join([word for word in out_noncs[i].split() if word not in swd])
#
# len(out_noncs)
cs_list = []
topics = ['Artificial_intelligence', 'Computational_science', 'Computer_graphics', 'Computer_architecture', 'Computer_security', 'Analysis_of_algorithms',
'Algorithms', 'Theoretical_computer_science', 'Human-computer_interaction', 'Human-based_computation', 'Mathematical_optimization', 'Programming_languages', 'Type_theory', 'Concurrency_control'
,'Formal_methods', 'Database_theory', 'Software_engineering', 'Theory_of_computation', 'Embedded_systems', 'Computer_scientists', 'Computer_networking', 'Computer_hardware', 'Unsolved_problems_in_computer_ science', 'Software', 'Computational_geometry',
'Computer_systems', 'Computer_science_education', 'Distributed_computing', 'Computer_science_organizations', 'Computer_science_conferences', 'Programming_contests', 'Data_mining', 'Compiler_optimizations', 'Data_modeling_languages']
# topics = ['\'' + i + '\'' for i in topics]
topics = ['\"\'' + i + '\'\"' for i in topics]
query = '[' + ','.join(topics) + ']'
tbl = graph.run('match (a:Article) -[:in] -> (b:Category_name) where b.name in %s return a.title, a.abstract' % query).to_table()
tbl = np.array(tbl)
out_cs = []
for row in tbl:
if 'Wikipedia' in row[0] or 'Template' in row[0] or 'Category' in row[0] or not row[1]:
continue
else:
string = row[1]
string = string.replace(re.escape(row[0]), '***')
string = re.sub(r'\(.*?\)', '', string)
if not string:
continue
string = string.lower()
string = ' '.join([word for word in string.split() if word not in swd])
out_cs.append(string)
# for i in topics:
# # string = 'Category:'+ i
# # wiki_wiki = wikipediaapi.Wikipedia('en')
# # dic = wiki_wiki.page(string).categorymembers
# print(i, i in cis)
# if i in cis:
# print('true')
# dic = cis[i]
# dic = [id_title_pair.get(j) for j in dic if j in id_title_pair]
# print(dic[0])
# cs_list += [key for key in dic if 'Wikipedia' not in key and 'Template' not in key and 'Category' not in key]
# else:
# continue
# out_cs = {}
# for idx, i in enumerate(cs_list):
# if not idx % 100:
# print(i)
# if i in cs_list and i not in out_cs and i in title_abstract_pair:
# out_cs[i] = title_abstract_pair.get(i).replace(i, '***')
# out_cs[i] = re.sub(r'\(.*?\)', '', out_cs[i])
# out_cs[i] = out_cs[i].lower()
# out_cs[i] = ' '.join([word for word in out_cs[i].split() if word not in swd])
# len(out_cs)
training_tuples = []
# cs is 1, non_cs is 0.
for key in out_noncs:
# training_tuples.append([out_noncs.get(key), 0])
training_tuples.append([key, 0])
for key in out_cs:
# training_tuples.append([out_cs.get(key), 1])
training_tuples.append([key, 1])
training_tuples = np.array(training_tuples)
np.random.shuffle(training_tuples)
x_train, y_train = [], []
for i in training_tuples:
x_train.append(i[0])
y_train.append(i[1])
print(len(y_train))
vectorizer = TfidfVectorizer(ngram_range=(1, 2), min_df = 20)
X = vectorizer.fit_transform(x_train)
Y = np.array(y_train)
kf = KFold(n_splits=2)
a, b, c, d = [], [], [], []
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
Y_train, y_test = Y[train_index], Y[test_index]
clf = GaussianNB()
clf.fit(X_train.todense(), Y_train)
y_pred = clf.predict(X_test.todense())
a.append(f1_score(y_test, y_pred, average="macro"))
b.append(precision_score(y_test, y_pred, average="macro"))
c.append(recall_score(y_test, y_pred, average="macro"))
d.append(accuracy_score(y_test, y_pred))
# print('f1 ', np.mean(a))
# print('precision ', np.mean(b))
# print('recall ', np.mean(c))
# print('accuracy ', np.mean(d))
X_train, X_test = X[:len(Y)//2], X[len(Y)//2:]
Y_train, Y_test = Y[:len(Y)//2], Y[len(Y)//2:]
clf = GaussianNB()
# clf.fit(X.todense(), Y)
clf.fit(X_train.todense(), Y_train)
Y_pred = clf.predict(X_test.todense())
accuracy_score(Y_test, Y_pred)
return vectorizer, clf, ics, cis, id_abstract_pair, title_id_pair, id_title_pair, swd
def suggest(query_in, vectorizer, clf, es, swd, filter_):
start = time.time()
# , 'abstract^10.0'
print(colored('start searching ', 'magenta'))
if filter_:
res = es.search(index="ita", body={"from": 0, "size": 100, "query": {"multi_match": {"query": query_in +'^5' + ' %s' %filter_, "fields": ['title^10.0', 'abstract^1.0']}}})
else:
res = es.search(index="ita", body={"from": 0, "size": 100, "query": {"multi_match": {"query": query_in, "fields": ['title^10.0', 'abstract^1.0']}}})
print(colored('done searching ', 'magenta'))
summary_list = []
trained_list = []
words = []
for i in res['hits']['hits']:
if i.get('_source').get('abstract'):
raw = i.get('_source').get('abstract')
processed = re.sub(r'\(.*?\)', '', raw)
processed = processed.lower()
processed = ' '.join([word for word in processed.split() if word not in swd])
trained_list.append(processed)
summary_list.append(raw)
words.append(i.get('_source').get('title'))
if not summary_list:
print(colored('No results Available', 'red'))
sys.exit()
trained_list = np.array(trained_list)
transformed = vectorizer.transform(trained_list)
y = clf.predict(transformed.todense())
keep_words = []
temp_out = []
for idx in range(len(y)):
if int(y[idx]) == 1 and 'List' not in words[idx]:
temp_out.append(summary_list[idx])
keep_words.append(words[idx])
print('DONE', time.time() - start)
return keep_words[:10], temp_out[:10]
| [
"zhanghongshuo@MacBook-Pro-5.local"
] | zhanghongshuo@MacBook-Pro-5.local |
96fa1c6a7f7094305b7d3f692a71be99c555cc09 | 9655be1c21f8b02c190c976c17ad6b77e1773c91 | /DjangoPlayground/wsgi.py | f7c1037ce7c533066ec2acb7884d9e0de2d70724 | [] | no_license | nathan5x-zz/DjangoPlayground | 66bb7eeb13a77b709232fa4cc67ac4e9ac993a4a | e0cf2567eaaf018ff76ba5a0b6ee253e9d651cd0 | refs/heads/master | 2021-09-25T08:51:39.001488 | 2018-10-20T01:00:57 | 2018-10-20T01:00:57 | 153,854,007 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for DjangoPlayground project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DjangoPlayground.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [
"walkwithsabari@gmail.com"
] | walkwithsabari@gmail.com |
292ffd198700cdc76c0bcbe232ae0cb3ca792a13 | 07b751896b5e8c029a1808f5587a9bb30090b0b4 | /tensorflow/python/data/experimental/kernel_tests/restructured_dataset_test.py | 3b0d23d6e11ee17a3fe6ac5cf9cce767232c559a | [
"Apache-2.0"
] | permissive | danfischetti/tensorflow | c5326578bac35c6f9a47444d8f91e03097fc2506 | f3d4bf4345a442f605a45b1fbf74ea9656fa72ed | refs/heads/master | 2020-04-11T10:07:21.324395 | 2018-12-13T22:46:13 | 2018-12-13T22:46:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,105 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_RestructuredDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
# TODO(b/119837791): Add eager coverage
class RestructuredDatasetTest(test_base.DatasetTestBase):
@test_util.run_deprecated_v1
def testRestructureDataset(self):
components = (array_ops.placeholder(dtypes.int32),
(array_ops.placeholder(dtypes.int32, shape=[None]),
array_ops.placeholder(dtypes.int32, shape=[20, 30])))
dataset = dataset_ops.Dataset.from_tensors(components)
i32 = dtypes.int32
test_cases = [((i32, i32, i32), None),
(((i32, i32), i32), None),
((i32, i32, i32), (None, None, None)),
((i32, i32, i32), ([17], [17], [20, 30]))]
for new_types, new_shape_lists in test_cases:
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
self.assertEqual(new_types, new.output_types)
if new_shape_lists is not None:
for expected_shape_list, shape in zip(
nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):
if expected_shape_list is None:
self.assertIs(None, shape.ndims)
else:
self.assertEqual(expected_shape_list, shape.as_list())
fail_cases = [((i32, dtypes.int64, i32), None),
((i32, i32, i32, i32), None),
((i32, i32, i32), ((None, None), None)),
((i32, i32, i32), (None, None, None, None)),
((i32, i32, i32), (None, [None], [21, 30]))]
for new_types, new_shape_lists in fail_cases:
with self.assertRaises(ValueError):
# pylint: disable=protected-access
new = batching._RestructuredDataset(dataset, new_types, new_shape_lists)
# pylint: enable=protected-access
if __name__ == "__main__":
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
07d557b67c5f57d0bc58e144628ef21653545f9f | ff8db86ce558e57f7b24f8f6d890a3154f6d948f | /neutron_plugin_contrail/plugins/opencontrail/loadbalancer/v2/loadbalancer_member.py | bcc4781fbd29f19c81389f17ff651e751bc75193 | [
"Apache-2.0"
] | permissive | lungdear/tf-neutron-plugin | 143740d1cafb93f4cbe672e53a609c4771be6833 | d19e758673e1e28bf8b270b8e934857014a46cdf | refs/heads/master | 2022-12-04T21:18:39.869684 | 2020-08-08T13:32:59 | 2020-08-11T20:06:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,424 | py | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import uuid
from neutron_lbaas.extensions import loadbalancerv2
try:
from neutron.openstack.common import uuidutils
except ImportError:
from oslo_utils import uuidutils
try:
from neutron.common.exceptions import NotAuthorized
except ImportError:
from neutron_lib.exceptions import NotAuthorized
from vnc_api.vnc_api import IdPermsType, NoIdError
from vnc_api.vnc_api import LoadbalancerMember, LoadbalancerMemberType
from .. resource_manager import ResourceManager
class LoadbalancerMemberManager(ResourceManager):
_loadbalancer_member_type_mapping = {
'admin_state': 'admin_state_up',
'status': 'status',
'protocol_port': 'protocol_port',
'weight': 'weight',
'address': 'address',
'subnet_id': 'subnet_id',
}
@property
def property_type_mapping(self):
return self._loadbalancer_member_type_mapping
def make_properties(self, member):
props = LoadbalancerMemberType()
for key, mapping in self._loadbalancer_member_type_mapping.iteritems():
if mapping in member:
setattr(props, key, member[mapping])
return props
def _get_member_pool_id(self, member):
pool_uuid = member.parent_uuid
return pool_uuid
def make_dict(self, member, fields=None):
res = {'id': member.uuid,
'name': member.name,
'pool_id': member.parent_uuid,
'status': self._get_object_status(member)}
try:
pool = self._api.loadbalancer_pool_read(id=member.parent_uuid)
res['tenant_id'] = pool.parent_uuid.replace('-', '')
except NoIdError:
pass
props = member.get_loadbalancer_member_properties()
for key, mapping in self._loadbalancer_member_type_mapping.iteritems():
value = getattr(props, key, None)
if value is not None:
res[mapping] = value
return self._fields(res, fields)
def resource_read(self, id):
return self._api.loadbalancer_member_read(id=id)
def resource_list(self, tenant_id=None):
""" In order to retrive all the members for a specific tenant
the code iterates through all the pools.
"""
if tenant_id is None:
return self._api.loadbalancer_members_list()
pool_list = self._api.loadbalancer_pools_list(tenant_id)
if 'loadbalancer-pools' not in pool_list:
return {}
member_list = []
for pool in pool_list['loadbalancer-pools']:
pool_members = self._api.loadbalancer_members_list(
parent_id=pool['uuid'])
if 'loadbalancer-members' in pool_members:
member_list.extend(pool_members['loadbalancer-members'])
response = {'loadbalancer-members': member_list}
return response
def get_resource(self, context, id, pool_id, fields=None):
res = super(LoadbalancerMemberManager, self).get_resource(context, id)
if res and res['pool_id'] != pool_id:
raise loadbalancerv2.MemberNotFoundForPool(member_id=res['id'],
pool_id=res['pool_id'])
return self._fields(res, fields)
def get_collection(self, context, pool_id, filters=None, fields=None):
""" Optimize the query for members in a pool.
"""
member_list = []
pool_members = self._api.loadbalancer_members_list(
parent_id=pool_id)
if 'loadbalancer-members' in pool_members:
member_list.extend(pool_members['loadbalancer-members'])
response = []
for m in member_list:
res = self._get_resource_dict(m['uuid'], filters, fields)
if res is not None and self._is_authorized(context, res):
response.append(res)
return response
def resource_update(self, obj):
return self._api.loadbalancer_member_update(obj)
def resource_delete(self, id):
return self._api.loadbalancer_member_delete(id=id)
def get_exception_notfound(self, id=None):
return loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)
def get_exception_inuse(self, id=None):
pass
@property
def neutron_name(self):
return "member"
@property
def resource_name_plural(self):
return "loadbalancer-members"
def create(self, context, pool_id, member):
"""
Create a loadbalancer_member object.
"""
m = member['member']
try:
pool = self._api.loadbalancer_pool_read(id=pool_id)
except NoIdError:
raise loadbalancerv2.EntityNotFound(name='Pool', id=pool_id)
tenant_id = self._get_tenant_id_for_create(context, m)
if str(uuid.UUID(tenant_id)) != pool.parent_uuid:
raise NotAuthorized()
obj_uuid = uuidutils.generate_uuid()
props = self.make_properties(m)
id_perms = IdPermsType(enable=True)
member_db = LoadbalancerMember(
obj_uuid, pool, loadbalancer_member_properties=props,
id_perms=id_perms)
member_db.uuid = obj_uuid
self._api.loadbalancer_member_create(member_db)
return self.make_dict(member_db)
def update_properties(self, member_db, id, m):
    """Apply updatable fields from dict `m` onto the member DB object.

    Returns True when anything changed (caller then persists the object).
    """
    props = member_db.get_loadbalancer_member_properties()
    if self.update_properties_subr(props, m):
        member_db.set_loadbalancer_member_properties(props)
        return True
    return False
def delete(self, context, id, pool_id):
    """Delete member `id`, verifying both it and `pool_id` exist and match."""
    try:
        _ = self._api.loadbalancer_member_read(id=id)
    except NoIdError:
        raise loadbalancerv2.EntityNotFound(name=self.neutron_name, id=id)
    try:
        pool = self._api.loadbalancer_pool_read(id=pool_id)
    except NoIdError:
        raise loadbalancerv2.EntityNotFound(name='Pool',
                                            id=pool_id)
    # Reject deletes addressed through the wrong pool.
    if id not in [member['uuid'] for member in
                  pool.get_loadbalancer_members() or []]:
        raise loadbalancerv2.MemberNotFoundForPool(member_id=id,
                                                   pool_id=pool_id)
    super(LoadbalancerMemberManager, self).delete(context, id)
def update_object(self, member_db, id, m):
    """Validate that an updated member stays unique within its pool.

    Raises MemberExists when another member of the same pool already uses
    the same address:port pair. Returns True when validation passes.
    """
    pool_id = member_db.parent_uuid
    try:
        pool = self._api.loadbalancer_pool_read(id=pool_id)
    except NoIdError:
        raise loadbalancerv2.EntityNotFound(name='Pool',
                                            id=pool_id)
    db_props = member_db.get_loadbalancer_member_properties()
    members = pool.get_loadbalancer_members()
    for member in members or []:
        if id == member['uuid']:
            # Skip the member being updated itself.
            continue
        member_obj = self._api.loadbalancer_member_read(id=member['uuid'])
        props = member_obj.get_loadbalancer_member_properties()
        if (props.get_address() == db_props.get_address() and
                props.get_protocol_port() == db_props.get_protocol_port()):
            raise loadbalancerv2.MemberExists(
                address=props.get_address(),
                port=props.get_protocol_port(),
                pool=pool_id)
    return True
| [
"andrey-mp@yandex.ru"
] | andrey-mp@yandex.ru |
29b8e917251adc96df1f11a3d62ca45cc65c01e7 | f95402fb9d62fb91f8ff9aa8577766428366c96c | /f1info/custom_attachment/ikspecs.py | 5c59033384c122ac840cc9abcc1bb798dfd5682c | [] | no_license | a-iv/f1info | 53d9edb632c2de7c90296a65dae5402f46a056f2 | 9c268c30f4cae6ca449db9d42aa4e63d92a3a532 | refs/heads/master | 2021-01-19T09:41:59.106051 | 2011-07-22T15:39:51 | 2011-07-22T15:39:51 | 528,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,165 | py | # -*- coding: utf-8 -*-
from imagekit import processors
from imagekit.specs import ImageSpec
class ResizeDisplay(processors.Resize):
    # Resize processor for the full-size display rendition (800x800).
    width = 800
    height = 800
class Display(ImageSpec):
    # Spec producing the main display image.
    processors = [ResizeDisplay, ]
class ThumbDisplay(processors.Resize):
    # Resize processor for 80x80 thumbnails.
    width = 80
    height = 80
class MiniDisplay(processors.Resize):
    # 48x48 cropped mini rendition.
    width = 48
    height = 48
    crop = True
class ArticlesDisplay(processors.Resize):
    # 65x65 cropped rendition used for article listings.
    width = 65
    height = 65
    crop = True
class TopDisplay(processors.Resize):
    # Width-constrained resize (height scales proportionally).
    width = 320
class HeatDisplay(processors.Resize):
    # Width-constrained resize for the "heat" rendition.
    width = 266
class Thumb(ImageSpec):
    # Thumbnail spec; pre_cache renders it eagerly on upload.
    pre_cache = True
    processors = [ThumbDisplay, ]
class Mini(ImageSpec):
    # Center-cropped mini spec, rendered eagerly at quality 95.
    crop = ('center',)
    pre_cache = True
    processors = [MiniDisplay, ]
    quality = 95
class Articles(ImageSpec):
    # Center-cropped article-listing spec, rendered eagerly at quality 95.
    crop = ('center',)
    pre_cache = True
    processors = [ArticlesDisplay, ]
    quality = 95
class Top(ImageSpec):
    # 320px-wide spec, rendered eagerly at quality 95.
    pre_cache = True
    processors = [TopDisplay, ]
    quality = 95
class Heat(ImageSpec):
    # 266px-wide spec, rendered eagerly at quality 95.
    pre_cache = True
    processors = [HeatDisplay, ]
    quality = 95
| [
"mansellfan@gmail.com"
] | mansellfan@gmail.com |
33aac62c06dca320ef84cbca693af39b9e8b6757 | ee6caf788762d7e297aed4c291b20012ed681410 | /92. Codeforces/R73-C.py | 25419f28edf76623870b575c939e5b06d5e7ad59 | [] | no_license | dmlimgo/Problem-Solving | 61ea51f1737f572714bc5030470a73a6e0339336 | c265ccac046b3e87c34d014876fde11f33a15ed9 | refs/heads/master | 2020-08-30T15:45:08.895947 | 2020-02-02T14:45:28 | 2020-02-02T14:45:28 | 218,424,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | Q = int(input())
# Answer Q independent queries; each reads three counts c, m, x.
# NOTE(review): the team-building rules are inferred from the arithmetic
# below (each team appears to need one c, one m, and one of anything) —
# confirm against the original problem statement.
for q in range(Q):
    c, m, x = map(int, input().split())
    # Without at least one c and one m, no team can be formed.
    if c == 0 or m == 0:
        print(0)
        continue
    if min(c,m,x) == x:
        # x is the scarcest resource: spend all x, then build the
        # remaining teams of three from the leftover c and m.
        c -= x
        m -= x
        b = max(c, m)
        s = min(c, m)
        # Each extra team still needs at least one of the smaller pile.
        if (s+b)//3 > s:
            print(s+x)
        else:
            print((s+b)//3+x)
        continue
    if min(c,m,x) == c or min(c,m,x) == m:
        # c or m is the bottleneck; it caps the number of teams.
        print(min(c,m,x))
        continue
| [
"dongmyeong.lim@gmail.com"
] | dongmyeong.lim@gmail.com |
8ffa50442984e8f6275b53067b3c5253870efedf | e978b784d8fcc8f9a79a3bcd58c6dec3a9f50545 | /src/ETL/exceptions.py | a5fbc2312536fb3d19e8733621854630ca36b105 | [] | no_license | mmetelytsia/etl_project | 2a9e80c586dbbe9c96f1915022451dcc51e8e828 | 33f65c64b1e75eeec574ab84727a82b4be0ef73b | refs/heads/master | 2021-01-20T12:49:43.491569 | 2017-05-07T22:56:53 | 2017-05-07T22:56:53 | 90,413,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | """Exceptions raised by ETL."""
class EtlError(Exception):
    """Base class for all ETL exceptions; catch this to handle any ETL failure."""
class EtlTransformException(EtlError):
    """Common base for errors raised while transforming input records.

    Attributes:
        record: the input record being transformed when the error occurred.
        fields: the input fields involved in the error.
    """

    def __init__(self, record, fields):
        self.record, self.fields = record, fields
class EmptyValueException(EtlTransformException):
    """Raised when required fields of a record hold empty values.

    Attributes:
        record: the offending input record.
        fields: the fields that were empty.
        message: human-readable description of the failure.
    """

    def __init__(self, record, fields):
        super().__init__(record, fields)
        record_repr = str(record)
        fields_repr = str(fields)
        self.message = 'Record %s has empty values in fields: %s' % (record_repr, fields_repr)
class MalformedCsvError(EtlError):
    """Raised when an input CSV cannot be parsed.

    Attributes:
        message: description supplied by the caller.
        error: the underlying exception object.
    """

    def __init__(self, message, error):
        self.message, self.error = message, error
| [
"mariya.metelitsa@gmail.com"
] | mariya.metelitsa@gmail.com |
be4400b3dcf28fcef63096ca84ec60477babb536 | cab4b8c9501100ceb30f4850ee2d2879122a247f | /app/recommendMe/myapp/models.py | f45e8cd01cabbb6ab54916e81a0aaf34c2630d2a | [] | no_license | Projet-de-fin-d-annee-2018-2019/Deployment | 8992e6736be78ef922f97c370187d5c834275d79 | 2f623347d590a8ca78461e3119525eb34f0bd2eb | refs/heads/master | 2020-07-20T05:37:46.127499 | 2019-09-05T14:24:48 | 2019-09-05T14:24:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from django.db import models
class User(models.Model):
    """Instagram-style profile snapshot used by the recommender."""
    username = models.CharField(max_length=100)
    bio = models.CharField(max_length=100)
    # NOTE(review): following and postsCaption look like serialized lists
    # packed into one string column — confirm the delimiter with callers.
    following = models.CharField(max_length=10000)
    postsCaption = models.CharField(max_length=10000)
    categorie = models.CharField(max_length=100)
"kilaniiheb7@gmail.com"
] | kilaniiheb7@gmail.com |
6b75e66b7182ecc217fcf6cf12e24451b43ad307 | aa9647e01ace505d9c70e5247af0bce6749bdc45 | /src/db.py | b34ffc4a33947da398cf2efb32ceeecdd3a2e601 | [
"MIT"
] | permissive | cgDeepLearn/pyserver | 83853875dc33173eb3ae72b2e70c7db2c9ba3404 | 5a5e23ccafcc203b2d70eef289ec618ff9da0481 | refs/heads/main | 2023-01-29T05:45:52.110262 | 2020-12-09T09:03:33 | 2020-12-09T09:03:33 | 311,908,364 | 0 | 0 | MIT | 2020-11-16T08:39:29 | 2020-11-11T08:29:55 | Python | UTF-8 | Python | false | false | 5,075 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : db.py
# @Author : cgDeepLearn
# @Create Date : 2020/11/16-3:30 下午
import redis
from conf import config
import pymysql
from DBUtils.PooledDB import PooledDB
from utils.log import logger
class RedisOps(object):
    """Pooled Redis client wrapper; use `self.rd` for commands."""
    # NOTE(review): presumably flag values distinguishing "field already
    # existed" from "new field" on hash writes — confirm against callers.
    FIELD_EXIST = 0
    NEW_FIELD = 1
    def __init__(self, host, port, password, db):
        # A connection pool so the single Redis client is thread-friendly.
        rd = redis.ConnectionPool(host=host, port=port, password=password, db=db)
        self.rd = redis.Redis(connection_pool=rd)
class MysqlOps(object):
    """MySQL helper backed by a shared connection pool.

    Queries against the sharded tables (user_apply / user_base / flows)
    are rewritten per shard by _replace(); the shard count is read lazily
    from the data_split_info table.
    """
    def __init__(self, host, port, user, passwd, db):
        # Pooled connections: keep 10-30 idle; 0 means unlimited total.
        self.pool = PooledDB(
            pymysql,
            mincached=10,
            maxcached=30,
            maxconnections=0,
            host=host,
            user=user,
            passwd=passwd,
            db=db,
            port=port,
            charset='utf8')
        # Logical names of the sharded tables.
        self.user_apply = 'user_apply'
        self.user_base = 'user_base'
        self.flows = 'flows'
        # Lazily-filled list of shard indices; see _get_table_list().
        self.table_list = list()
    def _execute(self, sql, values):
        '''
        Fetch a fresh connection from the pool for every statement.
        '''
        conn = self.pool.connection()
        cur = conn.cursor()
        cur.execute(sql, values)
        conn.commit()
        conn.close()
        return cur
    def _check_parameter(self, sql, values):
        # When the SQL has %s placeholders, reject any falsy bind value.
        count = sql.count('%s')
        if count > 0:
            for elem in values:
                if not elem:
                    return False
        return True
    def _get_table_list(self):
        # Lazily read the shard count and cache indices [0, n).
        if len(self.table_list) == 0:
            sql = '''SELECT COUNT(id) FROM data_split_info'''
            table_num = list(self.select(sql))[0][0]
            self.table_list = [num for num in range(0, table_num)]
    def _replace(self, sql, table, num):
        # Rewrite `sql` for shard `num`: shard 0 keeps the base table name
        # but appends a deleted_at filter; other shards get `<table>_<num>`.
        if num == 0:
            if table in sql:
                string = ' AND %s.deleted_at is null' % table
                sql = sql + string
        else:
            pattern = '%s' % table
            string = '%s_%d' % (table, num)
            sql = sql.replace(pattern, string)
        return sql
    def _mulselect(self, apply_id, sql, values):
        # Run `sql` against every shard and concatenate the rows.
        self._get_table_list()
        mulcur = list()
        for num in self.table_list:
            temp_c = 0
            sql_tmp = sql
            sql_tmp = self._replace(sql_tmp, self.user_apply, num)
            sql_tmp = self._replace(sql_tmp, self.user_base, num)
            sql_tmp = self._replace(sql_tmp, self.flows, num)
            cur = self._execute(sql_tmp, values)
            for row in cur:
                temp_c = temp_c + 1
                mulcur.append(row)
            # temp_c logs the per-shard row count.
            logger.info('apply_id:%d _mulselect sql:%s, values:%s, result:%s',
                        apply_id, sql_tmp, values, temp_c)
        return mulcur
    def mulselect(self, sql, values=[], apply_id=0, check=False, log=True):
        '''
        Sharded multi-table query generator.
        Supports basic MySQL SELECTs only; aggregate functions, GROUP BY
        and ORDER BY are not supported across shards.
        '''
        # NOTE(review): values=[] is a shared mutable default; harmless here
        # because it is never mutated, but worth cleaning up.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        if log:
            logger.info('apply_id:%d mulselect sql:%s, values:%s', apply_id,
                        sql, values)
        cur = self._mulselect(apply_id, sql, values)
        for row in cur:
            yield row
    def sinselect(self, sql, values=[], apply_id=0, check=False, log=True):
        # Single-shard (shard 0) query generator.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        # During the migration period, also apply the deleted_at filter.
        sql = self._replace(sql, self.user_apply, num=0)
        sql = self._replace(sql, self.user_base, num=0)
        sql = self._replace(sql, self.flows, num=0)
        if log:
            logger.info('apply_id:%d sinselect sql:%s, values:%s', apply_id,
                        sql, values)
        cur = self._execute(sql, values)
        for row in cur:
            yield row
    def select(self, sql, values=[], apply_id=0, check=False, log=True):
        # Plain query generator with no shard rewriting.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        if log:
            logger.info('apply_id:%d select sql:%s, values:%s', apply_id, sql,
                        values)
        cur = self._execute(sql, values)
        for row in cur:
            yield row
    def execute(self, sql, values=[], apply_id=0, check=False, log=True):
        # Non-query statement (INSERT/UPDATE/DELETE); _execute commits.
        sql = sql.replace('\n', '')
        if check and not self._check_parameter(sql, values):
            return
        if log:
            logger.info('apply_id:%d execute sql:%s, values:%s', apply_id, sql,
                        values)
        cur = self._execute(sql, values)
# Module-level singletons wired from conf.config; imported by other modules.
redis_op = RedisOps(
    host=config.redis_host, port=config.redis_port, password=config.redis_pwd, db=config.redis_db)
mysql_op = MysqlOps(
    host=config.mysql_host,
    port=config.mysql_port,
    user=config.mysql_user,
    passwd=config.mysql_pwd,
    db=config.mysql_db)
if __name__ == '__main__':
    # Smoke check: print the public attributes of both helpers.
    print(dir(redis_op))
    print(dir(mysql_op))
"cglearningnow@163.com"
] | cglearningnow@163.com |
4f66898e78968d145cadffd50f0fbaa0bc24e6f1 | 3b1daac7c1f72b985da899770d98e5f0e8fb835c | /Configurations/VBS/2017CR_v7/plot.py | 98e0a0b236687fec6d81492a000ee0a41787e122 | [] | no_license | freejiebao/PlotsConfigurations | 7e10aa45aa3bf742f30d1e21dc565d59d2a025d8 | cdfd3aff38d1ece9599a699997753bc8ba01b9b1 | refs/heads/master | 2020-06-18T19:22:00.561542 | 2019-09-02T12:52:28 | 2019-09-02T12:52:28 | 186,931,874 | 0 | 0 | null | 2019-05-16T01:58:07 | 2019-05-16T01:58:07 | null | UTF-8 | Python | false | false | 1,009 | py | # plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots (merge different sample during plot).
# If not defined, normal plots is used
#
Red=632; Violet=880; Green=416; Orange=800; Yellow=400; Azure=860
groupPlot['non-prompt'] = {
'nameHR' : 'non-Prompt',
'isSignal' : 0,
'color': Yellow, # kYellow
'samples' : ['Fake_lep']
}
##Fake and prompt substraction
plot['Fake_lep'] = {
'color': Yellow, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
##Data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 1 ,
'scale' : 1.0
}
# additional options
legend['lumi'] = 'L = 41.5/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
| [
"jiexiao@pku.edu.cn"
] | jiexiao@pku.edu.cn |
c8e50a2f806e941cfcfc95dfe3ee375f2a3bcde0 | 1a62763db43f03f79e81c4a9a06745fd22c9f0bf | /键盘监听/TCPserver.py | 1b778dd2941d10531e72d93cbc48901c56d00d95 | [] | no_license | wly92645492/auxiliary_amusement | 20a2c700cd43313e2d9b1e41caa7cc1291616571 | 72f7fa4e01f683b7ff5b3d3202e273d7defbf370 | refs/heads/master | 2020-03-15T19:16:00.935556 | 2018-05-06T03:29:32 | 2018-05-06T03:29:32 | 132,304,364 | 1 | 0 | null | 2018-05-06T03:46:28 | 2018-05-06T03:46:27 | null | UTF-8 | Python | false | false | 714 | py | # -*- coding: utf-8 -*-
import socket
import threading
'''服务器端'''
# 监听的IP及端口
bind_ip = "127.0.0.1"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print "[*] Listening on %s:%d" % (bind_ip, bind_port)
def handle_client(client_socket):
request = client_socket.recv(1024)
print "[*] Received:%s" % request
client_socket.send("ok!")
client_socket.close()
while True:
client, addr = server.accept()
print "[*] Accept connection from:%s:%d" % (addr[0], addr[1])
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
| [
"2990599771@qq.com"
] | 2990599771@qq.com |
3b9a8c542f7180818a301d2cb4f2baa8e4146092 | 6cdbc4f559cc9d61f172830718dc1448dfbf0808 | /guessing-game-work.py | ed3f0291e6ac16217540f47cdac742dffb12e117 | [
"Apache-2.0"
] | permissive | register448866/Python-Samples | 90c4b94ca5018b3b4d2a1db87668a623d0c4366e | a18fc707864a6f8169f20a7b828a8789c4ba283c | refs/heads/master | 2020-06-08T13:38:38.336360 | 2019-06-22T00:38:42 | 2019-06-22T00:38:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,474 | py | import random
# Number-guessing game: the player hunts a random number in [1, 50] and the
# program reports too-high / too-low until the guess is correct.
print()
print('~~~~~~~~~~~~~~')
print('★ GUESS THE MAGIC NUMBER ★')
print('~~~~~~~~~~~~~~')
print()
print('''Instructions: The computer has
randomly generated a magic number between
1 and 50 and it is your job to guess what
that number is! Every time you guess a number,
the computer will tell you if your guess is
too high or too low. You will keep guessing
until you correctly guess the magic number.
Try to guess the magic number in as few
guesses as you can. Good luck!''')
print()
print('~~~~~~~~~~~~~~')
print()
flag = True
while flag:
    #################################
    # GAME STARTS HERE
    #################################
    magic_number = random.randint(1, 50)
    guess = int(input('Please guess a number between 1 and 50! ⋙ '))
    counter = 0  # wrong guesses so far; total attempts = counter + 1
    while guess != magic_number:
        counter = 1 + counter
        if 0 < guess < magic_number:
            print()
            print("Your guess is too low! Keep trying!")
            print()
            guess = int(input('Please guess again! ⋙ '))
        # Bug fix: use >= so a wrong guess of exactly 50 is reported as
        # "too high" instead of falling through to "Invalid input!".
        elif 50 >= guess > magic_number:
            print()
            print("Your guess is too high! Keep trying!")
            print()
            guess = int(input('Please guess again! ⋙ '))
        elif guess > 50:
            print()
            print("The magic number cannot be more than 50! ")
            print()
            guess = int(input('Please guess again! ⋙ '))
        elif guess < 1:
            print()
            print("The magic number cannot be less than 1! ")
            print()
            guess = int(input('Please guess again! ⋙ '))
        else:
            # Unreachable for integer input; kept as a safety net.
            print()
            print("Invalid input!")
            print()
            guess = int(input('Please guess again! ⋙ '))
    if guess == magic_number:
        print()
        print('~~~~~~~~~~~~~~')
        print("CONGRATS, YOU HAVE CORRECTLY GUESSED THE MAGIC NUMBER!")
        print(f"Number of attempts: {counter + 1}")
        print('~~~~~~~~~~~~~~')
        print()
    #################################
    # GAME ENDS HERE
    #################################
    x = input("Would you like to play again (y or n)? ")
    # x[:1] avoids an IndexError when the user just presses Enter.
    if x[:1].lower() == "n":
        print('Have a good day')
        print()
        break
"tvpatel01@gmail.com"
] | tvpatel01@gmail.com |
4adbb6e8afd7e434e7586c6ae23f649c7c704138 | ea2ca23c1aa6f5de744179097ef625d30b5514f0 | /battery.py | 6b0aec623cdbaa9eebcebfc9677a83747575b5f0 | [] | no_license | isovector/oh-my-zsh | 998f11a3b1b9f7aefa97458f803d29a8ae4ad66c | 28bbad0e768954c0fad4cb51e8530bfea489a8b9 | refs/heads/master | 2021-01-16T19:53:26.932483 | 2015-02-19T06:10:00 | 2015-02-19T06:10:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | #!/usr/bin/env python2
# coding=UTF-8
import sys
import time
import math
import string
import subprocess
# Query battery state via the `acpi` tool (Python 2 script).
# NOTE(review): parsing assumes output shaped like
# "Battery 0: Discharging, 45%, 01:30:00 remaining" — confirm on target OS.
p = subprocess.Popen(["acpi"], stdout=subprocess.PIPE)
output = string.split(p.communicate()[0])
# output[3] is e.g. '45%,'; strip the trailing '%,'. 'Full,' has no percent.
percent = int(output[3][:-2]) if output[2] != "Full," else 100
width = 10
# Arrow glyphs point left while discharging, right while charging/full.
if output[2] == "Discharging,":
    full = u'◂'
    empty = u'◃'
else:
    full = u'▸'
    empty = u'▹'
# Number of filled vs empty segments in the 10-cell gauge.
marks = math.ceil(width * (percent / 100.0))
spaces = math.floor(width - marks)
loader = (full * int(marks)) + (empty * int(spaces))
# ANSI colour escapes wrapped in zsh zero-width prompt markers %{...%}.
color_green = '%{[32m%}'
color_yellow = '%{[1;33m%}'
color_red = '%{[31m%}'
color_reset = '%{[00m%}'
# Green above 60%, yellow above 25%, red otherwise.
color_out = (
    color_green if percent > 60
    else color_yellow if percent > 25
    else color_red
)
sys.stdout.write(color_out)
sys.stdout.write(loader.encode('utf-8'))
sys.stdout.write(color_reset)
| [
"sandy.g.maguire@gmail.com"
] | sandy.g.maguire@gmail.com |
d01e1db1a3d1d0bce24766f0e241c2a7a9923a0f | 665b89f2472f5cf7eb441609eb112109b7381884 | /weblatex/migrations/0003_song_attribution.py | 98e7a851e1a47ea4155fcbc38063165cc4d344cb | [] | no_license | Mortal/weblatex | 5807bf25ea0d6a371e9fc6f0094f7e7375645b6c | 9c841f9ec226e99f38b6e0c4f12e03535d2c06de | refs/heads/master | 2020-05-14T11:53:08.299274 | 2016-12-23T12:03:50 | 2016-12-23T13:43:52 | 24,682,829 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-31 09:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the required `attribution` field to Song."""

    dependencies = [
        ('weblatex', '0002_auto_20151227_1835'),
    ]
    operations = [
        migrations.AddField(
            model_name='song',
            name='attribution',
            # Backfill existing rows with '' (preserve_default=False means
            # the default is only used for this migration).
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        ),
    ]
| [
"rav@cs.au.dk"
] | rav@cs.au.dk |
2b42cfa22e89b7be651d4fd61e31b787d463031c | 6c732bae3f33a411a9bfa743cbf6cc7897eff371 | /src/buffer.py | efe0f0eba641a5b3366d6386098c21d5781bd264 | [] | no_license | JoshuaSFryer/5001-simulation-project | 0d8c8425f23fc614006032101a8f91017d23a734 | c20daac96592a653748f7149a32dc6c2971d3d6d | refs/heads/main | 2023-04-06T20:54:06.752859 | 2021-04-11T21:40:32 | 2021-04-11T21:40:32 | 339,490,496 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | from component import ComponentType
BUFFER_MAX_SIZE = 2
class Buffer:
"""
Queue-like buffer that stores components.
"""
def __init__(self, type:ComponentType):
self.component_type = type
self.contents = list()
def enqueue(self, type:ComponentType):
if not type == self.component_type:
raise BufferException('Incorrect component type')
if self.is_full():
raise BufferException('Cannot push to a full buffer')
self.contents.append(type)
def dequeue(self):
if self.is_empty():
raise BufferException('Cannot pop from empty buffer')
else:
# Get the oldest element
return self.contents.pop(0)
def is_full(self):
return len(self.contents) == BUFFER_MAX_SIZE
def is_empty(self):
return len(self.contents) == 0
def get_length(self):
return len(self.contents)
class BufferException(Exception):
    """Raised on invalid Buffer operations (wrong type, overflow, underflow)."""
    pass
"josh.s.fryer@gmail.com"
] | josh.s.fryer@gmail.com |
86becf57eeeb0f2a8248353ca18e87287e8803a6 | abb001be844fff06a9aa7b1662094432547d1f8b | /codeit/leisure1.py | c7f9304d3c0ae9f5ff89f89f6924211362cb41c8 | [] | no_license | lim-so-hyun/my_project | 93cff7ccccef8e05fc036d7a8042a2e806bd5a98 | a17ba91ac8736e9943685357ad9d9e903360160c | refs/heads/main | 2023-06-04T23:47:57.772353 | 2021-06-24T14:49:57 | 2021-06-24T14:49:57 | 332,143,731 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,170 | py | import pandas as pd
import matplotlib.pyplot as plt
# Load the 2019 leisure-time survey (tab-separated, indexed on column 1).
leisure_2019 = pd.read_csv('leisure.txt', sep="\t", encoding = 'utf8', index_col = 1)
leisure_2019.drop('기간', axis='columns', inplace=True)
leisure_2019.replace('(.*):(.*)', r'\1.\2', regex=True, inplace=True) # Regex that turns every 'H:MM' style ':' into '.' so the values can be treated as numbers.
preference_2019 = pd.read_csv('leisure preference 2019.txt', sep='\t', encoding='utf8', index_col=1)
preference_2019.drop('기간', axis='columns', inplace=True)
# Compare leisure time by day of week (rows where the activity class is the total, '소계').
leisure_2019_day = leisure_2019.loc[leisure_2019['행동분류별']=='소계', ['요일평균', '평일', '토요일', '일요일']]
leisure_2019_day = leisure_2019_day.astype('float')
leisure_2019_day = leisure_2019_day.transpose()
plt.rcParams['font.family']='Malgun Gothic'
# Stacked bar chart from the '평일' (weekday) row onward.
graph = leisure_2019_day['평일':].plot(kind='bar', title='요일별 여가활동 시간', stacked=True)
for p in graph.patches:
    # Annotate each bar segment with its value at the segment's center.
    left, bottom, width, height = p.get_bbox().bounds
    graph.annotate("%.2f"%(height), xy=(left+width/2, bottom+height/2), ha='center', va='center')
plt.show()
"76216512+lim-so-hyun@users.noreply.github.com"
] | 76216512+lim-so-hyun@users.noreply.github.com |
b83ac96495802bc70de9465246f93274976fccef | 4203c5d6a932bd917cc549d56de1e9c73e06522f | /src/boc/pipelines/data_science/nodes.py | 7b297e8785d75e6656342941e60cf6821407e1f8 | [] | no_license | astronomer/kedro-ge-airflow | 41e5463753dcd75562c496cd8b3ca13e68321777 | de0dfc1cb1714be904fced0a0164e6f7d1f52f9b | refs/heads/main | 2023-07-14T12:26:46.885765 | 2021-08-26T13:38:33 | 2021-08-26T13:38:33 | 353,138,100 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,147 | py | # Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code for the nodes in the example pipeline. This code is meant
just for illustrating basic Kedro features.
Delete this when you start working on your own Kedro project.
"""
# pylint: disable=invalid-name
import logging
from typing import Any, Dict
import numpy as np
import pandas as pd
def train_model(
train_x: pd.DataFrame, train_y: pd.DataFrame, parameters: Dict[str, Any]
) -> np.ndarray:
"""Node for training a simple multi-class logistic regression model. The
number of training iterations as well as the learning rate are taken from
conf/project/parameters.yml. All of the data as well as the parameters
will be provided to this function at the time of execution.
"""
num_iter = parameters["example_num_train_iter"]
lr = parameters["example_learning_rate"]
X = train_x.to_numpy()
Y = train_y.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
weights = []
# Train one model for each class in Y
for k in range(Y.shape[1]):
# Initialise weights
theta = np.zeros(X.shape[1])
y = Y[:, k]
for _ in range(num_iter):
z = np.dot(X, theta)
h = _sigmoid(z)
gradient = np.dot(X.T, (h - y)) / y.size
theta -= lr * gradient
# Save the weights for each model
weights.append(theta)
# Return a joint multi-class model with weights for all classes
return np.vstack(weights).transpose()
def predict(model: np.ndarray, test_x: pd.DataFrame) -> np.ndarray:
"""Node for making predictions given a pre-trained model and a test set.
"""
X = test_x.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
# Predict "probabilities" for each class
result = _sigmoid(np.dot(X, model))
# Return the index of the class with max probability for all samples
return np.argmax(result, axis=1)
def report_accuracy(predictions: np.ndarray, test_y: pd.DataFrame) -> None:
"""Node for reporting the accuracy of the predictions performed by the
previous node. Notice that this function has no outputs, except logging.
"""
# Get true class index
target = np.argmax(test_y.to_numpy(), axis=1)
# Calculate accuracy of predictions
accuracy = np.sum(predictions == target) / target.shape[0]
# Log the accuracy of the model
log = logging.getLogger(__name__)
log.info("Model accuracy on test set: %0.2f%%", accuracy * 100)
return accuracy
def _sigmoid(z):
"""A helper sigmoid function used by the training and the scoring nodes."""
return 1 / (1 + np.exp(-z))
| [
"kentendanas@Kentens-MacBook-Pro.local"
] | kentendanas@Kentens-MacBook-Pro.local |
673c01d2d26270daffb217e67049c5f9edc1b729 | c5487a0ab6ac1bf03cdb5e6942d1581ba863c5a9 | /pgs_venv/bin/django-admin.py | 297ad9bd8e56bf6cc9da1c48fa4b85f3270adebb | [] | no_license | Veldyale/books_projects | 4d4a76ad17fffc32d631d333c506b0cd8292513f | f1139100ebf195f8162aa1f3239e10ba1195c813 | refs/heads/master | 2023-06-22T06:13:46.150921 | 2021-07-20T04:36:14 | 2021-07-20T04:36:14 | 387,556,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | #!/Users/veldyale/PycharmProjects/books_projects/pgs_venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"veldyale@icloud.com"
] | veldyale@icloud.com |
8606b6c98479d61f17568b1a216d7c5c7ac358b1 | dbf8dbbfdad118e0b8ca22afe2f01b31c316d35c | /wishApp/wish/views.py | 8d91a2b6a85e36430a2a2efab4cbe5f963691898 | [] | no_license | Anoldyogi/Django-Projects | 9d8008d9685afe47a51747c026ae8f8a9bc88387 | 70acd8c4f483bd17acaf884a1f6d66a473d79b1d | refs/heads/master | 2020-08-27T09:00:38.518026 | 2019-11-18T05:13:06 | 2019-11-18T05:13:06 | 217,309,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | from django.shortcuts import render
from django.http import HttpResponse
import datetime
# Create your views here.
def wishing(request):
    """Return an HTML greeting whose wish depends on the current hour."""
    now = datetime.datetime.now()
    hour = int(now.strftime('%H'))
    # Pick the time-of-day abbreviation: morning/afternoon/evening/night.
    if hour < 12:
        wish = " GM "
    elif hour < 16:
        wish = " GA "
    elif hour < 21:
        wish = " GE "
    else:
        wish = " GN "
    msg = "<h1> Hello Friends How are you " + wish + "</h1><hr>"
    msg = msg + "<h1> crrent time is " + str(now) + "</h1>"
    return HttpResponse(msg)
| [
"dhami.yogesh@gmail.com"
] | dhami.yogesh@gmail.com |
68047e4d757b39b366f8eff2dd9ffd35400847c5 | 8cacd666c2a022da32bd7825c973225a7c96507c | /lowercase5.py | ac8eb2f4a123183614175ba56c05c78c4ffeae98 | [] | no_license | AkhtarZainab/Python_Programming | 24331ab5e38b0f572fd65590443ef941317ce9ae | 8e0641629283b0dfd0cd9f8410e6306afeeec183 | refs/heads/master | 2022-12-11T15:54:22.438842 | 2020-09-10T05:28:51 | 2020-09-10T05:28:51 | 294,078,916 | 0 | 0 | null | 2020-09-10T05:28:53 | 2020-09-09T10:25:56 | Python | UTF-8 | Python | false | false | 323 | py | def any_lowercase5(s):
for c in s:
if not c.islower():
return False
return True
# Exercise the predicate on mixed-case samples; only 'day' is all-lowercase.
print(any_lowercase5('DAY'))
print(any_lowercase5('Day'))
print(any_lowercase5('dAY'))
print(any_lowercase5('day'))
print(any_lowercase5('DAy'))
print(any_lowercase5('dAy'))
print(any_lowercase5('daY'))
"noreply@github.com"
] | noreply@github.com |
15c2f5343620cc2bd92a19eaf89edeb81b65f80d | ba3bdea3133e8efc33498b80169be6d7b103c3c4 | /blog/admin.py | 75bf66c97971eb9575a109a0c665834e1c29b43c | [] | no_license | kurtchen1988/my_blog | c7e714ae2ec7fcc1336c36b8c4b1d366ffdd311c | abc7002e654362a32bca1c699a9cf98866654bc1 | refs/heads/master | 2022-12-21T05:11:46.955819 | 2018-11-05T12:27:30 | 2018-11-05T12:27:30 | 154,171,969 | 0 | 0 | null | 2022-12-11T12:17:23 | 2018-10-22T15:52:26 | Python | UTF-8 | Python | false | false | 475 | py | from django.contrib import admin # https://docs.djangoproject.com/en/2.1/ref/contrib/admin/
from .models import BlogType, Blog
# Register your models here.
@admin.register(BlogType)
class BlogTypeAdmin(admin.ModelAdmin):
    '''Admin registration for blog types.'''
    list_display = ('id', 'type_name')
@admin.register(Blog)
class BlogAdmin(admin.ModelAdmin):
    '''Admin registration for blog posts.'''
    list_display = ('id', 'title', 'blog_type', 'author', 'get_read_num', 'created_time', 'last_updated_time')
"kurtcobain1988824@hotmail.com"
] | kurtcobain1988824@hotmail.com |
13a098a9ebdc2fc23baad794b07470c3bd331964 | b149879b16639d9d2f40eeb8dd289a2658aadad1 | /Projects/PythonGame/Snake.py | 67bba84855fc9df205628c8b65418d6b9c93233d | [] | no_license | buptlrr/PythonProjects | 49c54b8f8c45906802823df196a170b90e9dcf68 | ba641861f6e3f5ad9ebe2e8236c674d8acd1e5c4 | refs/heads/master | 2021-01-01T19:46:20.511724 | 2014-11-15T06:58:03 | 2014-11-15T06:58:03 | 26,671,393 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py | from Tkinter import *
import random
class snake(Frame):
    """Tkinter snake game on a 10x10 wrap-around grid.

    NOTE: Python 2 code (capital `Tkinter` import); `/` below is therefore
    integer division.
    """
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.body = [(0,0)]        # grid cells occupied, head first
        self.bodyid = []           # canvas rectangle ids, parallel to body
        self.food = [ -1, -1 ]     # current food cell
        self.foodid = -1           # canvas id of the food rectangle
        self.gridcount = 10        # cells per side
        self.size = 500            # canvas pixels per side
        self.di = 3                # direction: 1=up 2=right 3=down 4=left
        self.speed = 500           # ms between ticks
        self.top = self.winfo_toplevel()
        self.top.resizable(False, False)
        self.grid()
        self.canvas = Canvas(self)
        self.canvas.grid()
        self.canvas.config(width=self.size, height=self.size,relief=RIDGE)
        self.drawgrid()
        s = self.size/self.gridcount
        # Draw the initial one-cell body.
        id = self.canvas.create_rectangle(self.body[0][0]*s,self.body[0][1]*s,
                (self.body[0][0]+1)*s, (self.body[0][1]+1)*s, fill="yellow")
        self.bodyid.insert(0, id)
        self.bind_all("<KeyRelease>", self.keyrelease)
        self.drawfood()
        # Start the game loop; drawsnake reschedules itself.
        self.after(self.speed, self.drawsnake)
    def drawgrid(self):
        # Draw the static grid lines once.
        s = self.size/self.gridcount
        for i in range(0, self.gridcount+1):
            self.canvas.create_line(i*s, 0, i*s, self.size)
            self.canvas.create_line(0, i*s, self.size, i*s)
    def drawsnake(self):
        """One game tick: advance the head, handle food/self-collision."""
        s = self.size/self.gridcount
        head = self.body[0]
        new = [head[0], head[1]]
        # Step one cell in the current direction, wrapping at the edges.
        if self.di == 1:
            new[1] = (head[1]-1) % self.gridcount
        elif self.di == 2:
            new[0] = (head[0]+1) % self.gridcount
        elif self.di == 3:
            new[1] = (head[1]+1) % self.gridcount
        else:
            new[0] = (head[0]-1) % self.gridcount
        next = ( new[0], new[1] )
        if next in self.body:
            # Ran into itself: game over.
            exit()
        elif next == (self.food[0], self.food[1]):
            # Ate the food: reuse its rectangle as the new head and grow.
            self.body.insert(0, next)
            self.bodyid.insert(0, self.foodid)
            self.drawfood()
        else:
            # Normal move: recycle the tail rectangle as the new head.
            tail = self.body.pop()
            id = self.bodyid.pop()
            self.canvas.move(id, (next[0]-tail[0])*s, (next[1]-tail[1])*s)
            self.body.insert(0, next)
            self.bodyid.insert(0, id)
        # Schedule the next tick (runs for every non-fatal move).
        self.after(self.speed, self.drawsnake)
    def drawfood(self):
        # Place food on a random cell not occupied by the snake.
        s = self.size/self.gridcount
        x = random.randrange(0, self.gridcount)
        y = random.randrange(0, self.gridcount)
        while (x, y) in self.body:
            x = random.randrange(0, self.gridcount)
            y = random.randrange(0, self.gridcount)
        id = self.canvas.create_rectangle(x*s,y*s, (x+1)*s, (y+1)*s, fill="yellow")
        self.food[0] = x
        self.food[1] = y
        self.foodid = id
    def keyrelease(self, event):
        # Arrow keys steer; reversing straight into the body is ignored.
        if event.keysym == "Up" and self.di != 3:
            self.di = 1
        elif event.keysym == "Right" and self.di !=4:
            self.di = 2
        elif event.keysym == "Down" and self.di != 1:
            self.di = 3
        elif event.keysym == "Left" and self.di != 2:
            self.di = 4
# Build the game window and enter the Tk event loop.
app = snake()
app.master.title("Greedy Snake")
app.mainloop()
"l007rr@bupt.edu.cn"
] | l007rr@bupt.edu.cn |
64a16cee3a63dd8751894635b665e2f63df51a93 | caebdf8f0b4349b4b465865d7ef8ae4436f5769f | /tfidf.py | 6d63c1514f904dda4acaac978bc96b72b08e4e8a | [] | no_license | ngoduyvu/DeeepSense-Project | 5cbfd54a5bf1f42e5923cfd709fb98f2d23263e0 | 92a0516b95455ed7e305f9f8a291f8d3d10ea2e4 | refs/heads/master | 2021-01-19T16:40:43.848524 | 2017-11-27T00:05:17 | 2017-11-27T00:05:17 | 101,017,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,673 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 09:06:33 2017
Author: Ngo Duy Vu
Project:DeeepSense customer review
TF-IDF and cosine distance to
find the similar document and keywords
"""
import math
import operator
import string
from nltk.corpus import stopwords
from autocorrect import spell
from replacers import RepeatReplacer, RegexpReplacer
def cleaning_text(sentence):
    """Clean a sentence for tf-idf: lower-case, normalize repeats and
    contractions, spell-correct each word, and strip punctuation.

    Bug fix: the original discarded the spell-corrected words by joining
    the pre-correction `words` list again; we now join the corrected list.
    """
    regex = RegexpReplacer()
    repeat = RepeatReplacer()
    sentence = sentence.lower()
    words = [repeat.replace(i) for i in sentence.split(" ")]
    words = [regex.replace(i) for i in words]
    sentence = ' '.join(words)
    # Spell-correct word by word, then rebuild the sentence from the
    # corrected words (not the uncorrected `words` list).
    corrected = [spell(i) for i in sentence.split(" ")]
    sentence = ' '.join(corrected)
    # Drop punctuation character by character.
    sentence = [s for s in sentence if s not in string.punctuation]
    return ''.join(sentence)
def unique_word(all_document):
    """Tokenize every document and collect the set of all distinct tokens.

    Returns [set_of_all_tokens, list_of_token_lists].
    """
    tokenized_documents = [doc.split(" ") for doc in all_document]
    all_tokens_set = set()
    for tokens in tokenized_documents:
        all_tokens_set.update(tokens)
    return [all_tokens_set, tokenized_documents]
def term_frequency(term, tokenized_document):
    """Return how many times `term` occurs in the token list."""
    return tokenized_document.count(term)
def sublinear_term_frequency(term, tokenized_document):
    """Dampened term frequency: 1 + ln(count), or 0 when the term is absent.

    The log damping reduces the influence of document length on the weight.
    """
    count = tokenized_document.count(term)
    return 0 if count == 0 else 1 + math.log(count)
def inverse_document_frequencies(list_words, tokenized_documents):
    """Compute idf(token) = 1 + ln(N / document_frequency) for each token.

    Rare words get large weights, common words small ones, so that rare
    words dominate the tf-idf vectors.
    """
    total = len(tokenized_documents)
    idf_values = {}
    for token in list_words:
        doc_count = sum(1 for doc in tokenized_documents if token in doc)
        idf_values[token] = 1 + math.log(total / doc_count)
    return idf_values
def tfidf(tokenized_documents, idf):
    """Vectorize each document: one component per term in `idf`, each
    component being sublinear tf times the term's idf weight."""
    vectors = []
    for document in tokenized_documents:
        vector = [sublinear_term_frequency(term, document) * idf[term]
                  for term in idf.keys()]
        vectors.append(vector)
    return vectors
def cosine_similarity(vector1, vector2):
    """Cosine of the angle between two vectors; 0 when either norm is zero.

    Similar documents produce vectors that point in nearby directions.
    """
    dot_product = sum(p * q for p, q in zip(vector1, vector2))
    norm1 = math.sqrt(sum(val ** 2 for val in vector1))
    norm2 = math.sqrt(sum(val ** 2 for val in vector2))
    magnitude = norm1 * norm2
    if not magnitude:
        return 0
    return dot_product / magnitude
def idf_keyword(tokenized_documents, idf_values):
    """Return, per document, the set of (up to) five highest-idf words.

    Stop words are ignored.  Words missing from `idf_values` get a fixed
    default weight of 7, i.e. they are treated as maximally rare.

    Cleanup: the original kept an unused `counter` and shuffled results
    through several temporary lists/dicts that were cleared by hand; this
    version builds fresh locals per document with identical output.
    """
    sentence_keyword = []
    stops = set(stopwords.words('english'))
    for review in tokenized_documents:
        weights = {}
        for word in review:
            if word in stops:
                continue
            # Fall back to 7 for out-of-vocabulary words.
            weights[word] = idf_values.get(word, 7)
        top_five = sorted(weights.items(), key=operator.itemgetter(1),
                          reverse=True)[:5]
        sentence_keyword.append(set(word for word, _ in top_five))
    return sentence_keyword
def similar_score(tokenized_documents, idf_values):
    """Score each document by how different it is from the rest.

    Every pairwise cosine similarity contributes points on a fixed scale:
    the lower the similarity, the more points, so unusual documents end
    up with the highest totals.
    """
    vectors = tfidf(tokenized_documents, idf_values)
    scores = []
    for i, vec_a in enumerate(vectors):
        total = 0
        for j, vec_b in enumerate(vectors):
            if i == j:
                continue
            sim = cosine_similarity(vec_a, vec_b)
            if sim >= 0.9:
                total += 1
            elif sim >= 0.7:
                total += 2
            elif sim >= 0.5:
                total += 5
            elif sim >= 0.2:
                total += 10
            else:
                total += 20
        scores.append(total)
    return scores
def find_idf(all_documents):
    """Clean every document, then return [similarity scores, keyword sets]."""
    cleaned = [cleaning_text(review) for review in all_documents]
    list_words, tokenized_documents = unique_word(cleaned)
    idf_values = inverse_document_frequencies(list_words, tokenized_documents)
    similar = similar_score(tokenized_documents, idf_values)
    keyword = idf_keyword(tokenized_documents, idf_values)
    return [similar, keyword]
| [
"noreply@github.com"
] | noreply@github.com |
4309e198e50c8cdd0ffbbd7d0ff4b1b9bf70e148 | f45ec7eaa2da46f126bc43cc8c8d855853ff903e | /helper/newTrace/WorkloadE/newgraphWithMem.py | fe63ef998963d92dc77e578c613d76fd52785263 | [] | no_license | grantwra/Benchmark_withJson | a593f6d366b3d6037eb4f795c1bfbd620ece4e15 | 363b6b6976673b82b38d7fe1ebbc2fd9711294a4 | refs/heads/master | 2020-12-01T19:17:46.137817 | 2017-02-02T20:35:32 | 2017-02-02T20:35:32 | 65,847,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,497 | py | #usage: python newgraph.py 'filename' SQL || BDB
import sys
import matplotlib.pyplot as plt
import numpy as np

# Trace file to parse; sys.argv[2] later selects the SQL or BDB section.
filename = sys.argv[1]

# Scheduler-switch bookkeeping for the traced process.
start = 0
c_switch = 0
sched_timestamps = []
sched_switches = []
cumulative_time = 0
cumul_time = []

# Block-I/O request tracking: `block` maps "sector + length" keys to
# [insert_time, complete_time] pairs; `block_op_type` maps them to a label.
block = {}
block_op_type = {}
block_insert = 0
block_complete = 0
block_inserted = []
block_insert_timestamps = []
block_completed = []
block_complete_timestamps = []
def ret_color(op_type):
    """Map a block-I/O operation label to a matplotlib color code.

    Fix: the original returned None for any label other than the three
    listed (e.g. a plain 'write'), which matplotlib rejects as a color;
    we now fall back to black ('k') for unhandled labels.
    """
    if op_type == 'read':
        return 'y'
    if op_type == 'write sync':
        return 'g'
    if op_type == 'write sync flush':
        return 'b'
    return 'k'
# First pass: learn the traced PID and the start/end timestamps of the
# benchmark section selected by argv[2] (SQL or BDB).
with open(filename,'r') as log:
    for line in log:
        if('START: App' in line):
            columns = line.split()
            pid = columns[0].split('-')[1]
        elif(sys.argv[2]+'_START' in line):
            line = line.strip()
            columns = line.split()
            start_time = columns[3][:-1]
            start = float(start_time)*1000  # seconds -> milliseconds
            cumul_time.append(0);
        elif(sys.argv[2]+'_END' in line):
            line = line.strip()
            columns = line.split()
            end_time = columns[3][:-1]
            break

# Second pass: skip ahead to the start marker, then collect scheduler
# switches and block-I/O insert/complete events until the end marker.
with open(filename,'r') as log:
    while(not(start_time in log.readline())):
        pass
    line = log.readline()
    while(not(end_time in line)):
        if(pid in line and 'sched_switch' in line):
            line = line.strip()
            #line = line
            columns = line.split()
            c_switch += 1
            sched_timestamps.append(float(columns[3][:-1])*1000)
            if(('next_pid='+pid) in line):
                # Process scheduled in: start a new on-CPU interval.
                start = sched_timestamps[-1]
                cumul_time.append(cumul_time[-1])
                sched_switches.append(0)
            else:
                # Process scheduled out: accumulate the elapsed on-CPU time.
                cumulative_time = cumulative_time + sched_timestamps[-1] - start
                cumul_time.append(cumulative_time)
                sched_switches.append(1)
        if(pid in line and 'block_rq_insert' in line):
            line = line.strip()
            columns = line.split()
            block_insert += 1
            block_inserted.append(block_insert)
            block_insert_timestamps.append(float(columns[3][:-1])*1000)
            # Key requests by "sector + length" so completions can be matched.
            block[(columns[9]+' + '+columns[11])] = [block_insert_timestamps[-1]]
            # Decode the rwbs flags column into a readable label.
            op_type = ''
            if('R' in columns[6]):
                op_type += 'read '
            if('W' in columns[6]):
                op_type += 'write '
            if('S' in columns[6]):
                op_type += 'sync '
            if('F' in columns[6]):
                op_type += 'flush '
            block_op_type[(columns[9]+' + '+columns[11])] = op_type.strip()
        elif('block_rq_complete' in line):
            line = line.strip()
            columns = line.split()
            # Only record completions whose insert we saw for this PID.
            if((columns[8]+' + '+columns[10]) in block):
                block_complete += 1
                block_completed.append(block_complete)
                block_complete_timestamps.append(float(columns[3][:-1])*1000)
                block[(columns[8]+' + '+columns[10])].append(block_complete_timestamps[-1]);
        line = log.readline()
    end=float(line.split()[3][:-1])
#fig, (ax1, ax2) = plt.subplots(2,1,sharex = True)
fig, (ax1, ax2, ax3) = plt.subplots(3,1,sharex = True)
#ax3 = fig.add_subplot(212)

# Re-base all timestamps to the benchmark start (milliseconds).
sched_timestamps = np.array(sched_timestamps) - float(start_time)*1000
cumul_time = np.array(cumul_time)
cumul_time = np.append(cumul_time, cumulative_time + float(end_time)*1000 - start)
block_insert_timestamps = np.array(block_insert_timestamps) - float(start_time)*1000
block_complete_timestamps = np.array(block_complete_timestamps) - float(start_time)*1000
x_range = [0,float(end_time)*1000 - float(start_time)*1000]
plt.xlim(x_range)
ax1.set_title('DB Operations Over Length of Trace (' + sys.argv[2] + ')')

# Pad the scheduler series so the step plot spans the whole x-range.
sched_timestamps = np.insert(sched_timestamps,0,0)
sched_switches = np.insert(sched_switches,0,0)
sched_timestamps = np.append(sched_timestamps,float(end_time)*1000 - float(start_time)*1000)
sched_switches = np.append(sched_switches,1)
#ax1.step(sched_timestamps,sched_switches,'k',label='')
ax1.step(sched_timestamps,sched_timestamps,'k',label='')
ax1.set_ylim((0,2))
#ax1.set_ylabel('Number of Writes/Reads')
block_insert_timestamps = np.insert(block_insert_timestamps,0,0)
block_inserted = np.insert(block_inserted,0,0)
block_complete_timestamps = np.insert(block_complete_timestamps,0,0)
block_completed = np.insert(block_completed,0,0)
'''
ax2.set_title('block_operations')
ax2.plot(block_insert_timestamps,block_inserted,'o',label='')
ax2.plot(block_insert_timestamps,block_inserted,'k',label='block operation start event')
ax2.plot(block_complete_timestamps,block_completed,'o',label='')
ax2.plot(block_complete_timestamps,block_completed,'k',label='block operation end event')
ax2.set_ylabel('number of block operations')
legends2 = ax2.legend(loc='lower right')
'''
# Draw one horizontal marker per block operation, ordered by insert time;
# `count` doubles as both the y-position and the running total.
count = 1
for key,block_op in sorted(block.items(), key=lambda t:t[1][0]):
    #count += 0.25
    #count += 1
    op_type = block_op_type[key]
    block_op = np.array(block_op) - float(start_time)*1000
    ax1.hlines(count,block_op[0],block_op[0],ret_color(op_type),lw='10.5',label=op_type)
    ax1.vlines(block_op[0],count+0.05,count-0.25,ret_color(op_type),lw='10',label='')
    ax1.vlines(block_op[0],count+0.05,count-0.25,ret_color(op_type),lw='10',label='')
    count += 1
ax1.set_ylabel('Number of Writes/Reads (' + str(count) + ' Total)')

# Deduplicate legend labels (one entry per op type).
handles, labels = ax1.get_legend_handles_labels()
i = 1
while i<len(labels):
    if(labels[i] in labels[:i]):
        del(labels[i])
        del(handles[i])
    else:
        i += 1
legends = ax1.legend(handles,labels,loc='upper left')
ax1.set_ylim((0,count+1))
ax3.set_title('Trace Time')
ax3.plot(sched_timestamps,cumul_time)
ax3.set_ylabel('Time Spent on Ops (ms)')
# Read the sampled free-memory values recorded alongside the trace
# (third whitespace-separated column of each line).
temp_filename = 'Memory' + sys.argv[2]
memory_usage = []
with open(temp_filename,'r') as log:
    for line in log:
        columns = line.split()
        memory_usage.append(columns[2])

# Count memory samples and scheduler samples so the shorter memory series
# can be stretched to the same length as cumul_time.
memcount = 0;
for x in memory_usage:
    memcount += 1
timecount = 0;
for i in cumul_time:
    timecount += 1
difference = timecount/memcount
diff_mod = timecount % memcount

# Repeat each memory sample `difference` times.
# NOTE(review): `difference` is a float under Python 3, so `temp != difference`
# only terminates when timecount/memcount is integral -- looks Python-2 era;
# verify before reuse.
mem_usage_final = []
memcount = 0
for num in memory_usage:
    temp = 0
    #mem_usage_final.append(num)
    #memcount += 1
    while(temp != difference):
        mem_usage_final.append(num)
        temp += 1
        memcount += 1

# Pad the stretched series with the remainder samples.
# NOTE(review): when diff_mod == 0, mem_use3 stays empty yet is plotted
# below -- presumably the input lengths always leave a remainder; confirm.
count = 0
mem_use3 = []
if diff_mod != 0:
    for num2 in mem_usage_final:
        mem_use3.append(num2)
        if count == memcount:
            temp1 = 0
            while(temp1 != diff_mod):
                mem_use3.append(num2)
                temp1 += 1
        count += 1
'''
test1 = 0
test2 = 0
for i in mem_use3:
test1 += 1
for i in cumul_time:
test2 += 1
f = open('thisIsATest', 'w')
f.write('mem ' + str(test1) + '\n')
f.write('cumu ' + str(test2) + '\n')
f. write('mod '+ str(diff_mod) + '\n')
for x in mem_use3:
f.write(x + '\n')
f.close()
'''
ax2.set_title('Memory Usage')
ax2.plot(sched_timestamps,mem_use3)
#ax2.plot(mem_use3,cumul_time)
ax2.set_ylabel('Bytes available in Memory (e*10^8)')
fig.suptitle(sys.argv[1])
fig.text(0.5, 0.04, 'Total Runtime(in ms)', ha='center', va='center')
plt.show()
| [
"grantwra@buffalo.edu"
] | grantwra@buffalo.edu |
d2b0c1495e93219e94d9bd2315ce951915cea7a6 | 5961fd8dd1c047b85e9a3cce22b0fc58c575f420 | /vrep_gym/vrep/__init__.py | b34ea56993ac6e586538ea2ba336f0c42cf51cde | [] | no_license | anand-bala/vrep-gym | edcc0603cdf5e7513c7fe198372e3af9c4413f03 | a9066c689dae9639b67b16a8390b17a8985015e7 | refs/heads/master | 2020-04-24T22:50:06.780116 | 2019-02-28T03:01:18 | 2019-02-28T03:01:18 | 172,324,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from vrep_gym.vrep.api import VREPSim, VREPObject
__all__ = [
'VREPSim',
'VREPObject',
]
| [
"anandbala1597@gmail.com"
] | anandbala1597@gmail.com |
bc7735fce206af38f2cf083763163c375849f5e8 | bf6e6e7be2323c3fb99a9998c67ad1975e7b80d4 | /0_startcamp/day 3/am_i_lucky_practice2.py | 84cbb7786711dd6314a9ded68b43bc9094597265 | [] | no_license | temporarykwon/TIL | 04fdf98ba7f9ebfa09a17e62782f9849c649fddc | 83f31898e0237d2b330fce6f1679cd1d7865088e | refs/heads/master | 2020-04-12T02:51:16.895716 | 2019-01-18T11:22:11 | 2019-01-18T11:22:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | my_numbers = set([1, 2, 3, 4, 5, 8])
real_numbers = set([1, 2, 3, 4, 5, 6])
bonus = 7
match_count = len(my_numbers & real_numbers)
print(match_count)
if match_count == 6: # same meaning of if diff == 0:
print('1등')
elif match_count == 5 and bonus in my_numbers:
print('2등')
elif match_count == 5:
print('3등')
elif match_count == 4:
print('4등')
elif match_count == 3:
print('5등')
else:
print('6등')
# {1, 2, 3} 클래스는 set으로 된다.
# {1, 2, 3} - {2, 3, 4} = {1} | [
"kwonzoidsbob@gmail.com"
] | kwonzoidsbob@gmail.com |
77d2fa83d35599a5b053874fa4654b5d4fae6602 | 7e72c17745625a1dd4d04f1787c1d2b7bd90642f | /htmlgen/attribute.pyi | 7d17093d5f6cc7d37287a665c5b87a2b0710bba8 | [
"MIT"
] | permissive | ra2003/python-htmlgen | 27de75b94ad3b635caf11d26fa64f4a19e543668 | cbe74d89acd655b78ffe12773b16ef2036502514 | refs/heads/master | 2022-04-08T10:37:36.265349 | 2020-03-11T13:46:53 | 2020-03-11T13:46:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | pyi | import datetime
from typing import Optional, List, Iterable
from htmlgen.element import Element
class html_attribute(object):
    # Descriptor stub for a string-valued HTML attribute with an optional
    # default; reading an unset attribute yields the default.
    def __init__(
        self, attribute_name: str, default: Optional[str] = ...
    ) -> None: ...
    def __get__(
        self, obj: Element, type: Optional[type] = ...
    ) -> Optional[str]: ...
    def __set__(self, obj: Element, value: Optional[str]) -> None: ...
class boolean_html_attribute(object):
def __init__(self, attribute_name: str) -> None: ...
def __get__(self, obj: Element, type_: Optional[type] = ...) -> bool: ...
def __set__(self, obj: Element, value: bool) -> None: ...
class int_html_attribute(object):
def __init__(
self, attribute_name: str, default: Optional[int] = ...
) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> Optional[int]: ...
def __set__(self, obj: Element, value: Optional[int]) -> None: ...
class float_html_attribute(object):
def __init__(
self, attribute_name: str, default: Optional[float] = ...
) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> Optional[float]: ...
def __set__(self, obj: Element, value: Optional[float]) -> None: ...
class time_html_attribute(object):
    # Descriptor stub for a time-valued HTML attribute.
    # Consistency fix: the default is spelled `...` like every other
    # attribute stub in this file (stub defaults carry no runtime value).
    def __init__(
        self, attribute_name: str, default: Optional[datetime.time] = ...
    ) -> None: ...
    def __get__(
        self, obj: Element, type_: Optional[type] = ...
    ) -> Optional[datetime.time]: ...
    def __set__(
        self, obj: Element, value: Optional[datetime.time]
    ) -> None: ...
class list_html_attribute(object):
def __init__(self, attribute_name: str) -> None: ...
def __get__(
self, obj: Element, type_: Optional[type] = ...
) -> List[str]: ...
def __set__(self, obj: Element, value: Iterable[str]) -> None: ...
class data_attribute(html_attribute):
def __init__(
self, data_name: str, default: Optional[str] = None
) -> None: ...
class css_class_attribute(object):
def __init__(self, css_class: str) -> None: ...
def __get__(self, obj: Element, type_: Optional[type] = ...) -> bool: ...
def __set__(self, obj: Element, value: bool) -> None: ...
| [
"srittau@rittau.biz"
] | srittau@rittau.biz |
30439a393e056cc7000eab110322533ac9ba4ff9 | 0d9e5e1d10fd8a0692f1bc77122499991e071d63 | /week4-python/ImageSmoothing/boxBlur.py | 64e6096da81b84b735028ad8e0a5be626d3c7b62 | [] | no_license | gpokhark/OpenCV-CV1 | 06ec457d263b75c4789b0a7a2a7248f573a99d8a | 487f2ce145802b8261c921c4e646d0c67670e1fa | refs/heads/master | 2022-12-13T08:18:06.059507 | 2020-09-07T21:55:51 | 2020-09-07T21:55:51 | 262,436,580 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import cv2
import numpy as np
from dataPath import DATA_PATH
filename = DATA_PATH+"images/gaussian-noise.png"
# Load an image
img = cv2.imread(filename)
# Apply box filter - kernel size 3
dst1=cv2.blur(img,(3,3),(-1,-1))
# Apply box filter - kernel size 7
dst2=cv2.blur(img,(7,7),(-1,-1))
cv2.imshow("Original Image", img)
cv2.waitKey(0)
cv2.imshow("Box Blur Result 1 : KernelSize = 3", dst1)
cv2.waitKey(0)
cv2.imshow("Box Blur Result 1 : KernelSize = 7", dst2)
cv2.waitKey(0)
| [
"gpokhark@gmail.com"
] | gpokhark@gmail.com |
b31a19f61f75d84e9c43cae789ca4a9fafb8dfc3 | 3cae667175b2d6aac6d7f3d8189e9a02c38ea1cf | /AOJ/ITP1/python/ITP1_3_B_Print_Test_Cases.py | 01ada1baf19ee14e9ca3f502aaf3c19915bc6f52 | [] | no_license | kokorinosoba/contests | 3ee14acf729eda872ebec9ec7fe3431f50ae23c2 | 6e0dcd7c8ee086650d89fc65616981361b9b20b9 | refs/heads/master | 2022-08-04T13:45:29.722075 | 2022-07-24T08:50:11 | 2022-07-24T08:50:11 | 149,092,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | for i,e in enumerate(list(open(0))[:-1],1):print(f'Case {i}:',e,end='')
"""
i=1
while 1:
n=input()
if n=="0": break
print(f"Case {i}: {n}")
i+=1
"""
"""
import sys
for i,x in enumerate(sys.stdin,1):
if x=="0\n":break
print(f"Case {i}: {x}",end="")
"""
| [
"34607448+kokorinosoba@users.noreply.github.com"
] | 34607448+kokorinosoba@users.noreply.github.com |
9ca45cc581947b9ffa18024c1e68fc812623bfaa | 3036ea8b4385deaf9942d43f95308e249c140e0e | /test_weight_entry.py | 6ad2229e520fe5f5575d21f2f978fef2ed045d16 | [] | no_license | hbprice65/classworkFall21 | 0800a84fe4ab054f38936a6903e40993f48db0eb | 9fe79d4e2c871a9d78d87cfbed4b224cf1079111 | refs/heads/main | 2023-08-14T16:13:08.628062 | 2021-09-20T16:34:45 | 2021-09-20T16:34:45 | 399,880,892 | 0 | 0 | null | 2021-09-20T16:34:46 | 2021-08-25T16:09:17 | Python | UTF-8 | Python | false | false | 385 | py | import pytest
@pytest.mark.parametrize("input, expected", [
    ("22 lb", 10),
    ("50 kg", 50),
    ("22.1 lb", 10),
    ("22 lbs", 10),
    ("22 KG", 22),
    ("22 Kg", 22),
    ("-22 lb", -10)
])
def test_parse_weight_input(input, expected):
    # Table-driven check of parse_weight_input: the cases imply pounds are
    # converted to kilograms (apparently truncated to int: 22 lb -> 10)
    # while kg values pass through regardless of letter case.
    from weight_entry import parse_weight_input
    answer = parse_weight_input(input)
    assert answer == expected
| [
"hp48@duke.edu"
] | hp48@duke.edu |
ef666dd55a79a37dd98a1d01931f9f8bacf94013 | b4527ccdc9dd8b525deee070653f3ab0f14e28cd | /CarParser/CarParser.py | 66e2dfbd6c025ba3598c7dfa058656451ed6d65d | [] | no_license | HardCorn/pythonScripts | 041f1dd9d1378f1db8b9f39668dd2ea72aa9802a | 090a47f6a126d1ac86133de1ca779f566887a739 | refs/heads/master | 2021-04-28T17:11:30.193737 | 2018-07-08T18:36:33 | 2018-07-08T18:36:33 | 121,848,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,827 | py | import csv
import os
class BlankObj:
    """Wrapper for a fallback ("blank") value.

    If constructed with an exception (or with is_exception=True, in which
    case a non-exception value is wrapped in BaseException), release()
    raises it; otherwise release() returns the stored value.
    """

    def __init__(self, blank=None, is_exception=False):
        already_exc = issubclass(type(blank), BaseException)
        if is_exception and not already_exc:
            blank = BaseException(blank)
        self.blank = blank
        self.is_exception = already_exc or is_exception

    def __repr__(self):
        return repr(self.blank)

    def __str__(self):
        return str(self.blank)

    def release(self):
        """Raise the wrapped exception, or return the wrapped value."""
        if self.is_exception:
            raise self.blank
        return self.blank
class FileReader:
    """Base reader: subclasses implement __read__().

    read() falls back to the BlankObj default (which may raise) when the
    underlying file cannot be read.
    """

    def __init__(self, file_path, blank=BlankObj()):
        self.path = file_path
        if isinstance(blank, BlankObj):
            self.blank = blank
        else:
            self.blank = BlankObj(blank)

    def __read__(self):
        # Overridden by subclasses with the concrete read logic.
        pass

    def __re_alert__(self, reader, blank):
        # Run the reader; on I/O failure hand back (or raise) the blank.
        # Fix: the original had an unreachable `else: raise BaseException`
        # after a `try` body that always returns -- dead code, removed.
        try:
            return reader()
        except IOError:
            return blank.release()

    def read(self):
        return self.__re_alert__(self.__read__, self.blank)

    def simple_read(self):
        return self.__read__()
class TxtFileReader(FileReader):
    """Reads a whole text file into a single string (empty-string blank)."""

    def __init__(self, path, blank=BlankObj('')):
        super().__init__(path, blank)

    def __read__(self):
        with open(self.path) as handle:
            return handle.read()
class CsvReader(FileReader):
    """Reads a delimited file into a list of rows, skipping the header row."""

    def __init__(self, path, blank=BlankObj([]), delimiter=';'):
        super().__init__(path, blank)
        self.delimiter = delimiter

    def __read__(self):
        rows = []
        with open(self.path) as f_csv:
            reader = csv.reader(f_csv, delimiter=self.delimiter)
            # Robustness fix: next(reader) raised StopIteration on an empty
            # file; the default argument makes an empty file yield [].
            next(reader, None)
            for row in reader:
                rows.append(row)
        return rows
class BaseCar:
    """Common fields for all vehicle records; rejects photos without an
    extension."""

    def __init__(self, photo, brand, carry):
        self.photo_file_name = photo
        self.brand = brand
        self.carrying = float(carry)
        if not self.get_photo_file_ext():
            raise ValueError('Bad Photo Extension')

    def get_photo_file_ext(self):
        """Return the photo file's extension, including the leading dot."""
        return os.path.splitext(self.photo_file_name)[1]
class Car(BaseCar):
    # Passenger car: adds a seat count on top of the base fields.
    def __init__(self, photo, brand, carry, passengers):
        super().__init__(photo, brand, carry)
        self.passenger_seats_count = int(passengers)
class Truck(BaseCar):
    """Truck: adds cargo-body dimensions parsed from a "LxWxH" string."""

    def __init__(self, photo, brand, carry, body_whl):
        super().__init__(photo, brand, carry)
        # Dimensions default to 0.0 when no "LxWxH" string is supplied.
        self.body_width = 0.0
        self.body_height = 0.0
        self.body_length = 0.0
        self.get_body_volume(body_whl)

    def get_body_volume(self, body_whl):
        """Parse "length x width x height" into the three body fields."""
        if body_whl == '':
            return
        parts = str(body_whl).split('x')
        if len(parts) != 3:
            raise ValueError('Bad whl')
        self.body_length = float(parts[0])
        self.body_width = float(parts[1])
        self.body_height = float(parts[2])
class SpecCar(BaseCar):
    # Special machinery: carries a free-form description of its extra gear.
    def __init__(self, photo, brand, carry, extra):
        super().__init__(photo, brand, carry)
        self.extra = extra
class CsvRow:
    """One parsed CSV record; knows how to build the matching vehicle."""

    def __init__(self, type, brand, passengers, photo, whl, carry, extra):
        self.type = type
        self.brand = brand
        self.passengers = passengers
        self.photo = photo
        self.whl = whl
        self.carry = carry
        self.extra = extra

    def create_car(self):
        """Instantiate the concrete vehicle class for this row's type;
        returns None for an unknown type."""
        if self.type == 'car':
            return Car(self.photo, self.brand, self.carry, self.passengers)
        if self.type == 'truck':
            return Truck(self.photo, self.brand, self.carry, self.whl)
        if self.type == 'spec_machine':
            return SpecCar(self.photo, self.brand, self.carry, self.extra)
        return None
def get_car_list(csv_filename):
    """Parse the CSV file into vehicle objects, reporting malformed rows.

    Rows that fail validation (bad numbers, wrong field count, bad photo
    extension) are printed and skipped.
    """
    rows = CsvReader(csv_filename).read()
    cars = []
    for row in rows:
        try:
            cars.append(CsvRow(*row).create_car())
        except (ValueError, TypeError) as d:
            print(f'Incorret file row: {row}. Exception: {d.args[0]}')
    return cars
if __name__ == '__main__':
    # Ad-hoc manual run against a local CSV; prints each parsed object.
    total = get_car_list(r'D:\Users\HardCorn\Desktop\python\coursera_week3_cars.csv')
    print(total)
    for each in total:
        print (each.__dict__)
    # a = ValueError('некий текст')
    # b = BlankObj(None, True)
    # f = CsvReader(r'D:\Users\HardCorn\Desktop\python\coursera_week3_cars.csv',b)
    # res = f.read()
    # print(res)
    # f2 = TxtFileReader(r'D:\Users\HardCorn\Desktop\python\coursera_week3_cars.csv',a)
    # res2 = f2.read()
    # print(res2)
    # print(repr(b))
    # get_car_list(r'D:\Users\HardCorn\Desktop\python\coursera_week3_cars.csv')
    # a = BaseCar(r'sdf/sdfasdf/sdfo/sd.ffx', 'honda', '20')
    # print(a.get_photo_file_ext())
    #
# | [
"kuz-proper@list.ru"
] | kuz-proper@list.ru |
8b30cb6bd87a5879669d17eadf69b563911511f2 | c876544555a8903ba83934130685897f7960feb3 | /nanonispy/tests/test_read.py | 2f2174099d23a01add632126707aba98f5c26ed1 | [
"MIT"
] | permissive | Ex-libris/nanonispy | 61f882e405854c9a7bea4caf9767e2cd9916b14c | 0183c1147d160825b5c2c5c9e39e51efe033ce86 | refs/heads/master | 2021-07-14T10:04:58.591597 | 2017-07-07T01:30:24 | 2017-07-07T01:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,965 | py | import unittest
import tempfile
import os
import numpy as np
import nanonispy as nap
class TestNanonisFileBaseClass(unittest.TestCase):
    """
    Testing class for NanonisFile base class.
    """

    def setUp(self):
        # Fresh temp directory per test; fixture files are created inside it.
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_is_instance_nanonis_file(self):
        """
        Check for correct instance of NanonisFile object.
        """
        f = tempfile.NamedTemporaryFile(mode='wb',
                                        suffix='.3ds',
                                        dir=self.temp_dir.name,
                                        delete=False)
        f.write(b':HEADER_END:')
        f.close()
        NF = nap.read.NanonisFile(f.name)
        self.assertIsInstance(NF, nap.read.NanonisFile)

    def test_unsupported_filetype(self):
        """
        Handle unsupported file gracefully.
        """
        with self.assertRaises(nap.read.UnhandledFileError):
            f = tempfile.NamedTemporaryFile(mode='wb',
                                            suffix='.txt',
                                            dir=self.temp_dir.name,
                                            delete=False)
            f.close()
            NF = nap.read.NanonisFile(f.name)

    def test_3ds_suffix_parsed(self):
        """
        3ds file recognized.
        """
        f = tempfile.NamedTemporaryFile(mode='wb',
                                        suffix='.3ds',
                                        dir=self.temp_dir.name,
                                        delete=False)
        f.write(b':HEADER_END:')
        f.close()
        NF = nap.read.NanonisFile(f.name)
        self.assertEqual(NF.filetype, 'grid')

    def test_sxm_suffix_parsed(self):
        """
        Sxm file recognized.
        """
        f = tempfile.NamedTemporaryFile(mode='wb',
                                        suffix='.sxm',
                                        dir=self.temp_dir.name,
                                        delete=False)
        f.write(b'SCANIT_END')
        f.close()
        NF = nap.read.NanonisFile(f.name)
        self.assertEqual(NF.filetype, 'scan')

    def test_dat_suffix_parsed(self):
        """
        Dat file recognized.
        """
        f = tempfile.NamedTemporaryFile(mode='wb',
                                        suffix='.dat',
                                        dir=self.temp_dir.name,
                                        delete=False)
        f.write(b'[DATA]')
        f.close()
        NF = nap.read.NanonisFile(f.name)
        self.assertEqual(NF.filetype, 'spec')

    def test_find_start_byte(self):
        f = tempfile.NamedTemporaryFile(mode='wb',
                                        suffix='.3ds',
                                        dir=self.temp_dir.name,
                                        delete=False)
        f.write(b'header_entry\n:HEADER_END:\n')
        f.close()
        NF = nap.read.NanonisFile(f.name)
        byte_offset = NF.start_byte()
        # 26 == len(b'header_entry\n:HEADER_END:\n'): data begins right
        # after the header-terminator line.
        self.assertEqual(byte_offset, 26)

    def test_no_header_tag_found(self):
        # A .3ds file with no :HEADER_END: marker must raise.
        with self.assertRaises(nap.read.FileHeaderNotFoundError):
            f = tempfile.NamedTemporaryFile(mode='wb',
                                            suffix='.3ds',
                                            dir=self.temp_dir.name,
                                            delete=False)
            f.close()
            NF = nap.read.NanonisFile(f.name)

    def test_header_raw_is_str(self):
        f = tempfile.NamedTemporaryFile(mode='wb',
                                        suffix='.3ds',
                                        dir=self.temp_dir.name,
                                        delete=False)
        f.write(b'header_entry\n:HEADER_END:\n')
        f.close()
        NF = nap.read.NanonisFile(f.name)
        self.assertIsInstance(NF.header_raw, str)
class TestGridFile(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def create_dummy_grid_data(self, suffix='3ds'):
"""
return tempfile file object with dummy header info
"""
f = tempfile.NamedTemporaryFile(mode='wb',
suffix=suffix,
dir=self.temp_dir.name,
delete=False)
f.write(b'Grid dim="230 x 230"\r\nGrid settings=4.026839E-8;-4.295725E-8;1.500000E-7;1.500000E-7;0.000000E+0\r\nSweep Signal="Bias (V)"\r\nFixed parameters="Sweep Start;Sweep End"\r\nExperiment parameters="X (m);Y (m);Z (m);Z offset (m);Settling time (s);Integration time (s);Z-Ctrl hold;Final Z (m)"\r\n# Parameters (4 byte)=10\r\nExperiment size (bytes)=2048\r\nPoints=512\r\nChannels="Input 3 (A)"\r\nDelay before measuring (s)=0.000000E+0\r\nExperiment="Grid Spectroscopy"\r\nStart time="21.10.2014 16:48:06"\r\nEnd time="23.10.2014 10:42:19"\r\nUser=\r\nComment=\r\n:HEADER_END:\r\n')
a = np.linspace(0, 100.0, 230*230*(10+512))
b = np.asarray(a, dtype='>f4')
b.tofile(f)
f.close()
return f
def create_dummy_grid_data_v2(self, suffix='3ds'):
"""
return tempfile file object with dummy header info
"""
f = tempfile.NamedTemporaryFile(mode='wb',
suffix=suffix,
dir=self.temp_dir.name,
delete=False)
f.write(b'Grid dim="230 x 230"\r\nGrid settings=4.026839E-8;-4.295725E-8;1.500000E-7;1.500000E-7;0.000000E+0\r\nFiletype=Linear\r\nSweep Signal="Bias (V)"\r\nFixed parameters="Sweep Start;Sweep End"\r\nExperiment parameters="X (m);Y (m);Z (m);Z offset (m);Settling time (s);Integration time (s);Z-Ctrl hold;Final Z (m)"\r\n# Parameters (4 byte)=10\r\nExperiment size (bytes)=2048\r\nPoints=512\r\nChannels="Input 3 (A)"\r\nDelay before measuring (s)=0.000000E+0\r\nExperiment="Grid Spectroscopy"\r\nStart time="21.10.2014 16:48:06"\r\nEnd time="23.10.2014 10:42:19"\r\nUser=\r\nComment=\r\n:HEADER_END:\r\n')
a = np.linspace(0, 100.0, 230*230*(10+512))
b = np.asarray(a, dtype='>f4')
b.tofile(f)
f.close()
return f
def test_is_instance_grid_file(self):
"""
Check for correct instance of Grid object.
"""
f = self.create_dummy_grid_data()
GF = nap.read.Grid(f.name)
self.assertIsInstance(GF, nap.read.Grid)
def test_data_has_right_shape(self):
f = self.create_dummy_grid_data()
GF = nap.read.Grid(f.name)
self.assertEqual(GF.signals['Input 3 (A)'].shape, (230, 230, 512))
def test_sweep_signal_calculated(self):
f = self.create_dummy_grid_data()
GF = nap.read.Grid(f.name)
self.assertEqual(GF.signals['sweep_signal'].shape, (512,))
def test_raises_correct_instance_error(self):
with self.assertRaises(nap.read.UnhandledFileError):
f = self.create_dummy_grid_data(suffix='sxm')
GF = nap.read.Grid(f.name)
def test_header_entries(self):
f = self.create_dummy_grid_data()
GF = nap.read.Grid(f.name)
test_dict = {'angle': '0.0',
'channels': "['Input 3 (A)']",
'comment': '',
'dim_px': '[230, 230]',
'end_time': '23.10.2014 10:42:19',
'experiment_name': 'Grid Spectroscopy',
'experiment_size': '2048',
'experimental_parameters': "['X (m)', 'Y (m)', 'Z (m)', 'Z offset (m)', 'Settling time (s)', 'Integration time (s)', 'Z-Ctrl hold', 'Final Z (m)']",
'fixed_parameters': "['Sweep Start', 'Sweep End']",
'measure_delay': '0.0',
'num_channels': '1',
'num_parameters': '10',
'num_sweep_signal': '512',
'pos_xy': '[4.026839e-08, -4.295725e-08]',
'size_xy': '[1.5e-07, 1.5e-07]',
'start_time': '21.10.2014 16:48:06',
'sweep_signal': 'Bias (V)',
'user': ''}
for key in GF.header:
a = ''.join(sorted(str(GF.header[key])))
b = ''.join(sorted(test_dict[key]))
self.assertEqual(a, b)
def test_both_header_formats(self):
f = self.create_dummy_grid_data()
f2 = self.create_dummy_grid_data_v2()
GF = nap.read.Grid(f.name)
GF2 = nap.read.Grid(f2.name)
self.assertEqual(GF.header, GF2.header)
class TestScanFile(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def create_dummy_scan_data(self, suffix='.sxm'):
"""
return tempfile file object with dummy header info
"""
f = tempfile.NamedTemporaryFile(mode='wb',
suffix=suffix,
dir=self.temp_dir.name,
delete=False)
f.write(b':NANONIS_VERSION:\n2\n:SCANIT_TYPE:\n FLOAT MSBFIRST\n:REC_DATE:\n 21.11.2014\n:REC_TIME:\n17:19:32\n:REC_TEMP:\n 290.0000000000\n:ACQ_TIME:\n 470.3\n:SCAN_PIXELS:\n 64 64\n:SCAN_FILE:\nC:\\STM data\\2014-11\\2014-11-21\\ScanAg111_November2014_001.sxm\n:SCAN_TIME:\n 3.533E+0 3.533E+0\n:SCAN_RANGE:\n 1.500000E-7 1.500000E-7\n:SCAN_OFFSET:\n 7.217670E-8 2.414175E-7\n:SCAN_ANGLE:\n 0.000E+0\n:SCAN_DIR:\nup\n:BIAS:\n -5.000E-2\n:Z-CONTROLLER:\n\tName\ton\tSetpoint\tP-gain\tI-gain\tT-const\n\tCurrent #3\t1\t1.000E-10 A\t7.000E-12 m\t3.500E-9 m/s\t2.000E-3 s\n:COMMENT:\n\n:NanonisMain>Session Path:\nC:\\STM data\\2014-11\\2014-11-21\n:NanonisMain>SW Version:\nGeneric 4\n:NanonisMain>UI Release:\n3180\n:NanonisMain>RT Release:\n3130\n:NanonisMain>RT Frequency (Hz):\n5E+3\n:NanonisMain>Signals Oversampling:\n10\n:NanonisMain>Animations Period (s):\n20E-3\n:NanonisMain>Indicators Period (s):\n300E-3\n:NanonisMain>Measurements Period (s):\n500E-3\n:DATA_INFO:\n\tChannel\tName\tUnit\tDirection\tCalibration\tOffset\n\t14\tZ\tm\tboth\t-3.480E-9\t0.000E+0\n\t2\tInput_3\tA\tboth\t1.000E-9\t0.000E+0\n\t20\tLIX_1_omega\tA\tboth\t1.000E+0\t0.000E+0\n\t21\tLIY_1_omega\tA\tboth\t1.000E+0\t0.000E+0\n\n:SCANIT_END:\n')
a = np.linspace(0, 100.0, 1+4*2*64*64)
b = np.asarray(a, dtype='>f4')
b.tofile(f)
f.close()
return f
def test_header_entries(self):
f = self.create_dummy_scan_data()
SF = nap.read.Scan(f.name)
test_dict = {'acq_time': '470.3',
'bias': '-0.05',
'comment': '',
'data_info': "{'Channel': ('14', '2', '20', '21'), 'Unit': ('m', 'A', 'A', 'A'), 'Direction': ('both', 'both', 'both', 'both'), 'Offset': ('0.000E+0', '0.000E+0', '0.000E+0', '0.000E+0'), 'Name': ('Z', 'Input_3', 'LIX_1_omega', 'LIY_1_omega'), 'Calibration': ('-3.480E-9', '1.000E-9', '1.000E+0', '1.000E+0')}",
'nanonis_version': '2',
'nanonismain>animations period (s)': '20E-3',
'nanonismain>indicators period (s)': '300E-3',
'nanonismain>measurements period (s)': '500E-3',
'nanonismain>rt frequency (hz)': '5E+3',
'nanonismain>rt release': '3130',
'nanonismain>session path': 'C:\\STM data\\2014-11\\2014-11-21',
'nanonismain>signals oversampling': '10',
'nanonismain>sw version': 'Generic 4',
'nanonismain>ui release': '3180',
'rec_date': '21.11.2014',
'rec_temp': '290.0000000000',
'rec_time': '17:19:32',
'scan_angle': '0.000E+0',
'scan_dir': 'up',
'scan_file': 'C:\\STM data\\2014-11\\2014-11-21\\ScanAg111_November2014_001.sxm',
'scan_offset': '[ 7.21767000e-08 2.41417500e-07]',
'scan_pixels': '[64 64]',
'scan_range': '[ 1.50000000e-07 1.50000000e-07]',
'scan_time': '[ 3.533 3.533]',
'scanit_type': 'FLOAT MSBFIRST',
'z-controller': "{'P-gain': ('7.000E-12 m',), 'Setpoint': ('1.000E-10 A',), 'on': ('1',), 'T-const': ('2.000E-3 s',), 'Name': ('Current #3',), 'I-gain': ('3.500E-9 m/s',)}"}
for key in SF.header:
a = ''.join(sorted(str(SF.header[key])))
b = ''.join(sorted(test_dict[key]))
self.assertEqual(a, b)
def test_raises_correct_instance_error(self):
    """Scan must reject a file whose suffix it does not handle (.3ds)."""
    dummy = self.create_dummy_scan_data(suffix='.3ds')
    with self.assertRaises(nap.read.UnhandledFileError):
        nap.read.Scan(dummy.name)
class TestSpecFile(unittest.TestCase):
    """Tests for nap.read.Spec against a bias-spectroscopy .dat fixture."""

    def setUp(self):
        # Scratch directory, removed again in tearDown.
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def create_dummy_spec_data(self, suffix='dat'):
        """Return a (closed) handle to the bundled spectroscopy fixture.

        NOTE(review): despite the name this does not create data — it only
        opens and immediately closes an existing file next to this module,
        so the returned handle is used solely for its .name attribute.
        """
        base = os.path.dirname(__file__)
        f = open(base+'/Bias-Spectroscopy002.dat', 'rb')
        f.close()
        return f

    def test_header_entries(self):
        """Every parsed .dat header entry must match the expected value."""
        f = self.create_dummy_spec_data()
        SP = nap.read.Spec(f.name)
        test_dict = {'Cutoff frq': '',
                     'Date': '04.08.2015 08:49:41',
                     'Experiment': 'bias spectroscopy',
                     'Filter type': 'Gaussian',
                     'Final Z (m)': 'N/A',
                     'Integration time (s)': '200E-6',
                     'Order': '6',
                     'Settling time (s)': '200E-6',
                     'User': '',
                     'X (m)': '-19.4904E-9',
                     'Y (m)': '-73.1801E-9',
                     'Z (m)': '-13.4867E-9',
                     'Z offset (m)': '-250E-12',
                     'Z-Ctrl hold': 'TRUE'}
        for key in SP.header:
            # Compare as sorted character multisets (order-insensitive).
            a = ''.join(sorted(str(SP.header[key])))
            b = ''.join(sorted(test_dict[key]))
            self.assertEqual(a, b)
class TestUtilFunctions(unittest.TestCase):
    """Round-trip tests for the save_array/load_array helpers."""

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def test_arr_roundtrip(self):
        """An array saved with save_array must load back bit-identical."""
        fname = self.temp_dir.name + '/test_roundtrip.npy'
        # Big-endian float32 input exercises dtype preservation as well.
        a = np.linspace(0, 1.00, dtype='>f4')
        nap.read.save_array(fname, a)
        b = nap.read.load_array(fname)
        np.testing.assert_array_equal(a, b)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"yanns.tremblay@gmail.com"
] | yanns.tremblay@gmail.com |
0042aba77e768718882348e47951b934c08e2a9d | c2a168ec9e91415eeadd53ba6042e614c3e8460c | /benchmark_features/hpopt_1/hpop_test_1/ht_98.py | c901ea3fa3afaaad12754f331e2d90ee977f80d9 | [] | no_license | LiYanChalmers/BoschProductionLine | 530098a9de0d08332511b24a31cdd4b4ec5473fb | de864e55be0e8cd174ccacb06afc77e3dc9ec42a | refs/heads/master | 2020-03-21T20:29:14.134812 | 2018-09-03T08:10:08 | 2018-09-03T08:10:08 | 139,010,159 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,487 | py | # -*- coding: utf-8 -*-
"""
Template for CV parameter search
Tasks:
1. CV
2. Train model
3. Predict on test set
4. Save
a. CV results
b. models trained in CV
c. model trained on the whole train set
d. predictions on test set
To-do:
1. Use models in CV to predict on test set, and save the predictions
a. Rewrite the CV function
b. Overhead of prediction should be small
c. RAM requirement should be small if #columns is not too large
d. In some cases, may need many columns, RAM requirement may be high.
So not implementing this idea now.
"""
import sys
sys.path.insert(0, 'bosch_helper')
from bosch_helper import *  # provides xgb, np, pd, CV helpers, mcc_eval, save_pickle

#%% Set parameter
# Hyperparameter-search run #98; the fixed seed makes the whole run reproducible.
param_id = 98
random_state = 831904

# XGBoost parameters for this trial (chosen by the outer hyperopt search).
param = {'subsample': 0.9, 'silent': 1, 'objective': 'binary:logistic', 'nthread': 20, 'min_child_weight': 5.5, 'max_depth': 15, 'lambda': 4, 'eta': 0.025, 'colsample_bytree': 0.65, 'booster': 'gbtree', 'base_score': 0.0058, 'alpha': 0}

np.random.seed(random_state)

#%% Load data
# Feature matrix is indexed by a 'train'/'test' outer level.
x = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'x')
y_train = pd.read_hdf('numeric_b1_b8_nf149_1.hdf', 'y_train')
x_train = x.loc['train']
x_test = x.loc['test']

#%%
# Repeated stratified K-fold CV (5 folds x 3 repeats) with XGBoost.
cv_results, clfs, running_time = \
    cross_val_predict_skf_rm_xgb(param, x_train, y_train,
                                 num_boost_round=80,
                                 n_splits=5,
                                 n_repeats=3,
                                 random_state=np.random.randint(10**6),
                                 verbose_eval=True)

results = {'clfs_cv': clfs, 'results_cv': cv_results, 'running_time_cv': running_time}

#%% Train on model
# Retrain a single model on the full training set.
dtrain = xgb.DMatrix(x_train, label=y_train)
param['seed'] = np.random.randint(10**6)
clf = xgb.train(param, dtrain,
                num_boost_round=60,
                feval=mcc_eval, evals=[(dtrain, 'train')])
y_train_pred = clf.predict(dtrain)

# Find best threshold
# Grid-search the probability cutoff that maximizes Matthews corr. coefficient.
thresholds = np.linspace(0.01, 0.99, 400)
mcc = np.array([matthews_corrcoef(y_train, y_train_pred>thr) for thr in thresholds])
best_threshold = thresholds[mcc.argmax()]

results['best_threshold_train'] = best_threshold
results['mcc_max_train'] = mcc.max()
results['clf_train'] = clf

#%% Predict on test set
dtest = xgb.DMatrix(x_test)
y_test_pred = clf.predict(dtest)
y_test_pred_int = (y_test_pred>best_threshold).astype(int)

# Write the Kaggle submission file.
sub = pd.read_csv("sample_submission.csv.zip", index_col=0)
sub["Response"] = y_test_pred_int
sub.to_csv('ht_98.csv.gz', compression='gzip')

results['y_test_pred_prob'] = y_test_pred
results['y_test_pred_int'] = y_test_pred_int

# Persist all artifacts (CV models, final model, predictions) for later analysis.
save_pickle(results, 'ht_98.pickle')
| [
"li.yan.chalmers@gmail.com"
] | li.yan.chalmers@gmail.com |
b48dcf9f5b7d59b2d1a5af62864c0e213f7ea7d7 | 2ab2b78fe54755ba3f653cf34b5f70bd1cb02660 | /manager/migrations/0010_auto_20190204_2339.py | 10b93fc95dd74af90f5c71c0b23b1880cd6eb837 | [] | no_license | rezasblade/cotslandwick | 0658b0554bcb0e508ddd855dfaee549e59ec3aa8 | c62a48d9867d57671be3e1c5f84a4ea2e0f305f2 | refs/heads/master | 2020-04-18T05:03:24.668983 | 2019-02-22T17:34:01 | 2019-02-22T17:34:01 | 167,262,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 582 | py | # Generated by Django 2.1.2 on 2019-02-04 23:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add `position` and `trait` fields to the Player model."""

    dependencies = [
        ('manager', '0009_player_preferredfoot'),
    ]

    operations = [
        # New free-text playing position, defaulting existing rows to 'Defender'.
        migrations.AddField(
            model_name='player',
            name='position',
            field=models.CharField(default='Defender', max_length=20),
        ),
        # New personality trait, defaulting existing rows to 'Pays on time'.
        migrations.AddField(
            model_name='player',
            name='trait',
            field=models.CharField(default='Pays on time', max_length=50),
        ),
    ]
| [
"Rezasblade@gmail.com"
] | Rezasblade@gmail.com |
e894dd2c0042e872525cb05a134c54ed4c900387 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/5010/317005010.py | bae484c5693cbe1c4f44c01024c8ae9c43673514 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 756 | py | from bots.botsconfig import *
from records005010 import recorddefs
# Envelope parameters for this bots grammar (X12 transaction set 317, release 5010).
syntax = {
    'version' : '00403',    #version of ISA to send
    'functionalgroup' : 'SO',
}

# Segment hierarchy: segment ID, min/max occurrences, and nested LEVELs
# for loops (N1 party loop, L0 line-item loop).
structure = [
    {ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'N1', MIN: 1, MAX: 10, LEVEL: [
            {ID: 'N2', MIN: 0, MAX: 1},
            {ID: 'N3', MIN: 0, MAX: 2},
            {ID: 'N4', MIN: 0, MAX: 1},
            {ID: 'G61', MIN: 0, MAX: 1},
            {ID: 'N9', MIN: 0, MAX: 9},
        ]},
        {ID: 'G62', MIN: 1, MAX: 1},
        {ID: 'N9', MIN: 1, MAX: 9},
        {ID: 'TD5', MIN: 1, MAX: 1},
        {ID: 'L0', MIN: 1, MAX: 9999, LEVEL: [
            {ID: 'L5', MIN: 0, MAX: 999},
            {ID: 'H1', MIN: 0, MAX: 1},
        ]},
        {ID: 'SE', MIN: 1, MAX: 1},
    ]}
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
f7166be9fcfedf3308a08d2311f9df3ea6d9f985 | 17795f81da1b5e242091b0bcf0f29011106eb1d7 | /blog/migrations/0002_auto_20190524_0708.py | de9e066368b3244d0c371edb8ecc1a05f4c508b4 | [] | no_license | Anirudha1995/portfolio | 782a9100b7720fe00a2e2cab219caf66085f0a78 | 02ae1a7dbf02f5ea43f345baefafa5fd1d513538 | refs/heads/master | 2020-05-27T20:15:59.270310 | 2019-05-24T08:48:46 | 2019-05-24T08:48:46 | 188,776,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | py | # Generated by Django 2.2.1 on 2019-05-24 07:08
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename the Blogs model to Blog (singular)."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Blogs',
            new_name='Blog',
        ),
    ]
| [
"shirolkaranirudha@gmail.com"
] | shirolkaranirudha@gmail.com |
647edb53f15bd0f4088b90b39be7f5102ec74ab1 | dcc42d24c7b01501176acf5a9e0900c5691a71eb | /sar_strategy.py | 7ff9c818cbb023535d2860a8bb31184de5dd78f4 | [] | no_license | zihpzhong/mexbot | cf316d7fa2f506be192e351d2df5cbbf24cb5b37 | d87cb02c0b036c9db1357e18c04f5b41b25b9e7d | refs/heads/master | 2020-03-23T08:47:19.372628 | 2018-07-17T08:30:08 | 2018-07-17T08:30:08 | 141,345,453 | 7 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | # -*- coding: utf-8 -*-
from strategy import Strategy
from indicator import *
def sar_strategy(ticker, ohlcv, position, balance, strategy):
    """Parabolic-SAR reversal strategy: flips long/short when price crosses the SAR.

    ticker/ohlcv/position/balance are market snapshots supplied by the framework;
    orders are placed through `strategy`.
    """
    # Build the indicator (fast parabolic SAR, take latest value)
    vsar = last(fastsar(ohlcv.high, ohlcv.low, 0.02, 0.06, 0.2))
    # Lot size: risk 2% of free BTC balance, converted via last price
    qty_lot = int(balance.BTC.free * 0.02 * ticker.last)
    # Cap the maximum position size
    strategy.risk.max_position_size = qty_lot
    # Place orders (reverse the position if one is already open)
    if vsar > last(ohlcv.high):
        # The stop-limit did not fill, so go short at market
        if position.currentQty >= 0:
            strategy.entry('S', 'sell', qty=qty_lot, limit=ticker.ask)
        # Re-arm a stop-limit buy at the SAR level for the next reversal
        vsar = int(vsar)
        strategy.entry('L', 'buy', qty=qty_lot, limit=vsar, stop=vsar)
    if vsar < last(ohlcv.low):
        # The stop-limit did not fill, so go long at market
        if position.currentQty <= 0:
            strategy.entry('L', 'buy', qty=qty_lot, limit=ticker.bid)
        # Re-arm a stop-limit sell at the SAR level for the next reversal
        vsar = int(vsar)
        strategy.entry('S', 'sell', qty=qty_lot, limit=vsar, stop=vsar)
if __name__ == '__main__':
    import settings
    import logging
    import logging.config
    logging.config.fileConfig("logging.conf")
    logger = logging.getLogger("SARBot")

    # Wire the strategy to BitMEX: 1-minute candles, evaluated every 10 seconds.
    strategy = Strategy(sar_strategy)
    strategy.settings.timeframe = '1m'
    strategy.settings.interval = 10
    strategy.settings.apiKey = settings.apiKey
    strategy.settings.secret = settings.secret
    # Run against the testnet exchange, not live funds.
    strategy.testnet.use = True
    strategy.testnet.apiKey = settings.testnet_apiKey
    strategy.testnet.secret = settings.testnet_secret
    strategy.start()
| [
"arms22@gmail.com"
] | arms22@gmail.com |
e64ca823abfcbd82614d6ad46755ce122f1083e3 | 20cc0400066673de3eb06e6fa0d533673aee60fb | /csmapi.py | 72b427bf8990a79233ed284b2988f35ad97283b9 | [] | no_license | IoTtalk/Map_origin | b0314e28c9434a70188fc85a0ad8a58c68c18836 | 9b25d38897dd03fbef81ec99f838685ab26fdd29 | refs/heads/master | 2022-10-31T07:18:50.787275 | 2018-12-17T10:41:10 | 2018-12-17T10:41:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | import requests
# Base URL of the IoTtalk CSM server; must be set by the caller before use.
ENDPOINT = None
# Default timeout (seconds) for HTTP requests.
TIMEOUT=10
# Shared requests session so connections are reused across calls.
IoTtalk = requests.Session()
# Password key returned by register(); sent as a header on push/pull.
passwordKey = None


class CSMError(Exception):
    # Raised whenever the CSM server replies with a non-200 status.
    pass
def register(mac_addr, profile, UsingSession=IoTtalk):
    """Register a device profile with the CSM server.

    On success the server-issued password key is cached in the module-level
    `passwordKey` for later push/pull calls. Raises CSMError on failure.
    """
    global passwordKey
    response = UsingSession.post(
        ENDPOINT + '/' + mac_addr,
        json={'profile': profile}, timeout=TIMEOUT
    )
    if response.status_code != 200:
        raise CSMError(response.text)
    passwordKey = response.json().get('password')
    return True
def deregister(mac_addr, UsingSession=IoTtalk):
    """Deregister a device from the CSM server.

    Raises CSMError if the server does not answer 200.
    Fix: add timeout=TIMEOUT — every other request in this module is bounded,
    but this call could previously hang forever on a dead server.
    """
    r = UsingSession.delete(ENDPOINT + '/' + mac_addr, timeout=TIMEOUT)
    if r.status_code != 200: raise CSMError(r.text)
    return True
def push(mac_addr, df_name, data, UsingSession=IoTtalk):
    """Push a data sample to device feature `df_name`; raises CSMError on failure."""
    url = ENDPOINT + '/' + mac_addr + '/' + df_name
    response = UsingSession.put(
        url,
        json={'data': data},
        timeout=TIMEOUT,
        headers={'password-key': passwordKey},
    )
    if response.status_code != 200:
        raise CSMError(response.text)
    return True
def pull(mac_addr, df_name, UsingSession=IoTtalk):
    """Pull the queued samples for device feature `df_name`; raises CSMError on failure."""
    url = ENDPOINT + '/' + mac_addr + '/' + df_name
    response = UsingSession.get(
        url,
        timeout=TIMEOUT,
        headers={'password-key': passwordKey},
    )
    if response.status_code != 200:
        raise CSMError(response.text)
    return response.json()['samples']
def get_alias(mac_addr, df_name, UsingSession=IoTtalk):
    """Return the alias name of device feature `df_name`; raises CSMError on failure."""
    url = ENDPOINT + '/get_alias/' + mac_addr + '/' + df_name
    response = UsingSession.get(url, timeout=TIMEOUT)
    if response.status_code != 200:
        raise CSMError(response.text)
    return response.json()['alias_name']
def set_alias(mac_addr, df_name, s, UsingSession=IoTtalk):
    """Set the alias of device feature `df_name` to `s`; raises CSMError on failure.

    NOTE: `s` is concatenated into the query string without URL-encoding,
    matching the server's existing expectation.
    """
    url = ENDPOINT + '/set_alias/' + mac_addr + '/' + df_name + '/alias?name=' + s
    response = UsingSession.get(url, timeout=TIMEOUT)
    if response.status_code != 200:
        raise CSMError(response.text)
    return True
def tree(UsingSession=IoTtalk):
    """Fetch the server's device/feature tree as parsed JSON; raises CSMError on failure."""
    response = UsingSession.get(ENDPOINT + '/tree')
    if response.status_code != 200:
        raise CSMError(response.text)
    return response.json()
| [
"masterchen.06g@g2.nctu.edu.tw"
] | masterchen.06g@g2.nctu.edu.tw |
96626bb34109b8ab51a91a8421462ed420d9af83 | 4723613aaf8cc5ecb7c94536586615c83a9f63ac | /equations.py | 1a2c980f95ab5cfeeed39be8475daf48247206b0 | [] | no_license | max123522/targil_1 | fe0f696119a74325a17252826e7e9d9347fa28dc | 6c588f55198b22976e549151879f3e0895a991ee | refs/heads/main | 2023-03-24T03:16:06.342865 | 2021-03-16T19:44:44 | 2021-03-16T19:44:44 | 348,471,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,227 | py |
def power(x: float, n: float) -> float:
    """Return x raised to the integer power int(n).

    Generalized over the original: negative exponents now return the
    reciprocal (power(x, -k) == 1 / power(x, k)) instead of silently
    returning 1.0 from an empty loop. Non-integer n is truncated, exactly
    as before. No in-file caller passes a negative n, so existing
    behaviour is preserved for all current uses.
    """
    k = int(n)
    machpela = 1.0
    for _ in range(abs(k)):
        machpela = machpela * x
    # Reciprocal for negative exponents (generalization).
    if k < 0:
        machpela = 1.0 / machpela
    return machpela
def azeret(n):
    """Return int(n)! (factorial) as a float; azeret(0) == 1.0."""
    product = 1.0
    # Start at 2: multiplying by 1 is a no-op, results are identical.
    for factor in range(2, int(n) + 1):
        product *= factor
    return product
def exponent(marih: float) -> float:
    """Approximate e**marih with the first 50 terms of the Taylor series.

    Each term is marih**k / k!, computed via the module's own power() and
    azeret() helpers; terms are accumulated in the same order as before.
    """
    total = 0.0
    for term_index in range(50):
        total += power(marih, term_index) / azeret(term_index)
    return total
def ln(x:float) -> float:
    """Approximate the natural logarithm of x via iterative refinement.

    Uses the update y <- y + 2*(x - e**y)/(x + e**y) (a Halley-style step
    for solving e**y = x), starting from y = x - 1, until two successive
    iterates differ by at most 0.001. Returns 0 for x <= 0 (out of domain)
    as an error sentinel rather than raising.
    """
    if x<=0:
        return 0
    else:
        # yn holds the previous iterate; 0 forces at least one loop check.
        yn=0
        result= x-1.0
        # Loop while |yn - result| > 0.001 (convergence tolerance).
        while((yn-result)>0.001 or (result-yn)>0.001):
            yn=result
            result = result + 2*((x-exponent(result))/(x+exponent(result)))
        return result
def XtimesY(x:float,y:float) -> float:
    """Return x**y rounded to 6 decimal places, or 0 on unsupported input.

    Branches:
      * x > 0: any real exponent, via exp(y * ln(x)).
      * x < 0: only integer exponents are defined; negative integer
        exponents use (1/x)**(-y), which equals x**y.
      * x == 0 or any error: returns 0 as a sentinel.
    """
    try:
        if(x>0):
            # x**y = e**(y * ln x) for positive bases.
            a=exponent(ln(x)*y)
            result=float('%0.6f' % a)
            return result
        elif (x<0 and y>=0 and y%1==0):
            # Negative base: only whole exponents; repeated multiplication.
            a=power(x,y)
            result=float('%0.6f' % a)
            return result
        elif (x<0 and y<0 and y%1==0):
            # x**y = (1/x)**(-y) for negative integer exponents.
            a=(power(1/x,-y))
            result=float('%0.6f' % a)
            return result
        elif (x==0):
            return 0
        else:
            # Negative base with fractional exponent: undefined over the reals.
            return 0
    except:
        # Any numeric failure (overflow, division by zero) maps to the 0 sentinel.
        return 0
def sqrt(x:float,y:float) -> float:
    """Return the x-th root of y (i.e. y**(1/x)) rounded to 6 decimals, or 0.

    Despite the name this is a general root function: sqrt(2, 9) == 3.
    Negative y is supported only when the root order 1/x is an odd integer;
    even roots of negatives, x == 0 or y == 0, and any numeric failure all
    return the 0 sentinel.
    """
    try:
        if(y>0 and x!=0):
            # Positive radicand: y**(1/x) via XtimesY.
            x=1/x
            a=(XtimesY(y,x))
            result=float('%0.6f' % a)
            return result
        elif ((y<0 and x%2==0) or (x==0) or (y==0)):
            # Even root of a negative, zero order, or zero radicand: undefined here.
            return 0
        elif (y<0 and (1/x)%int(1/x)==0 and (1/x)%2!=0):
            # Odd integer root order: odd roots of negatives are real.
            # NOTE(review): computes XtimesY(-y, 1/x), i.e. the root of |y|
            # without restoring the sign — confirm intended behaviour.
            x=1/x
            y=-y
            a=(XtimesY(y,x))
            result=float('%0.6f' % a)
            return result
        elif (y<0 and (1/x)%int(1/x)==0 and (1/x)%2==0):
            # Even integer root order of a negative (same computation on |y|).
            x=1/x
            y=-y
            a=(XtimesY(y,x))
            result=float('%0.6f' % a)
            return result
        else:
            return 0
    except:
        return 0
def calculate(x: float) -> float:
    """Evaluate e**x * 7**x * x**(-1) * (x-th root of x), rounded to 6 decimals.

    Returns 0 if any intermediate step fails (same sentinel convention as
    the helpers it composes).
    """
    try:
        combined = exponent(x)*XtimesY(7,x)*XtimesY(x,-1)*sqrt(x,x)
        return float('%0.6f' % combined)
    except:
        return 0
# try:
# num1=input ('enter a num: ')
# x=float(num1)
# print(calculate(x))
# except:
# print(0)
| [
"maxkap1205@gmail.com"
] | maxkap1205@gmail.com |
e4be1f6b38a590a47708ccab19dd9239e95fc5fc | 57eb5f0832f0684fcae7ac84adb0925a2b081cee | /process.py | 272073636bc6d3aa79a7bc4308373755f21d7f71 | [] | no_license | unit02/370A1 | 4ccefcf626f1ac6d6bb728ed799e792c39a3e013 | d7a298f27b208e037530b1d81686a689b910acf8 | refs/heads/master | 2020-04-15T17:06:59.247244 | 2015-08-16T07:53:58 | 2015-08-16T07:53:58 | 40,747,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | # A1 for COMPSCI340/SOFTENG370 2015
# Prepared by Robert Sheehan
# Modified by ...
# You are not allowed to use any extra sleep calls.
import threading
import _thread
from random import randint
from time import sleep
from enum import Enum
# Process categories: background runs unattended, interactive talks to the user.
Type = Enum("Type", "background interactive")
# Scheduling states; decide whether the process sits on the runnable or waiting list.
State = Enum("State", "runnable waiting killed")
class Process(threading.Thread):
    """A process simulated by a daemon thread, scheduled by an external dispatcher.

    Pausing/resuming is driven by self.event; the dispatcher and io system
    are the only components that should change self.state.
    """

    # Class-level counter used to hand out unique process ids.
    next_id = 1

    def __init__(self, iosys, dispatcher, type):
        """Construct a process.
        iosys - the io subsystem so the process can do IO
        dispatcher - so that the process can notify the dispatcher when it has finished
        """
        threading.Thread.__init__(self)
        self.id = Process.next_id
        Process.next_id += 1
        self.iosys = iosys
        self.dispatcher = dispatcher
        self.type = type
        self.panel = None            # display panel, attached later by the UI
        self.daemon = True           # daemon thread: dies with the main program
        self.state = None            # set/cleared only by dispatcher and io system
        self.event = threading.Event()  # cleared = paused, set = allowed to run
        # You will need a process state variable - self.state
        # which should only be modified by the dispatcher and io system.
        # the state can be used to determine which list - runnable or waiting the process
        # appears in.
        # ...

    def run(self):
        """Start the process running."""
        if self.type == Type.background:
            self.run_background()
        elif self.type == Type.interactive:
            self.run_interactive()
        # Tell the dispatcher this process has finished, whatever its type.
        self.dispatcher.proc_finished(self)

    def run_interactive(self):
        """Run as an interactive process."""
        # Something like the following but you will have to think about
        # pausing and resuming the process.
        loops = self.ask_user()
        while loops > 0:
            for i in range(loops):
                self.main_process_body()
            self.iosys.write(self, "\n")
            # Zero (or negative) input ends the process.
            loops = self.ask_user()

    def run_background(self):
        """Run as a background process."""
        # Background processes do a random amount of work then finish.
        loops = randint(10, 160)
        for i in range(loops):
            self.main_process_body()

    def ask_user(self):
        """Ask the user for number of loops."""
        self.iosys.write(self, "How many loops? ")
        input = self.iosys.read(self)
        # The read may have blocked; check whether we were killed meanwhile.
        if self.state == State.killed:
            _thread.exit()
        return int(input)

    def main_process_body(self):
        # Something like the following but you will have to think about
        # pausing and resuming the process.
        # check to see if supposed to terminate
        if self.state == State.killed:
            _thread.exit()
        # Block here while the dispatcher has paused us (event cleared).
        self.event.wait()
        self.iosys.write(self, "*")
        sleep(0.1)
| [
"elizaorchard@gmail.com"
] | elizaorchard@gmail.com |
de44f671db344112f3455fc9a68fd630b9fa685c | a16feb303b7599afac19a89945fc2a9603ae2477 | /Simple_Python/standard/exception/exception_3.py | c238bdfaf994db7ca61ad080adc0958a24b2cca5 | [] | no_license | yafeile/Simple_Study | d75874745ce388b3d0f9acfa9ebc5606a5745d78 | c3c554f14b378b487c632e11f22e5e3118be940c | refs/heads/master | 2021-01-10T22:08:34.636123 | 2015-06-10T11:58:59 | 2015-06-10T11:58:59 | 24,746,770 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | #! /usr/bin/env/python
# -*- coding:utf-8 -*-
class MyClass(object):
    # __slots__ fixes the allowed instance attributes and removes __dict__.
    __slots__ = ('attribute',)


o = MyClass()
o.attribute = 'known attribute'
# Deliberate demonstration: assigning a name not listed in __slots__
# raises AttributeError, since slotted instances have no __dict__.
o.not_a_slot = 'new attribute'
"zhuzhulang@126.com"
] | zhuzhulang@126.com |
524abfd28d64a72146e89166fa64fa9a07ed750f | 32e63a555fddecd5365854d4d509e8f6c10389f6 | /33.py | 7acbe2d483c886cc98e4cfdc190f485fa40ca905 | [
"MIT"
] | permissive | sukirt01/Python-for-Beginners-Solve-50-Exercises-Live | 8ef11558a7ca48f37d5086ead639deba018bf21c | bb26f0aea6c07e91c011614faf180f4361fbabdc | refs/heads/master | 2022-12-24T23:41:46.216194 | 2020-10-03T15:42:51 | 2020-10-03T15:42:51 | 300,915,104 | 0 | 0 | MIT | 2020-10-03T15:42:52 | 2020-10-03T15:39:05 | Python | UTF-8 | Python | false | false | 806 | py | '''
According to Wikipedia, a semordnilap is a word or phrase that spells a different
word or phrase backwards. ("Semordnilap" is itself "palindromes" spelled backwards.)
Write a semordnilap recogniser that accepts a file name (pointing to a list of words)
from the user and finds and prints all pairs of words that are semordnilaps to the screen.
For example, if "stressed" and "desserts" is part of the word list, the the output
should include the pair "stressed desserts". Note, by the way, that each pair by itself
forms a palindrome!
'''
def semordnilap(x):
    """Print every semordnilap pair found in the word-list file `x` (one word per line).

    Python 2 code (print statement). Each word is removed before its reversal
    is looked up, so each pair prints once and pure palindromes are not
    reported (their reversal was just removed). NOTE(review): list.remove()
    inside the loop makes this O(n^2) on large word lists.
    """
    f = open(x).read()
    words = f.split('\n')
    while words:
        a = words[0]
        words.remove(a)
        if a[::-1] in words:
            print a + ' and ' + a[::-1] + ' are semordnilap'

semordnilap('33.txt')
"garg10may@gmail.com"
] | garg10may@gmail.com |
d7ee00da2734562f112c093249781066a481d129 | 7014a13b6163c9d8882c987bf137c238b4711180 | /Messages/Messages/urls.py | b18fc6f526038554e85c674fa44140fb019848e8 | [] | no_license | gordiig/Un_RSOI_Microservices | 1492be7a9c9d136c26b5db74932d4bd0eaa92ac3 | 11253b684a55f42908724c7278f608de92233ed5 | refs/heads/Dev | 2022-05-08T09:08:05.712865 | 2019-12-09T07:28:39 | 2019-12-09T07:28:39 | 211,895,636 | 0 | 0 | null | 2022-04-22T22:29:23 | 2019-09-30T15:44:04 | Python | UTF-8 | Python | false | false | 840 | py | """Messages URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
# URL routing: Django admin plus the MessagesApp API mounted under /api/.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^api/', include('MessagesApp.urls')),
]
| [
"gordiig@gmail.com"
] | gordiig@gmail.com |
0627bf1ede15181473245fadf4e2ba06f3291f58 | abab0d762aa9968c344d47e0634a20d65883d106 | /mnist_board_2.py | a08f8bf5ec939b470b127bf5a78ebbb29e7b4ea8 | [] | no_license | wangaolong/tensorboard | f670db0d1881e245cc0633b80fd6495e335afb73 | 73974aca258078cd0952e098f942c8d08429ede6 | refs/heads/master | 2020-05-03T03:19:27.718774 | 2019-03-29T11:39:33 | 2019-03-29T11:39:33 | 178,394,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,065 | py | import os
import tensorflow as tf
import urllib
# Working directory for MNIST data and logs.
LOGDIR = './mnist/'
# Download (if needed) and load MNIST with one-hot labels (TF1 contrib API).
mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=LOGDIR + 'data', one_hot=True)
def conv_layer(input, size_in, size_out, name="conv"):
    """5x5 conv (stride 1, SAME) + ReLU + 2x2 max-pool, under a name scope."""
    with tf.name_scope(name):
        weights = tf.Variable(tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1), name="W")
        biases = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
        convolved = tf.nn.conv2d(input, weights, strides=[1, 1, 1, 1], padding="SAME")
        activated = tf.nn.relu(convolved + biases)
        # Halve spatial resolution with a 2x2 max-pool.
        return tf.nn.max_pool(activated, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
def fc_layer(input, size_in, size_out, name="fc"):
    """Fully-connected layer with ReLU activation, under a name scope."""
    with tf.name_scope(name):
        weights = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1), name="W")
        biases = tf.Variable(tf.constant(0.1, shape=[size_out]), name="B")
        return tf.nn.relu(tf.matmul(input, weights) + biases)
def mnist_model(learning_rate, use_two_conv, use_two_fc, hparam):
    """Build and train one MNIST CNN variant, logging its graph to TensorBoard.

    hparam names the TensorBoard run directory.
    NOTE(review): main() calls this as mnist_model(lr, use_two_fc, use_two_conv,
    hparam) — the middle two arguments are swapped relative to this signature.
    Harmless while both flags are True, but should be confirmed/fixed.
    """
    tf.reset_default_graph()
    sess = tf.Session()

    # Setup placeholders, and reshape the data
    x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
    x_image = tf.reshape(x, [-1, 28, 28, 1])
    y = tf.placeholder(tf.float32, shape=[None, 10], name="labels")

    # One or two convolutional stages; both end at 7x7x64 after pooling.
    if use_two_conv:
        conv1 = conv_layer(x_image, 1, 32, "conv1")
        conv_out = conv_layer(conv1, 32, 64, "conv2")
    else:
        conv1 = conv_layer(x_image, 1, 64, "conv")
        conv_out = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    flattened = tf.reshape(conv_out, [-1, 7 * 7 * 64])

    # One or two fully-connected stages producing 10-way logits.
    if use_two_fc:
        fc1 = fc_layer(flattened, 7 * 7 * 64, 1024, "fc1")
        embedding_input = fc1
        embedding_size = 1024
        logits = fc_layer(fc1, 1024, 10, "fc2")
    else:
        embedding_input = flattened
        embedding_size = 7*7*64
        logits = fc_layer(flattened, 7*7*64, 10, "fc")

    with tf.name_scope("loss"):
        xent = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=logits, labels=y), name="loss")
        #tf.summary.scalar("loss", xent)
    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(xent)
    with tf.name_scope("accuracy"):
        correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        #tf.summary.scalar("accuracy", accuracy)

    #summ = tf.summary.merge_all()

    # Embedding variable reserved for the TensorBoard projector (currently unused).
    embedding = tf.Variable(tf.zeros([1024, embedding_size]), name="test_embedding")
    #assignment = embedding.assign(embedding_input)
    #saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    # Log the graph for this run under its hyperparameter label.
    tenboard_dir = './tensorboard/test2/'
    writer = tf.summary.FileWriter(tenboard_dir + hparam)
    writer.add_graph(sess.graph)

    # Train for 2001 mini-batches of 100 images.
    for i in range(2001):
        batch = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})
def make_hparam_string(learning_rate, use_two_fc, use_two_conv):
    """Build a run label like 'lr_1E-04,conv=2,fc=2' for TensorBoard grouping."""
    if use_two_conv:
        conv_param = "conv=2"
    else:
        conv_param = "conv=1"
    if use_two_fc:
        fc_param = "fc=2"
    else:
        fc_param = "fc=1"
    return "lr_%.0E,%s,%s" % (learning_rate, conv_param, fc_param)
def main():
    """Grid-search driver: run one training session per hyperparameter combination."""
    # You can try adding some more learning rates
    for lr in [1E-4]:
        # Include "False" as a value to try different model architectures
        for two_fc in [True]:
            for two_conv in [True]:
                # Construct a hyperparameter string for each one (example: "lr_1E-3,fc=2,conv=2)
                run_label = make_hparam_string(lr, two_fc, two_conv)
                print('Starting run for %s' % run_label)
                # Actually run with the new settings
                mnist_model(lr, two_fc, two_conv, run_label)
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
ac68fecbfa4b730b98452c152b6d456abb68da9c | 1ea081d37694edf58826724708f5a252b6f059bd | /akad/SnsAdaptorService.py | f4f03991b17798881eb85b37ceb63ff08e7629de | [
"Apache-2.0"
] | permissive | line-bot-oss/oss-bot | 4169b1a97190b97964f902bd6b81c89f7c9764b6 | 1efc1220fb2a308789fa5cfc618a6ae4a22a09d4 | refs/heads/main | 2023-06-12T16:54:47.534742 | 2021-07-02T09:10:42 | 2021-07-02T09:10:42 | 381,408,717 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 25,974 | py | #
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift_0_13_0.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift_0_13_0.protocol.TProtocol import TProtocolException
from thrift_0_13_0.TRecursive import fix_spec
import sys
import logging
from .ttypes import *
from thrift_0_13_0.Thrift import TProcessor
from thrift_0_13_0.transport import TTransport
all_structs = []
class Iface(object):
    # Thrift-generated service interface for SnsAdaptorService: abstract stubs
    # only. Regenerate with the Thrift compiler rather than hand-editing.
    def getSnsFriends(self, snsIdType, snsAccessToken, startIdx, limit):
        """
        Parameters:
         - snsIdType
         - snsAccessToken
         - startIdx
         - limit
        """
        pass

    def getSnsMyProfile(self, snsIdType, snsAccessToken):
        """
        Parameters:
         - snsIdType
         - snsAccessToken
        """
        pass

    def postSnsInvitationMessage(self, snsIdType, snsAccessToken, toSnsUserId):
        """
        Parameters:
         - snsIdType
         - snsAccessToken
         - toSnsUserId
        """
        pass
class Client(Iface):
    # Thrift-generated RPC client. Each public method serializes a *_args
    # struct (send_*), flushes the transport, then blocks reading the matching
    # *_result (recv_*). Regenerate with the Thrift compiler; do not hand-edit.
    def __init__(self, iprot, oprot=None):
        # A single protocol may serve both directions unless oprot is given.
        self._iprot = self._oprot = iprot
        if oprot is not None:
            self._oprot = oprot
        self._seqid = 0

    def getSnsFriends(self, snsIdType, snsAccessToken, startIdx, limit):
        """
        Parameters:
         - snsIdType
         - snsAccessToken
         - startIdx
         - limit
        """
        self.send_getSnsFriends(snsIdType, snsAccessToken, startIdx, limit)
        return self.recv_getSnsFriends()

    def send_getSnsFriends(self, snsIdType, snsAccessToken, startIdx, limit):
        # Serialize the call frame and its argument struct, then flush.
        self._oprot.writeMessageBegin('getSnsFriends', TMessageType.CALL, self._seqid)
        args = getSnsFriends_args()
        args.snsIdType = snsIdType
        args.snsAccessToken = snsAccessToken
        args.startIdx = startIdx
        args.limit = limit
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getSnsFriends(self):
        # Read the reply frame; server-side faults arrive as EXCEPTION messages.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getSnsFriends_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        # Declared service exception (TalkException) is re-raised locally.
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getSnsFriends failed: unknown result")

    def getSnsMyProfile(self, snsIdType, snsAccessToken):
        """
        Parameters:
         - snsIdType
         - snsAccessToken
        """
        self.send_getSnsMyProfile(snsIdType, snsAccessToken)
        return self.recv_getSnsMyProfile()

    def send_getSnsMyProfile(self, snsIdType, snsAccessToken):
        self._oprot.writeMessageBegin('getSnsMyProfile', TMessageType.CALL, self._seqid)
        args = getSnsMyProfile_args()
        args.snsIdType = snsIdType
        args.snsAccessToken = snsAccessToken
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_getSnsMyProfile(self):
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = getSnsMyProfile_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.success is not None:
            return result.success
        if result.e is not None:
            raise result.e
        raise TApplicationException(TApplicationException.MISSING_RESULT, "getSnsMyProfile failed: unknown result")

    def postSnsInvitationMessage(self, snsIdType, snsAccessToken, toSnsUserId):
        """
        Parameters:
         - snsIdType
         - snsAccessToken
         - toSnsUserId
        """
        self.send_postSnsInvitationMessage(snsIdType, snsAccessToken, toSnsUserId)
        self.recv_postSnsInvitationMessage()

    def send_postSnsInvitationMessage(self, snsIdType, snsAccessToken, toSnsUserId):
        self._oprot.writeMessageBegin('postSnsInvitationMessage', TMessageType.CALL, self._seqid)
        args = postSnsInvitationMessage_args()
        args.snsIdType = snsIdType
        args.snsAccessToken = snsAccessToken
        args.toSnsUserId = toSnsUserId
        args.write(self._oprot)
        self._oprot.writeMessageEnd()
        self._oprot.trans.flush()

    def recv_postSnsInvitationMessage(self):
        # void method: only exceptions are surfaced, success returns None.
        iprot = self._iprot
        (fname, mtype, rseqid) = iprot.readMessageBegin()
        if mtype == TMessageType.EXCEPTION:
            x = TApplicationException()
            x.read(iprot)
            iprot.readMessageEnd()
            raise x
        result = postSnsInvitationMessage_result()
        result.read(iprot)
        iprot.readMessageEnd()
        if result.e is not None:
            raise result.e
        return
class Processor(Iface, TProcessor):
    # Thrift-generated server-side dispatcher: maps incoming method names to
    # process_* handlers that deserialize args, invoke the user handler, and
    # serialize the reply. Regenerate with the Thrift compiler; do not hand-edit.
    def __init__(self, handler):
        self._handler = handler
        self._processMap = {}
        self._processMap["getSnsFriends"] = Processor.process_getSnsFriends
        self._processMap["getSnsMyProfile"] = Processor.process_getSnsMyProfile
        self._processMap["postSnsInvitationMessage"] = Processor.process_postSnsInvitationMessage

    def process(self, iprot, oprot):
        # Read one call frame and dispatch it; unknown methods get an
        # UNKNOWN_METHOD application exception written back.
        (name, type, seqid) = iprot.readMessageBegin()
        if name not in self._processMap:
            iprot.skip(TType.STRUCT)
            iprot.readMessageEnd()
            x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
            oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
            x.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()
            return
        else:
            self._processMap[name](self, seqid, iprot, oprot)
        return True

    def process_getSnsFriends(self, seqid, iprot, oprot):
        args = getSnsFriends_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = getSnsFriends_result()
        try:
            result.success = self._handler.getSnsFriends(args.snsIdType, args.snsAccessToken, args.startIdx, args.limit)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            # Transport failures propagate: the connection is unusable.
            raise
        except TalkException as e:
            # Declared service exception travels back inside the result struct.
            msg_type = TMessageType.REPLY
            result.e = e
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            # Anything unexpected is masked as a generic INTERNAL_ERROR.
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("getSnsFriends", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_getSnsMyProfile(self, seqid, iprot, oprot):
        args = getSnsMyProfile_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = getSnsMyProfile_result()
        try:
            result.success = self._handler.getSnsMyProfile(args.snsIdType, args.snsAccessToken)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except TalkException as e:
            msg_type = TMessageType.REPLY
            result.e = e
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("getSnsMyProfile", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()

    def process_postSnsInvitationMessage(self, seqid, iprot, oprot):
        args = postSnsInvitationMessage_args()
        args.read(iprot)
        iprot.readMessageEnd()
        result = postSnsInvitationMessage_result()
        try:
            # void method: handler return value is discarded.
            self._handler.postSnsInvitationMessage(args.snsIdType, args.snsAccessToken, args.toSnsUserId)
            msg_type = TMessageType.REPLY
        except TTransport.TTransportException:
            raise
        except TalkException as e:
            msg_type = TMessageType.REPLY
            result.e = e
        except TApplicationException as ex:
            logging.exception('TApplication exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = ex
        except Exception:
            logging.exception('Unexpected exception in handler')
            msg_type = TMessageType.EXCEPTION
            result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
        oprot.writeMessageBegin("postSnsInvitationMessage", msg_type, seqid)
        result.write(oprot)
        oprot.writeMessageEnd()
        oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class getSnsFriends_args(object):
    """Request arguments for getSnsFriends.

    Attributes:
     - snsIdType: SNS provider identifier (i32, field 2)
     - snsAccessToken: OAuth access token for the SNS (string, field 3)
     - startIdx: paging offset (i32, field 4)
     - limit: maximum number of friends to return (i32, field 5)
    """

    def __init__(self, snsIdType=None, snsAccessToken=None, startIdx=None, limit=None,):
        self.snsIdType = snsIdType
        self.snsAccessToken = snsAccessToken
        self.startIdx = startIdx
        self.limit = limit

    def read(self, iprot):
        # Fast path: let the accelerated C decoder fill the struct in one call.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 2 and field_type == TType.I32:
                self.snsIdType = iprot.readI32()
            elif field_id == 3 and field_type == TType.STRING:
                raw = iprot.readString()
                self.snsAccessToken = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            elif field_id == 4 and field_type == TType.I32:
                self.startIdx = iprot.readI32()
            elif field_id == 5 and field_type == TType.I32:
                self.limit = iprot.readI32()
            else:
                # Unknown field id or mismatched type: skip for forward compatibility.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getSnsFriends_args')
        if self.snsIdType is not None:
            oprot.writeFieldBegin('snsIdType', TType.I32, 2)
            oprot.writeI32(self.snsIdType)
            oprot.writeFieldEnd()
        if self.snsAccessToken is not None:
            oprot.writeFieldBegin('snsAccessToken', TType.STRING, 3)
            token = self.snsAccessToken.encode('utf-8') if sys.version_info[0] == 2 else self.snsAccessToken
            oprot.writeString(token)
            oprot.writeFieldEnd()
        if self.startIdx is not None:
            oprot.writeFieldBegin('startIdx', TType.I32, 4)
            oprot.writeI32(self.startIdx)
            oprot.writeFieldEnd()
        if self.limit is not None:
            oprot.writeFieldBegin('limit', TType.I32, 5)
            oprot.writeI32(self.limit)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct so fix_spec() can patch nested spec references later.
all_structs.append(getSnsFriends_args)
# Field spec tuple indexed by thrift field id:
# (id, wire type, field name, nested type info, default value).
getSnsFriends_args.thrift_spec = (
None, # 0
None, # 1
(2, TType.I32, 'snsIdType', None, None, ), # 2
(3, TType.STRING, 'snsAccessToken', 'UTF8', None, ), # 3
(4, TType.I32, 'startIdx', None, None, ), # 4
(5, TType.I32, 'limit', None, None, ), # 5
)
class getSnsFriends_result(object):
    """Return envelope for getSnsFriends.

    Attributes:
     - success: SnsFriends payload on success (struct, field 0)
     - e: TalkException raised by the handler, if any (struct, field 1)
    """

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Fast path: let the accelerated C decoder fill the struct in one call.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 0 and field_type == TType.STRUCT:
                self.success = SnsFriends()
                self.success.read(iprot)
            elif field_id == 1 and field_type == TType.STRUCT:
                self.e = TalkException()
                self.e.read(iprot)
            else:
                # Unknown field id or mismatched type: skip for forward compatibility.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getSnsFriends_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct so fix_spec() can patch nested spec references later.
all_structs.append(getSnsFriends_result)
# Field spec tuple indexed by thrift field id:
# (id, wire type, field name, [nested struct class, spec], default value).
getSnsFriends_result.thrift_spec = (
(0, TType.STRUCT, 'success', [SnsFriends, None], None, ), # 0
(1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1
)
class getSnsMyProfile_args(object):
    """Request arguments for getSnsMyProfile.

    Attributes:
     - snsIdType: SNS provider identifier (i32, field 2)
     - snsAccessToken: OAuth access token for the SNS (string, field 3)
    """

    def __init__(self, snsIdType=None, snsAccessToken=None,):
        self.snsIdType = snsIdType
        self.snsAccessToken = snsAccessToken

    def read(self, iprot):
        # Fast path: let the accelerated C decoder fill the struct in one call.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 2 and field_type == TType.I32:
                self.snsIdType = iprot.readI32()
            elif field_id == 3 and field_type == TType.STRING:
                raw = iprot.readString()
                self.snsAccessToken = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            else:
                # Unknown field id or mismatched type: skip for forward compatibility.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getSnsMyProfile_args')
        if self.snsIdType is not None:
            oprot.writeFieldBegin('snsIdType', TType.I32, 2)
            oprot.writeI32(self.snsIdType)
            oprot.writeFieldEnd()
        if self.snsAccessToken is not None:
            oprot.writeFieldBegin('snsAccessToken', TType.STRING, 3)
            token = self.snsAccessToken.encode('utf-8') if sys.version_info[0] == 2 else self.snsAccessToken
            oprot.writeString(token)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct so fix_spec() can patch nested spec references later.
all_structs.append(getSnsMyProfile_args)
# Field spec tuple indexed by thrift field id:
# (id, wire type, field name, nested type info, default value).
getSnsMyProfile_args.thrift_spec = (
None, # 0
None, # 1
(2, TType.I32, 'snsIdType', None, None, ), # 2
(3, TType.STRING, 'snsAccessToken', 'UTF8', None, ), # 3
)
class getSnsMyProfile_result(object):
    """Return envelope for getSnsMyProfile.

    Attributes:
     - success: SnsProfile payload on success (struct, field 0)
     - e: TalkException raised by the handler, if any (struct, field 1)
    """

    def __init__(self, success=None, e=None,):
        self.success = success
        self.e = e

    def read(self, iprot):
        # Fast path: let the accelerated C decoder fill the struct in one call.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 0 and field_type == TType.STRUCT:
                self.success = SnsProfile()
                self.success.read(iprot)
            elif field_id == 1 and field_type == TType.STRUCT:
                self.e = TalkException()
                self.e.read(iprot)
            else:
                # Unknown field id or mismatched type: skip for forward compatibility.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('getSnsMyProfile_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct so fix_spec() can patch nested spec references later.
all_structs.append(getSnsMyProfile_result)
# Field spec tuple indexed by thrift field id:
# (id, wire type, field name, [nested struct class, spec], default value).
getSnsMyProfile_result.thrift_spec = (
(0, TType.STRUCT, 'success', [SnsProfile, None], None, ), # 0
(1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1
)
class postSnsInvitationMessage_args(object):
    """Request arguments for postSnsInvitationMessage.

    Attributes:
     - snsIdType: SNS provider identifier (i32, field 2)
     - snsAccessToken: OAuth access token for the SNS (string, field 3)
     - toSnsUserId: SNS user id of the invitation recipient (string, field 4)
    """

    def __init__(self, snsIdType=None, snsAccessToken=None, toSnsUserId=None,):
        self.snsIdType = snsIdType
        self.snsAccessToken = snsAccessToken
        self.toSnsUserId = toSnsUserId

    def read(self, iprot):
        # Fast path: let the accelerated C decoder fill the struct in one call.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 2 and field_type == TType.I32:
                self.snsIdType = iprot.readI32()
            elif field_id == 3 and field_type == TType.STRING:
                raw = iprot.readString()
                self.snsAccessToken = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            elif field_id == 4 and field_type == TType.STRING:
                raw = iprot.readString()
                self.toSnsUserId = raw.decode('utf-8') if sys.version_info[0] == 2 else raw
            else:
                # Unknown field id or mismatched type: skip for forward compatibility.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('postSnsInvitationMessage_args')
        if self.snsIdType is not None:
            oprot.writeFieldBegin('snsIdType', TType.I32, 2)
            oprot.writeI32(self.snsIdType)
            oprot.writeFieldEnd()
        if self.snsAccessToken is not None:
            oprot.writeFieldBegin('snsAccessToken', TType.STRING, 3)
            token = self.snsAccessToken.encode('utf-8') if sys.version_info[0] == 2 else self.snsAccessToken
            oprot.writeString(token)
            oprot.writeFieldEnd()
        if self.toSnsUserId is not None:
            oprot.writeFieldBegin('toSnsUserId', TType.STRING, 4)
            target = self.toSnsUserId.encode('utf-8') if sys.version_info[0] == 2 else self.toSnsUserId
            oprot.writeString(target)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct so fix_spec() can patch nested spec references later.
all_structs.append(postSnsInvitationMessage_args)
# Field spec tuple indexed by thrift field id:
# (id, wire type, field name, nested type info, default value).
postSnsInvitationMessage_args.thrift_spec = (
None, # 0
None, # 1
(2, TType.I32, 'snsIdType', None, None, ), # 2
(3, TType.STRING, 'snsAccessToken', 'UTF8', None, ), # 3
(4, TType.STRING, 'toSnsUserId', 'UTF8', None, ), # 4
)
class postSnsInvitationMessage_result(object):
    """Return envelope for postSnsInvitationMessage (void call).

    Attributes:
     - e: TalkException raised by the handler, if any (struct, field 1)
    """

    def __init__(self, e=None,):
        self.e = e

    def read(self, iprot):
        # Fast path: let the accelerated C decoder fill the struct in one call.
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
            return
        iprot.readStructBegin()
        while True:
            (_, field_type, field_id) = iprot.readFieldBegin()
            if field_type == TType.STOP:
                break
            if field_id == 1 and field_type == TType.STRUCT:
                self.e = TalkException()
                self.e.read(iprot)
            else:
                # Unknown field id or mismatched type: skip for forward compatibility.
                iprot.skip(field_type)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        # Fast path: accelerated C encoder.
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
            return
        oprot.writeStructBegin('postSnsInvitationMessage_result')
        if self.e is not None:
            oprot.writeFieldBegin('e', TType.STRUCT, 1)
            self.e.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Generated structs perform no validation.
        return

    def __repr__(self):
        attrs = ', '.join('%s=%r' % item for item in self.__dict__.items())
        return '%s(%s)' % (self.__class__.__name__, attrs)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
# Register the struct so fix_spec() can patch nested spec references later.
all_structs.append(postSnsInvitationMessage_result)
# Field spec tuple indexed by thrift field id:
# (id, wire type, field name, [nested struct class, spec], default value).
postSnsInvitationMessage_result.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'e', [TalkException, None], None, ), # 1
)
# Resolve nested struct references inside every registered spec, then drop
# the module-level registry since it is no longer needed.
fix_spec(all_structs)
del all_structs
| [
"admin@nanato12.info"
] | admin@nanato12.info |
d66f5181446f5349454f7327d80031e5ad23c868 | f859de034c282fd272893887c0aa21c9f8f5bad6 | /test/test_date_time_helper.py | 17b96d7ed85a364f1178795b5663a21674b74804 | [
"Apache-2.0"
] | permissive | Scandinaf/ll_free | cd1cb1a812c0c6b400e266dc843872a59388fa83 | 7d35dce5955f11e4af52400f961c76c9904c2f05 | refs/heads/master | 2020-04-10T19:29:27.671602 | 2019-01-12T05:21:18 | 2019-01-12T05:21:18 | 161,236,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import datetime
from utils.helper import DateTimeHelper
# Fixed reference moment shared by every test below.
year = 2020
month = 5
day = 17
hour = 12
minute = 45
# Pin the helper's notion of "now" so the subtraction tests are deterministic.
helper = DateTimeHelper()
helper.current_date_time = datetime.datetime(year=year, month=month, day=day, hour=hour, minute=minute)
def test_subtract_months():
    """Subtracting 1 month from 2020-05-17 12:45 should yield 2020-04-17 12:45."""
    months_back = 1
    expected = datetime.datetime(year=year, month=month - months_back, day=day,
                                 hour=hour, minute=minute)
    assert helper.subtract_months(months_back) == expected
def test_subtract_days():
    """Subtracting 5 days from 2020-05-17 12:45 should yield 2020-05-12 12:45."""
    days_back = 5
    expected = datetime.datetime(year=year, month=month, day=day - days_back,
                                 hour=hour, minute=minute)
    assert helper.subtract_days(days_back) == expected
def test_subtract_minutes():
    """Subtracting 30 minutes from 12:45 should yield 12:15."""
    count = 30
    # Fix: pass `count` instead of a duplicated literal 30 so the call and the
    # expected value below can never drift apart if the test value changes.
    result = helper.subtract_minutes(count)
    assert result == datetime.datetime(year=year, month=month, day=day,
                                       hour=hour, minute=minute - count)
def test_get_current_date():
    """get_current_date_time() must echo the pinned reference moment."""
    expected = datetime.datetime(year=year, month=month, day=day,
                                 hour=hour, minute=minute)
    assert helper.get_current_date_time() == expected
"sergeyqwertyborovskiy@gmail.com"
] | sergeyqwertyborovskiy@gmail.com |
1395fdc0e17b6a0304769eba37eb9d6e6ee9afb4 | 018e0e9858249e0c6ed20ee74e37b9f732d3356a | /core_oop.py | 21c37089bf9e3492cfd40945d0d55e6f4b532e1b | [] | no_license | watersb1323/Squash | e6874c2480038bbc07700e3e02ffc157f329dadf | 06cfb02b5c130599a383a882482daf640d45d662 | refs/heads/master | 2020-05-24T21:41:20.716094 | 2018-02-26T23:06:15 | 2018-02-26T23:06:15 | 84,883,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,660 | py | import pandas as pd
class Player:
    """A squash player with cumulative points for (PF) and against (PA)."""

    # Running count of instances; doubles as the source of sequential ids.
    numPlayers = 0

    def __init__(self, name, PF=0, PA=0):
        self.name = name
        Player.numPlayers += 1
        self.id = Player.numPlayers
        self.PF = PF
        self.PA = PA

    def get_name(self):
        """Return the player's name."""
        return self.name

    def get_id(self):
        """Return the player's sequential id."""
        return self.id

    def add_PF(self, points):
        """Add points scored by this player and return the new total."""
        self.PF = self.PF + points
        return self.PF

    def get_PF(self):
        """Return total points scored (points for)."""
        return self.PF

    def add_PA(self, points):
        """Add points conceded by this player and return the new total."""
        self.PA = self.PA + points
        return self.PA

    def get_PA(self):
        """Return total points conceded (points against)."""
        return self.PA
class Game:
    """A single game between two Player objects with the final score."""

    # Running count of games; doubles as the source of sequential ids.
    numGames = 0

    def __init__(self, date, session_num, player1, player2, player1score, player2score):
        self.date = date
        self.session_num = session_num
        self.player1 = player1
        self.player2 = player2
        self.player1score = player1score
        self.player2score = player2score
        Game.numGames += 1
        self.id = Game.numGames

    def get_player1_name(self):
        """Return player 1's name.

        Fixed: the original called ``self.player1.getname()``, but Player
        defines ``get_name()``, so this method always raised AttributeError.
        """
        return self.player1.get_name()

    def get_id(self):
        """Return the game's sequential id."""
        return self.id
class Session:
    """A dated session: the games played in it and the players involved."""

    def __init__(self, date, session_num):
        self.date = date
        self.session_num = session_num
        self.games = {}    # game id -> Game
        self.players = {}  # player name -> Player

    def add_game(self, game):
        """Register a game under its id and return the stored object."""
        key = game.get_id()
        self.games[key] = game
        return self.games[key]

    def add_player(self, player):
        """Register a player under their name (idempotent) and return them."""
        key = player.get_name()
        self.players[key] = player
        return self.players[key]

    def get_num_games(self):
        """Return how many games were registered."""
        return len(self.games)

    def get_num_players(self):
        """Return how many distinct players were registered."""
        return len(self.players)

    def get_session_info(self):
        """Return a one-line human-readable summary of the session."""
        return 'Session {0} was played by {1} players ' \
               'who collectively played {2} games.'.format(self.session_num,
                                                           self.get_num_players(),
                                                           self.get_num_games())
if __name__ == "__main__":
session_headers = [
'Player1',
'Score1',
'Score2',
'Player2',
'Date'
]
session_data = [
['Brian', 2, 11, 'Steve', 'll/12/2017'],
['James', 2, 11, 'Steve', 'll/12/2017'],
['Brian', 12, 10, 'James', 'll/12/2017']
]
session_df = pd.DataFrame(data=session_data, columns=session_headers)
print(session_df.head())
# Obtain unique list of players from input table
players_list = list(set(session_df[['Player1', 'Player2']].stack().values))
players = {}
for pl in players_list:
players[pl] = Player(pl)
# Define Session object and cycle through games adding game information
date_of_session = '11/12/2017'
num_of_session = 3
session_obj = Session(date_of_session, num_of_session)
for _, game in session_df.iterrows():
p1 = players[game['Player1']]
p2 = players[game['Player2']]
p1_score = game['Score1']
p2_score = game['Score2']
p1.add_PF(p1_score)
p2.add_PF(p2_score)
p1.add_PA(p2_score)
p2.add_PA(p1_score)
game_obj = Game(
date_of_session,
num_of_session,
p1, p2,
p1_score, p2_score
)
session_obj.add_game(game_obj)
session_obj.add_player(p1)
session_obj.add_player(p2)
print(session_obj.get_session_info())
for _, playa in players.items():
print(playa.get_PA())
print(playa.get_PF())
| [
"watersb1323@gmail.com"
] | watersb1323@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.