blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c5030a179cc631854c7d96c7e2d1c6386015dbf | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2209/60752/238089.py | 1fbb44fa4a0aa3c0bafbae2da87777e5890a840f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | #include <bits/stdc++.h>
#define ls q << 1
#define rs q << 1 | 1
using namespace std;
const int N = 3e5 + 11;
const int inf = 0x3f3f3f3f;
char s[N], c[N];
int n, m, cnt, trie[N][26], ed[N], tr[N << 2];
int dep[N], f[N], g[N], v[N], fail[N];
queue<int> q;
// Insert the pattern stored in c[1..len] into the trie, recording each
// node's depth (pattern length so far) and marking the terminal node.
void insert() {
    int now = 0, len = strlen(c + 1);
    for (int i = 1; i <= len; i++) {
        int num = c[i] - 'a';
        if (!trie[now][num])
            trie[now][num] = ++cnt;  // allocate a fresh trie node
        dep[trie[now][num]] = dep[now] + 1;
        now = trie[now][num];
    }
    ed[now] = 1;  // a full pattern ends at this node
}
// Build Aho-Corasick fail links with a BFS from the root and compress the
// goto function in place. v[x] caches the length of the longest pattern
// that ends at node x (0 if none), inherited through fail links.
void makefail() {
    for (int i = 0; i < 26; i++)
        if (trie[0][i])
            q.push(trie[0][i]);
    while (!q.empty()) {
        int x = q.front();
        q.pop();
        if (ed[x])
            v[x] = dep[x];  // a pattern of length dep[x] ends here
        else
            v[x] = v[fail[x]];  // inherit the longest suffix match
        for (int i = 0; i < 26; i++) {
            if (trie[x][i])
                fail[trie[x][i]] = trie[fail[x]][i], q.push(trie[x][i]);
            else
                trie[x][i] = trie[fail[x]][i];  // automaton compression
        }
    }
}
// Segment-tree pull-up: node q holds the minimum of its two children.
void update(int q) { tr[q] = min(tr[ls], tr[rs]); }
// Range-minimum query over tr[] on [L, R]; (l, r) is node q's span.
int query(int q, int l, int r, int L, int R) {
    if (l >= L && r <= R)
        return tr[q];
    int mid = l + r >> 1, re = inf;
    if (mid >= L)
        re = min(re, query(ls, l, mid, L, R));
    if (mid < R)
        re = min(re, query(rs, mid + 1, r, L, R));
    return re;
}
// Point assignment tr[position x] = v, pulling minimums up afterwards.
void modify(int q, int l, int r, int x, int v) {
    if (l == r)
        return tr[q] = v, void();
    int mid = l + r >> 1;
    if (mid >= x)
        modify(ls, l, mid, x, v);
    else
        modify(rs, mid + 1, r, x, v);
    update(q);
}
// Fast integer input: skips non-digit prefix, honours leading '-' signs.
int read() {
    int x = 0, f = 1;
    char ch = getchar();
    while (!isdigit(ch)) {
        if (ch == '-')
            f = -f;
        ch = getchar();
    }
    while (isdigit(ch)) {
        x = x * 10 + ch - 48;
        ch = getchar();
    }
    return x * f;
}
// Driver: read m patterns and text s, build the automaton, then DP where
// f[i] = minimum number of pattern pieces covering prefix s[1..i]
// (inf if impossible). Answer is f[n], or -1 when no cover exists.
signed main() {
    m = read();
    scanf("%s", s + 1);
    n = strlen(s + 1);
    for (int i = 1; i <= m; i++) {
        scanf("%s", c + 1);
        insert();
    }
    makefail();
    int now = 0;
    // g[i] = length of the longest pattern ending at position i of s.
    for (int i = 1; i <= n; i++) {
        int x = trie[now][s[i] - 'a'];
        g[i] = v[x];
        now = x;
    }
    memset(tr, 0x3f, sizeof(tr));
    for (int i = 1; i <= n; i++) {
        if (!g[i])
            f[i] = inf;  // no pattern ends here: prefix not coverable here
        else {
            if (g[i] == i)
                f[i] = 1;  // a single pattern spans the whole prefix
            else
                // best f over positions a pattern ending at i can start after
                f[i] = query(1, 1, n, i - g[i], i - 1) + 1;
        }
        modify(1, 1, n, i, f[i]);
    }
    printf("%d\n", f[n] >= inf ? -1 : f[n]);
    return 0;
} | [
"1069583789@qq.com"
] | 1069583789@qq.com |
f646f114ff492f1ee43d8edfa4022c398984be63 | 5c16b25d78823499d3a8b33a59636ce3b0923da1 | /articleapp/migrations/0001_initial.py | 7006451c2c3a7fda0d4f3cdbd5861c97b1908a4c | [] | no_license | noeul1114/gis_3ban_1 | 351b3dd5dd306a333df657bf9d1cdad0827f4161 | e25fac8922984c7a3b42d8f97ac8ce230d0224fb | refs/heads/master | 2023-08-26T14:41:27.970660 | 2021-10-06T06:59:00 | 2021-10-06T06:59:00 | 382,189,975 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,009 | py | # Generated by Django 3.2.4 on 2021-08-03 01:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration creating the Article model."""
    # First migration for this app.
    initial = True
    dependencies = [
        # Article.writer references the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200, null=True)),
                ('image', models.ImageField(null=True, upload_to='article/')),
                ('content', models.TextField(null=True)),
                ('created_at', models.DateField(auto_now_add=True, null=True)),
                # SET_NULL keeps articles when the writer account is deleted.
                ('writer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='article', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"parkhyongsok@naver.com"
] | parkhyongsok@naver.com |
e97bec724b57e515c3368cbf9177b005f77ae1ea | af7466d6abfcce9e02efe91abe1875fbcf8d04aa | /lib/parsers/parse856Holding.py | 7b351add50a67d0c83f41d770abb32c03de734fb | [] | no_license | NYPL/sfr-oclc-catalog-lookup | eb1472d1a6cab85734b4c0ac6648de846e5b00fb | 4bf3bde518211870d6c20cde840c57bd83c1816c | refs/heads/development | 2020-04-15T07:45:59.184860 | 2020-02-05T21:22:32 | 2020-02-05T21:22:32 | 164,501,003 | 1 | 1 | null | 2020-02-05T21:22:34 | 2019-01-07T21:43:14 | Python | UTF-8 | Python | false | false | 7,355 | py | import math
from multiprocessing import Process, Pipe
from multiprocessing.connection import wait
import re
import requests
from helpers.errorHelpers import HoldingError
from lib.dataModel import Link, Identifier
class HoldingParser:
    """Translate a MARC 856 holding field into links and identifiers.

    Results are attached to ``self.instance``: recognized ebook URLs become
    formats, recognizable IDs become identifiers, anything else becomes a
    plain text/html link.
    """
    # URL shapes that mark a directly readable ebook, per source.
    EBOOK_REGEX = {
        'gutenberg': r'gutenberg.org\/ebooks\/[0-9]+\.epub\.(?:no|)images$',
        'internetarchive': r'archive.org\/details\/[a-z0-9]+$',
        'hathitrust': r'catalog.hathitrust.org\/api\/volumes\/[a-z]{3,6}\/[a-zA-Z0-9]+\.html' # noqa: E501
    }
    # URL shapes from which a source identifier can be captured.
    ID_REGEX = {
        'oclc': r'oclc\/([0-9]+)',
        'gutenberg': r'gutenberg.org\/ebooks\/([0-9]+)$'
    }
    # Last path component of a URI, excluding a 3-4 char file extension.
    URI_ID_REGEX = r'\/((?:(?!\/)[^.])+(?=$|\.[a-z]{3,4}$))'
    HATHI_OCLC_REGEX = r'([a-z]+\/[a-z0-9]+)\.html$'
    HATHI_ID_REGEX = r'id=([a-z\.\/\$0-9]+)'
    HATHI_DOWNLOAD_URL = 'babel.hathitrust.org/cgi/imgsrv/download/pdf?id={}'
    HATHI_METADATA_URL = 'http://catalog.hathitrust.org/api/volumes/full/{}.json'
    def __init__(self, field, instance):
        # field: a MARC 856 field; instance: record to attach results to.
        self.field = field
        self.instance = instance
        self.source = 'unknown'
    def parseField(self):
        """Validate the 856 field and extract its URI and identifier."""
        # Indicator '4' marks an HTTP-accessible resource.
        if self.field.ind1 != '4':
            raise HoldingError('856 does not contain an HTTP reference')
        try:
            self.uri = self.field.subfield('u')[0].value
        except IndexError:
            raise HoldingError('856 Field is missing u subfield for URI')
        # NOTE(review): loadURIid() assigns self.identifier internally and
        # returns None, so this assignment immediately overwrites the parsed
        # value with None -- looks like a bug; confirm intended behavior.
        self.identifier = self.loadURIid()
    def loadURIid(self):
        """Set ``self.identifier`` from the final path segment of the URI.

        URI_ID_REGEX captures the last path component while a lookahead
        strips a trailing 3-4 letter extension (e.g. "1234" from both
        ".../1234" and ".../1234.epub"). Falls back to the full URI when
        nothing matches.
        """
        uriGroup = re.search(self.URI_ID_REGEX, self.uri)
        if uriGroup is not None:
            self.identifier = uriGroup.group(1)
        else:
            self.identifier = self.uri
    def extractBookLinks(self):
        """Classify the URI as ebook, bare identifier, or generic link."""
        if self.matchEbook() is True:
            return
        elif self.matchIdentifier() is True:
            return
        else:
            # Unrecognized URL: keep it as a plain web link.
            self.instance.links.append(
                HoldingParser.createLink(self.uri, 'text/html')
            )
    def matchEbook(self):
        """Attach an ebook format if the URI matches a known source.

        Returns True when a format was added, None for access-restricted
        IA items or HathiTrust catalog pages (handled separately).
        NOTE(review): self.source is assigned before the regex test, so on
        a total miss it holds the last source tried, not 'unknown'.
        """
        for source, regex in self.EBOOK_REGEX.items():
            self.source = source
            if re.search(regex, self.uri):
                if source == 'internetarchive':
                    if self.checkIAStatus() is True:
                        return None
                elif source == 'hathitrust':
                    self.parseHathiLink()
                    return None
                self.instance.addFormat(**{
                    'source': source,
                    'content_type': 'ebook',
                    'links': [
                        self.createLink(
                            self.uri, 'text/html',
                            local=False, download=False, images=False, ebook=True
                        )
                    ],
                    # NOTE(review): identifier source is hardcoded 'hathi'
                    # even for gutenberg/IA matches -- confirm.
                    'identifiers': [Identifier(identifier=self.identifier, source='hathi')]
                })
                return True
    def matchIdentifier(self):
        """Add an identifier when the URI embeds a recognizable ID."""
        for idType, regex in self.ID_REGEX.items():
            idGroup = re.search(regex, self.uri)
            if idGroup is not None:
                self.instance.addIdentifier(**{
                    'type': idType,
                    'identifier': idGroup.group(1),
                    'weight': 0.8
                })
                return True
    def checkIAStatus(self):
        """Return True when the Internet Archive item is access-restricted."""
        metadataURI = self.uri.replace('details', 'metadata')
        metadataResp = requests.get(metadataURI)
        if metadataResp.status_code == 200:
            iaData = metadataResp.json()
            iaMeta = iaData['metadata']
            if iaMeta.get('access-restricted-item', False) is False:
                return False
        # Restricted, or the metadata could not be fetched.
        return True
    def parseHathiLink(self):
        """Process HathiTrust catalog URLs; non-catalog URLs are skipped."""
        if 'catalog' not in self.uri:
            return None
        self.loadCatalogLinks()
    def loadCatalogLinks(self):
        """Fetch the Hathi catalog record and fan out over its items."""
        hathiIDGroup = re.search(self.HATHI_OCLC_REGEX, self.uri)
        if hathiIDGroup:
            hathiID = hathiIDGroup.group(1)
            hathiItems = self.fetchHathiItems(hathiID)
            if hathiItems:
                self.startHathiMultiprocess(hathiItems)
    def startHathiMultiprocess(self, hathiItems):
        """Resolve Hathi item links in parallel across 4 worker processes.

        Workers stream parsed format dicts back over one-way Pipes; the
        sentinel string 'DONE' (or EOF) retires a worker's pipe.
        """
        processes = []
        outPipes = []
        cores = 4
        chunkSize = math.ceil(len(hathiItems) / cores)
        for i in range(cores):
            start = i * chunkSize
            end = start + chunkSize
            pConn, cConn = Pipe(duplex=False)
            proc = Process(
                target=self.processHathiChunk,
                args=(hathiItems[start:end], cConn)
            )
            processes.append(proc)
            outPipes.append(pConn)
            proc.start()
            # Parent closes its copy of the write end so EOF can propagate.
            cConn.close()
        while outPipes:
            for p in wait(outPipes):
                try:
                    newItem = p.recv()
                    if newItem == 'DONE':
                        outPipes.remove(p)
                    else:
                        self.instance.addFormat(**newItem)
                except EOFError:
                    outPipes.remove(p)
        for proc in processes:
            proc.join()
    def fetchHathiItems(self, hathiID):
        """Return the 'items' list from the Hathi volumes API.

        NOTE(review): implicitly returns None on non-200 responses; callers
        must treat a falsy result as 'no items'.
        """
        apiURL = self.HATHI_METADATA_URL.format(
            hathiID
        )
        apiResp = requests.get(apiURL)
        if apiResp.status_code == 200:
            catalogData = apiResp.json()
            return catalogData.get('items', [])
    def processHathiChunk(self, hathiItems, cConn):
        """Worker body: send one parsed format per item, then 'DONE'."""
        for recItem in hathiItems:
            newItem = self.getNewItemLinks(recItem)
            if newItem is not None:
                cConn.send(newItem)
        cConn.send('DONE')
        cConn.close()
    def getNewItemLinks(self, recItem):
        """Build a format dict (view + PDF download links) for one item.

        Items whose rights code marks them in-copyright are skipped.
        """
        if recItem.get('rightsCode', 'ic') in ['ic', 'icus', 'ic-world', 'und']:
            return
        # Follow the redirect to get the canonical babel.hathitrust URL.
        redirectURL = requests.head(recItem['itemURL'])
        realURL = redirectURL.headers['Location'].replace('https://', '')
        hathiID = re.search(self.HATHI_ID_REGEX, realURL).group(1)
        downloadURL = self.HATHI_DOWNLOAD_URL.format(hathiID)
        return {
            'source': self.source,
            'content_type': 'ebook',
            'links': [
                HoldingParser.createLink(
                    realURL, 'text/html',
                    local=False, download=False, images=True, ebook=False
                ),
                HoldingParser.createLink(
                    downloadURL, 'application/pdf',
                    local=False, download=True, images=True, ebook=False
                )
            ],
            'identifiers': [Identifier(identifier=hathiID, source='hathi')]
        }
    @staticmethod
    def createLink(uri, mediaType, local=False, download=False, images=False, ebook=False):
        """Construct a Link carrying the standard flag metadata."""
        return Link(
            url=uri,
            mediaType=mediaType,
            flags={'local': local, 'download': download, 'images': images, 'ebook': ebook}
        )
)
| [
"mwbenowitz@gmail.com"
] | mwbenowitz@gmail.com |
d1896c910c15d67fea04a633a1af5eeb4bcb8691 | a04363ff165d9de42ceb7f277fe3e9896a443627 | /bdai_master-master/lambda_functions/twitter-pipeline/handler.py | 3cd04f129c77bbc4e7b8d37c4fe2a0bbc0eaa1af | [] | no_license | webclinic017/bdai_master | 45327d3ca29262c31986ad1fd05ea96c45949135 | c6953cce37764965b5caa0ea6b3add6df47f8ce4 | refs/heads/master | 2023-07-26T06:43:42.545663 | 2020-04-13T23:13:05 | 2020-04-13T23:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,364 | py | import re
import tweepy
import boto3
from tweepy import OAuthHandler
from textblob import TextBlob
# In[2]:
# SECURITY NOTE(review): live Twitter API credentials are hard-coded and
# committed to source control. Rotate these keys and load them from
# environment variables or a secrets manager instead.
consumer_key = 'GIw0j8Nm3Qx9YYQCo5SLnqljh'
consumer_secret = 'HMLyZMmeGbhV9hnQkYeAFKJp0ynPsVWri3RT4FHTxNwQ2gad3g'
access_token = '2748454529-gTBtq6YTLRTRdMhUMiVISbFp3BPlP5pmfB9wRST'
access_token_secret = '1Fofwl74IXKOxFLkLHgKK42nLKg65OA3PMaEyKIlkkFDF'
# In[3]:
# Build a single authenticated tweepy client shared by the module.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# In[4]:
# want to find a way to ignore tweets that are affiliate links ie. robinhood links
def clean_tweet(self, tweet):
    """Strip @mentions, links and non-alphanumeric characters from *tweet*,
    collapsing all remaining whitespace to single spaces."""
    pattern = r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)"
    cleaned = re.sub(pattern, " ", tweet)
    return " ".join(cleaned.split())
# In[10]:
def get_tweets(self, query, count):
    """Search recent tweets for *query* via the tweepy client *self*.

    Returns a list of dicts with 'query', 'text', 'sentiment' and
    'created_at' keys; retweeted posts are de-duplicated. On a tweepy
    error the function prints the error and implicitly returns None.
    """
    tweets = []
    try:
        # call twitter api to fetch tweets
        fetched_tweets = self.search(q = query, count = count)
        # parsing tweets one by one
        for tweet in fetched_tweets:
            # empty dictionary to store required params of a tweet
            parsed_tweet = {}
            # saving ticker query of tweet
            parsed_tweet['query'] = query
            # saving text of tweet
            parsed_tweet['text'] = tweet.text
            # saving sentiment of tweet
            parsed_tweet['sentiment'] = get_tweet_sentiment(api, tweet.text)
            # saving time of tweet
            parsed_tweet['created_at'] = str(tweet.created_at)
            # appending parsed tweet to tweets list
            if tweet.retweet_count > 0:
                # if tweet has retweets, ensure that it is appended only once
                if parsed_tweet not in tweets:
                    tweets.append(parsed_tweet)
            else:
                tweets.append(parsed_tweet)
        # return parsed tweets
        return tweets
    except tweepy.TweepError as e:
        # print error (if any)
        print("Error : " + str(e))
# In[12]:
def get_tweet_sentiment(self, tweet):
    """Score the cleaned tweet text with TextBlob; polarity is in [-1, 1]."""
    cleaned = clean_tweet(api, tweet)
    return TextBlob(cleaned).sentiment.polarity
def change_tweet_to_utf8(tweets):
    """Encode each tweet's 'text' value to UTF-8 bytes in place,
    echoing every updated record (plus a blank line) to stdout."""
    for record in tweets:
        record["text"] = record["text"].encode("utf-8")
        print(record)
        print()
# In[13]:
def twitter_handler(event, context):
    """AWS Lambda entry point: sample tweet sentiment for a fixed ticker list.

    NOTE(review): `file_name` and `encoded_string` are never defined below,
    so the S3 upload section raises NameError at runtime, and the fetched
    tweet batches are never serialized anywhere -- the persistence step
    still needs to be implemented.
    """
    tweets = get_tweets(api, query = '$aapl', count = 100)
    change_tweet_to_utf8(tweets)
    tweets2 = get_tweets(api, query = '$googl', count = 100)
    change_tweet_to_utf8(tweets2)
    tweets3 = get_tweets(api, query = '$mmm', count = 100)
    change_tweet_to_utf8(tweets3)
    tweets4 = get_tweets(api, query = '$xom', count = 100)
    change_tweet_to_utf8(tweets4)
    tweets5 = get_tweets(api, query = '$csco', count = 100)
    change_tweet_to_utf8(tweets5)
    tweets6 = get_tweets(api, query = '$ge', count = 100)
    change_tweet_to_utf8(tweets6)
    tweets7 = get_tweets(api, query = '$hd', count = 100)
    change_tweet_to_utf8(tweets7)
    tweets8 = get_tweets(api, query = '$psx', count = 100)
    change_tweet_to_utf8(tweets8)
    tweets9 = get_tweets(api, query = '$mlpx', count = 100)
    change_tweet_to_utf8(tweets9)
    tweets10 = get_tweets(api, query = '$oxy', count = 100)
    change_tweet_to_utf8(tweets10)
    tweets11 = get_tweets(api, query = '$regi', count = 100)
    change_tweet_to_utf8(tweets11)
    tweets12 = get_tweets(api, query = '$mro', count = 100)
    change_tweet_to_utf8(tweets12)
    tweets13 = get_tweets(api, query = '$nrg', count = 100)
    change_tweet_to_utf8(tweets13)
    tweets14 = get_tweets(api, query = '$enbl', count = 100)
    change_tweet_to_utf8(tweets14)
    tweets15 = get_tweets(api, query = '$intc', count = 100)
    change_tweet_to_utf8(tweets15)
    bucket_name = "twitter-pipeline-bucket"
    # NOTE(review): `file_name` is undefined here (NameError).
    lambda_path = "/tmp/" + file_name
    s3_path = "/tweets/" + file_name
    s3 = boto3.resource("s3")
    # NOTE(review): `encoded_string` is undefined here (NameError).
    s3.Bucket(bucket_name).put_object(Key=s3_path, Body=encoded_string)
    return {
        'statusCode': 200,
        'body': "Successful"
    }
| [
"noreply@github.com"
] | webclinic017.noreply@github.com |
bfa6c57b1cf19aaec50e98c4cf9775c806514880 | 0c752e60f2eeac20673db6298984f5300ee00591 | /setup.py | 266648b0a808735ec3e3ac432018f3b940929078 | [] | no_license | Tamosauskas/collective.newsticker | d3473e5dc68c0874a0e812d9d9c2bb43746ed4f4 | 91ef5ba610b68895e2f7dba532c7b1a296eaba00 | refs/heads/master | 2021-01-16T00:47:38.423885 | 2011-10-06T13:23:52 | 2011-10-06T13:23:52 | 2,525,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
version = '1.0dev'
setup(name='collective.newsticker',
version=version,
description="An implementation of the jQuery News Ticker Plugin for Plone.",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Programming Language :: Python",
],
keywords='plone jquery',
author='Héctor Velarde',
author_email='hector.velarde@gmail.com',
url='https://github.com/collective/collective.newsticker',
license='GPL',
packages=find_packages('src'),
package_dir={'': 'src'},
namespace_packages=['collective'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'five.grok',
'zope.schema>=3.8.0', # required to use IContextAwareDefaultFactory
],
extras_require={
'test': ['plone.app.testing'],
},
entry_points="""
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"hector.velarde@gmail.com"
] | hector.velarde@gmail.com |
c57cf7c79f465eda9b6a2c3a446cf3a641826adc | 3bae1ed6460064f997264091aca0f37ac31c1a77 | /apps/cloud_api_generator/generatedServer/tasklets/machine/rollback/machine_rollback.py | 7ba45c98b3698e570b614117f8241526791bd529 | [] | no_license | racktivity/ext-pylabs-core | 04d96b80ac1942754257d59e91460c3a141f0a32 | 53d349fa6bee0ccead29afd6676979b44c109a61 | refs/heads/master | 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 | Python | UTF-8 | Python | false | false | 179 | py | __author__ = 'aserver'
__tags__ = 'machine', 'rollback'
__priority__= 3
def main(q, i, params, tags):
    """Tasklet entry point: machine rollback yields an empty result."""
    params.update(result='')
def match(q, i, params, tags):
    # This tasklet applies to every machine-rollback request.
    return True
| [
"devnull@localhost"
] | devnull@localhost |
1018f09e7291d2d37f9db87d4d60882d3bba1b3f | 388ee4f6147c28a54125c6c3e90e47da207fab65 | /lib/python3.5/site-packages/boto-2.48.0-py3.5.egg/EGG-INFO/scripts/fetch_file | d4c43cc3fd280687105334bb2a32d6a23a3e3c79 | [] | no_license | alikhundmiri/save_PIL_to_S3 | 08ea5f622906528504d6742a39d5f50d83b57340 | 2229ed410cd3acd4e1be3ed4287709747dccc617 | refs/heads/master | 2020-03-10T21:13:14.333990 | 2018-04-15T08:34:53 | 2018-04-15T08:34:53 | 129,588,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,907 | #!/Users/alikhundmiri/Desktop/pythons/Image/bin/python
# Copyright (c) 2009 Chris Moyer http://coredumped.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import sys
if __name__ == "__main__":
    from optparse import OptionParser
    # The usage text doubles as the optparse help banner.
    usage = """%prog [options] URI
Fetch a URI using the boto library and (by default) pipe contents to STDOUT
The URI can be either an HTTP URL, or "s3://bucket_name/key_name"
"""
    parser = OptionParser(version="0.1", usage=usage)
    parser.add_option("-o", "--out-file",
                      help="File to receive output instead of STDOUT",
                      dest="outfile")
    (options, args) = parser.parse_args()
    if len(args) < 1:
        parser.print_help()
        sys.exit(1)
    # Imported lazily so --help works without boto installed.
    from boto.utils import fetch_file
    f = fetch_file(args[0])
    if options.outfile:
        # NOTE(review): the output file handle is never closed; a 'with'
        # block would guarantee the data is flushed before exit.
        open(options.outfile, "w").write(f.read())
    else:
        print(f.read())
| [
"salikhundmiri@gmail.com"
] | salikhundmiri@gmail.com | |
8c5207ebadc1ac132c9e5ae23a332403c97b4c57 | 22c5fc7dd52149ebd4338a487ae9ab0db0e43f01 | /tests/test_ahnet.py | 509cfbc59c4f97b70213d092f404c5db9b7cb2e6 | [
"Apache-2.0"
] | permissive | precision-medicine-um/MONAI-Deep_Learning | 3d3f547dd9815152561a6853f8d4727b0e5ca4c4 | d94c4d3a2c465717ba3fae01b7acea7fada9885b | refs/heads/master | 2022-12-28T07:04:07.768415 | 2020-10-17T13:11:56 | 2020-10-17T13:11:56 | 305,346,962 | 3 | 0 | Apache-2.0 | 2022-12-27T15:44:13 | 2020-10-19T10:30:07 | Python | UTF-8 | Python | false | false | 5,322 | py | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.networks.blocks import FCN, MCFCN
from monai.networks.nets import AHNet
from tests.utils import skip_if_quick
# FCN cases: (constructor kwargs, input tensor, expected output shape).
TEST_CASE_FCN_1 = [
    {"out_channels": 3, "upsample_mode": "transpose"},
    torch.randn(5, 3, 64, 64),
    (5, 3, 64, 64),
]
TEST_CASE_FCN_2 = [
    {"out_channels": 2, "upsample_mode": "transpose", "pretrained": True, "progress": False},
    torch.randn(5, 3, 64, 64),
    (5, 2, 64, 64),
]
TEST_CASE_FCN_3 = [
    {"out_channels": 1, "upsample_mode": "bilinear", "pretrained": False},
    torch.randn(5, 3, 64, 64),
    (5, 1, 64, 64),
]
# Multi-channel FCN cases: input channel count varies per case.
TEST_CASE_MCFCN_1 = [
    {"out_channels": 3, "in_channels": 8, "upsample_mode": "transpose", "progress": False},
    torch.randn(5, 8, 64, 64),
    (5, 3, 64, 64),
]
TEST_CASE_MCFCN_2 = [
    {"out_channels": 2, "in_channels": 1, "upsample_mode": "transpose", "progress": True},
    torch.randn(5, 1, 64, 64),
    (5, 2, 64, 64),
]
TEST_CASE_MCFCN_3 = [
    {"out_channels": 1, "in_channels": 2, "upsample_mode": "bilinear", "pretrained": False},
    torch.randn(5, 2, 64, 64),
    (5, 1, 64, 64),
]
# AHNet cases in 2D and 3D, covering both upsampling modes.
TEST_CASE_AHNET_2D_1 = [
    {"spatial_dims": 2, "upsample_mode": "bilinear"},
    torch.randn(3, 1, 128, 128),
    (3, 1, 128, 128),
]
TEST_CASE_AHNET_2D_2 = [
    {"spatial_dims": 2, "upsample_mode": "transpose", "out_channels": 2},
    torch.randn(2, 1, 128, 128),
    (2, 2, 128, 128),
]
TEST_CASE_AHNET_2D_3 = [
    {"spatial_dims": 2, "upsample_mode": "bilinear", "out_channels": 2},
    torch.randn(2, 1, 160, 128),
    (2, 2, 160, 128),
]
TEST_CASE_AHNET_3D_1 = [
    {"spatial_dims": 3, "upsample_mode": "trilinear"},
    torch.randn(3, 1, 128, 128, 64),
    (3, 1, 128, 128, 64),
]
TEST_CASE_AHNET_3D_2 = [
    {"spatial_dims": 3, "upsample_mode": "transpose", "out_channels": 2},
    torch.randn(2, 1, 128, 128, 64),
    (2, 2, 128, 128, 64),
]
TEST_CASE_AHNET_3D_3 = [
    {"spatial_dims": 3, "upsample_mode": "trilinear", "out_channels": 2},
    torch.randn(2, 1, 160, 128, 64),
    (2, 2, 160, 128, 64),
]
# Pretrain cases carry a 4th element: kwargs for the source 2D FCN.
TEST_CASE_AHNET_3D_WITH_PRETRAIN_1 = [
    {"spatial_dims": 3, "upsample_mode": "trilinear"},
    torch.randn(3, 1, 128, 128, 64),
    (3, 1, 128, 128, 64),
    {"out_channels": 1, "upsample_mode": "transpose"},
]
TEST_CASE_AHNET_3D_WITH_PRETRAIN_2 = [
    {"spatial_dims": 3, "upsample_mode": "transpose", "out_channels": 2},
    torch.randn(2, 1, 128, 128, 64),
    (2, 2, 128, 128, 64),
    {"out_channels": 1, "upsample_mode": "bilinear"},
]
TEST_CASE_AHNET_3D_WITH_PRETRAIN_3 = [
    {"spatial_dims": 3, "upsample_mode": "transpose", "in_channels": 2, "out_channels": 3},
    torch.randn(2, 2, 128, 128, 64),
    (2, 3, 128, 128, 64),
    {"out_channels": 1, "upsample_mode": "bilinear"},
]
class TestFCN(unittest.TestCase):
    """Output-shape tests for the 2D FCN block."""
    @parameterized.expand([TEST_CASE_FCN_1, TEST_CASE_FCN_2, TEST_CASE_FCN_3])
    def test_fcn_shape(self, input_param, input_data, expected_shape):
        net = FCN(**input_param)
        net.eval()
        with torch.no_grad():
            result = net.forward(input_data)
        self.assertEqual(result.shape, expected_shape)
class TestMCFCN(unittest.TestCase):
    """Output-shape tests for the multi-channel FCN block."""
    @parameterized.expand([TEST_CASE_MCFCN_1, TEST_CASE_MCFCN_2, TEST_CASE_MCFCN_3])
    def test_mcfcn_shape(self, input_param, input_data, expected_shape):
        net = MCFCN(**input_param)
        net.eval()
        with torch.no_grad():
            result = net.forward(input_data)
        self.assertEqual(result.shape, expected_shape)
class TestAHNET(unittest.TestCase):
    """Output-shape tests for AHNet in 2D and 3D configurations."""
    @parameterized.expand(
        [
            TEST_CASE_AHNET_2D_1,
            TEST_CASE_AHNET_2D_2,
            TEST_CASE_AHNET_2D_3,
            TEST_CASE_AHNET_3D_1,
            TEST_CASE_AHNET_3D_2,
            TEST_CASE_AHNET_3D_3,
        ]
    )
    @skip_if_quick
    def test_ahnet_shape(self, input_param, input_data, expected_shape):
        net = AHNet(**input_param)
        net.eval()
        with torch.no_grad():
            result = net.forward(input_data)
        self.assertEqual(result.shape, expected_shape)
class TestAHNETWithPretrain(unittest.TestCase):
    """Verify AHNet output shapes after copying weights from a 2D FCN."""
    @parameterized.expand(
        [
            TEST_CASE_AHNET_3D_WITH_PRETRAIN_1,
            TEST_CASE_AHNET_3D_WITH_PRETRAIN_2,
            TEST_CASE_AHNET_3D_WITH_PRETRAIN_3,
        ]
    )
    @skip_if_quick
    def test_ahnet_shape(self, input_param, input_data, expected_shape, fcn_input_param):
        net = AHNet(**input_param)
        net2d = FCN(**fcn_input_param)
        # Transfer the pretrained 2D weights into the 3D network.
        net.copy_from(net2d)
        net.eval()
        with torch.no_grad():
            result = net.forward(input_data)
        self.assertEqual(result.shape, expected_shape)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| [
"noreply@github.com"
] | precision-medicine-um.noreply@github.com |
927250df7665ad2bafdbed1ebce7ed9e8f9b37d5 | 37061d249207275daad5f465522f0f5b258daac4 | /mall/mall/apps/verifications/urls.py | b3b491292b8d83b6715561bce4ff44a4c0f5bb70 | [] | no_license | chanwanxiang/mallinfo | e4d9322436cf1711f7864acb8e288fb8ccf44c0c | f730773509dcc052374720ae5eae2c012bf96bb8 | refs/heads/main | 2023-04-19T11:41:23.124078 | 2021-05-12T08:30:56 | 2021-05-12T08:30:56 | 354,263,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from django.conf.urls import url
from . import views
urlpatterns = [
    # Image captcha, keyed by a client-generated UUID.
    url(r'^image_code/(?P<uuid>[\w-]+)/$', views.ImageCodeView.as_view()),
    # SMS verification code for a mainland-CN mobile number.
    # Fix: the end-of-pattern anchor must follow the trailing slash ('/$');
    # the original '$/' could never match any request path, making the
    # route unreachable.
    url(r'^sms_code/(?P<mobile>1[3-9]\d{9})/$', views.SmsCodeView.as_view()),
]
| [
"595366700@qq.com"
] | 595366700@qq.com |
8deb8da80cad74ca6d0ca6fa50cd7220d4b62737 | 947a46aee6191a640938cf1b5892aa577ca8708b | /independentbanker/spiders/spider.py | 690b4738b3eaf75543710e2d6532cc5596beaac6 | [] | no_license | hristo-grudev/independentbanker | 6f8a39555b093fdf52218d76ee0c656c37166cee | d2d1f0892227e5f14b98a38e67413bf78ec7a443 | refs/heads/main | 2023-03-30T15:14:37.784357 | 2021-04-02T07:36:43 | 2021-04-02T07:36:43 | 353,943,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,421 | py | import datetime
import scrapy
from scrapy.loader import ItemLoader
from ..items import IndependentbankerItem
from itemloaders.processors import TakeFirst
base = 'https://independentbanker.org/{}/'
class IndependentbankerSpider(scrapy.Spider):
    """Crawl independentbanker.org year archives and scrape each article."""
    name = 'independentbanker'
    # First archive year to crawl; advanced up to the current year.
    year = 2011
    start_urls = [base.format(year)]
    def parse(self, response):
        """Follow article links, pagination, then the next year's archive."""
        post_links = response.xpath('//div[@class="post-inner"]//h2[@class="entry-title"]/a/@href').getall()
        yield from response.follow_all(post_links, self.parse_post)
        next_page = response.xpath('//a[@class="next page-numbers"]/@href').getall()
        yield from response.follow_all(next_page, self.parse)
        # Queue the following year's archive until we reach the present.
        if self.year < datetime.datetime.now().year:
            self.year += 1
            yield response.follow(base.format(self.year), self.parse)
    def parse_post(self, response):
        """Extract title, body text and date into an IndependentbankerItem."""
        title = response.xpath('//header[@class="entry-header"]//h1/text()').get()
        description = response.xpath('//div[@class="entry-content"]//text()[normalize-space()]').getall()
        # Drop inline CSS/JS fragments (any text node containing '{').
        description = [p.strip() for p in description if '{' not in p]
        description = ' '.join(description).strip()
        date = response.xpath('//div[@class="entry-meta"]/text()').get()
        item = ItemLoader(item=IndependentbankerItem(), response=response)
        item.default_output_processor = TakeFirst()
        item.add_value('title', title)
        item.add_value('description', description)
        item.add_value('date', date)
        return item.load_item()
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
544128c3d90a839809f1c93fffa23c0ff34618d2 | 97249b5f3b0054ccb7e61b211c525af7e9842f48 | /clickpost/router.py | 1d5cd19329b1b97edd20a5943801742409e865ad | [] | no_license | itssonamsinha/testing2 | ebaf88b7c30c8d9bd995e0eac687c8650c3ebc83 | 8800baf8cf3dd5bbfc97959bab0a2c1a674c7587 | refs/heads/master | 2021-03-15T06:12:24.921359 | 2020-03-18T07:42:16 | 2020-03-18T07:42:16 | 246,830,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py | from django.urls import path
from .views import NotificationView
urlpatterns = [
    path('sendSms', NotificationView.as_view({'get': 'retrieve_sms'}), name='send-sms'),
    # NOTE(review): this route reuses name='send-sms'; URL names should be
    # unique for reverse() lookups -- confirm and rename.
    path('sendWhatsApp', NotificationView.as_view({'get': 'retrieve_whatsapp'}), name='send-sms'),
    path('sendProductNotification', NotificationView.as_view({'get': 'send_notification'}), name='send-notification'),
]
"sonamsinha@policybazaar.com"
] | sonamsinha@policybazaar.com |
6971479fb0d05f17efa38c065ea41d78c4494504 | fdbb74a95924e2677466614f6ab6e2bb13b2a95a | /third_party/python/Lib/distutils/tests/test_extension.py | e35f2738b6a21966e862cb0bbecbe92f6b0d60ef | [
"Python-2.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"ISC"
] | permissive | jart/cosmopolitan | fb11b5658939023977060a7c6c71a74093d9cb44 | 0d748ad58e1063dd1f8560f18a0c75293b9415b7 | refs/heads/master | 2023-09-06T09:17:29.303607 | 2023-09-02T03:49:13 | 2023-09-02T03:50:18 | 272,457,606 | 11,887 | 435 | ISC | 2023-09-14T17:47:58 | 2020-06-15T14:16:13 | C | UTF-8 | Python | false | false | 2,768 | py | """Tests for distutils.extension."""
import unittest
import os
import warnings
from test.support import check_warnings, run_unittest
from distutils.extension import read_setup_file, Extension
class ExtensionTestCase(unittest.TestCase):
    """Unit tests for distutils.extension helpers."""
    def test_read_setup_file(self):
        # trying to read a Setup file
        # (sample extracted from the PyGame project)
        setup = os.path.join(os.path.dirname(__file__), 'Setup.sample')
        exts = read_setup_file(setup)
        names = [ext.name for ext in exts]
        names.sort()
        # here are the extensions read_setup_file should have created
        # out of the file
        wanted = ['_arraysurfarray', '_camera', '_numericsndarray',
                  '_numericsurfarray', 'base', 'bufferproxy', 'cdrom',
                  'color', 'constants', 'display', 'draw', 'event',
                  'fastevent', 'font', 'gfxdraw', 'image', 'imageext',
                  'joystick', 'key', 'mask', 'mixer', 'mixer_music',
                  'mouse', 'movie', 'overlay', 'pixelarray', 'pypm',
                  'rect', 'rwobject', 'scrap', 'surface', 'surflock',
                  'time', 'transform']
        self.assertEqual(names, wanted)
    def test_extension_init(self):
        # the first argument, which is the name, must be a string
        self.assertRaises(AssertionError, Extension, 1, [])
        ext = Extension('name', [])
        self.assertEqual(ext.name, 'name')
        # the second argument, which is the list of files, must
        # be a list of strings
        self.assertRaises(AssertionError, Extension, 'name', 'file')
        self.assertRaises(AssertionError, Extension, 'name', ['file', 1])
        ext = Extension('name', ['file1', 'file2'])
        self.assertEqual(ext.sources, ['file1', 'file2'])
        # others arguments have defaults
        for attr in ('include_dirs', 'define_macros', 'undef_macros',
                     'library_dirs', 'libraries', 'runtime_library_dirs',
                     'extra_objects', 'extra_compile_args', 'extra_link_args',
                     'export_symbols', 'swig_opts', 'depends'):
            self.assertEqual(getattr(ext, attr), [])
        self.assertEqual(ext.language, None)
        self.assertEqual(ext.optional, None)
        # if there are unknown keyword options, warn about them
        with check_warnings() as w:
            warnings.simplefilter('always')
            ext = Extension('name', ['file1', 'file2'], chic=True)
        self.assertEqual(len(w.warnings), 1)
        self.assertEqual(str(w.warnings[0].message),
                         "Unknown Extension options: 'chic'")
def test_suite():
    # NOTE(review): unittest.makeSuite is deprecated in modern Python;
    # TestLoader().loadTestsFromTestCase is the current equivalent.
    return unittest.makeSuite(ExtensionTestCase)
if __name__ == "__main__":
    # Allow running this test module directly.
    run_unittest(test_suite())
| [
"jtunney@gmail.com"
] | jtunney@gmail.com |
83f72edc799573491790f4191be54267fb97346d | 44ff608608e8042f86edf08aa8fa4469a9786837 | /nacos/errors.py | 79ede9ddef722579a47ca10738f250a44bb38d89 | [
"MIT"
] | permissive | neggplant/pynacos-sdk | 928a2297eaaaebdf3f595328f29d378d028a9fed | c98521ca706437262b680f9beeb1a2e4a6dad2d8 | refs/heads/master | 2023-02-26T17:57:45.967901 | 2021-02-03T13:24:16 | 2021-02-03T13:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | class RequestError(Exception):
def __init__(self, message="failed to request data"):
Exception.__init__(self, message)
class ParamError(Exception):
def __init__(self, message="invalid param"):
Exception.__init__(self, message)
| [
"olivetree123@163.com"
] | olivetree123@163.com |
9da6a8533f52d115e139ed88b0df5fb07b8ce617 | c08d8126a90f773f0cf04237157a578f82b0a2ac | /libs/dataset.py | b01b5ec07253fddd3c00bcc8654451e96caf6269 | [] | no_license | GOSSAN0602/baidu_car_pose_estimation | cf2d4a8b69f629b01bc70f657c4f0981999034a4 | 2a2e77bae99011890ca7861a24298ebffa604b3d | refs/heads/master | 2020-12-05T07:20:42.358114 | 2020-01-08T15:27:49 | 2020-01-08T15:27:49 | 232,045,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | import numpy as np
import pandas as pd
import cv2
from torch.utils.data import Dataset, DataLoader
import torch
import sys
sys.path.append('./')
from libs.img_preprocess import *
def imread(path, fast_mode=False):
img = cv2.imread(path)
if not fast_mode and img is not None and len(img.shape) == 3:
img = np.array(img[:, :, ::-1])
return img
class CarDataset(Dataset):
"""Car dataset."""
def __init__(self, dataframe, root_dir, training=True, transform=None):
self.df = dataframe
self.root_dir = root_dir
self.transform = transform
self.training = training
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# Get image name
idx, labels = self.df.values[idx]
img_name = self.root_dir.format(idx)
# Read image
img0 = imread(img_name, True)
if self.transform is not None:
img0 = self.transform(img0)
img = preprocess_image(img0)
img = np.rollaxis(img, 2, 0)
# Get mask and regression maps
if self.training:
mask, regr = get_mask_and_regr(img0, labels)
regr = np.rollaxis(regr, 2, 0)
else:
mask, regr = 0, 0
return [img, mask, regr]
| [
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
b5380f194e2ebf4483ba73e4bbda03f43357c625 | efc9b70544c0bc108aaec0ed6a2aefdf208fd266 | /7_Reverse Integer.py | 89ccbc472a02918df9bb0a259e44de8755b77148 | [] | no_license | fxy1018/Leetcode | 75fad14701703d6a6a36dd52c338ca56c5fa9eff | 604efd2c53c369fb262f42f7f7f31997ea4d029b | refs/heads/master | 2022-12-22T23:42:17.412776 | 2022-12-15T21:27:37 | 2022-12-15T21:27:37 | 78,082,899 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | '''
Given a 32-bit signed integer, reverse digits of an integer.
Example 1:
Input: 123
Output: 321
Example 2:
Input: -123
Output: -321
Example 3:
Input: 120
Output: 21
Note:
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−231, 231 − 1]. For the purpose of this problem, assume that your function returns 0 when the reversed integer overflows.
'''
class Solution:
def reverse(self, x):
"""
:type x: int
:rtype: int
"""
res = 0
negative = False
if x < 0:
negative = True
x *= -1
while x > 0:
mod = x%10
res = 10*res + mod
x = (x-mod)//10
if res >2**31-1 or res < -2**31:
return(0)
if negative:
return(res*-1)
return(res)
| [
"noreply@github.com"
] | fxy1018.noreply@github.com |
1ed1be18e715d55229010ba193864c1a21ca05e4 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/sieve/01b5423521084f56a7ead15fa4504e4f.py | 99cb7e8e8668e1ae7fee586e05b9e99094a5c1b4 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 348 | py | def sieve(number):
nonprimes = []
primes = []
for x in range(2,number/2):
y = 2
result = 0
while (result < number +1):
result = x*y
nonprimes.append(result)
y = y + 1
for a in range(2,number):
if a not in nonprimes:
primes.append(a)
return primes
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
1cce6bb01e3e8ebd4f9aba7c62c8d07cedbb2b9f | 4c187f0f9d244e89facdddc1581bcef33e092a93 | /benchmarks/QLib/QASM_src/benstein_vazirani_23b_secret_64.py | 18e103f3200545383140d5d7ea73dd4ab19dc862 | [] | no_license | Gonaco/Super-Qool-Benchmarks | 419dea5306bcec7e502034527acffe371a4e8004 | a630f3dd6f22bebd4ce7601a772fd3a8cd3dd08c | refs/heads/master | 2021-01-25T13:40:57.523633 | 2018-04-03T09:31:56 | 2018-04-03T09:31:56 | 123,600,859 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from openql import openql as ql
import os
import numpy as np
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, 'test_output')
ql.set_output_dir(output_dir)
config_fn = os.path.join(curdir, '/home/daniel/Master/Quantum_Computing_and_Quantum_Information/OpenQL/tests/hardware_config_cc_light.json')
platform = ql.Platform('platform_none', config_fn)
sweep_points = [1,2]
num_circuits = 1
num_qubits = 25
p = ql.Program('benstein_vazirani_23b_secret_64', num_qubits, platform)
p.set_sweep_points(sweep_points, num_circuits)
k = ql.Kernel('benstein_vazirani_23b_secret_64', platform)
k.gate('prepx',23)
k.gate('x',23)
k.gate('h',0)
k.gate('h',1)
k.gate('h',2)
k.gate('h',3)
k.gate('h',4)
k.gate('h',5)
k.gate('h',6)
k.gate('h',7)
k.gate('h',8)
k.gate('h',9)
k.gate('h',10)
k.gate('h',11)
k.gate('h',12)
k.gate('h',13)
k.gate('h',14)
k.gate('h',15)
k.gate('h',16)
k.gate('h',17)
k.gate('h',18)
k.gate('h',19)
k.gate('h',20)
k.gate('h',21)
k.gate('h',22)
k.gate('h',23)
k.gate('cnot',6y)
k.gate('h',0)
k.gate('h',1)
k.gate('h',2)
k.gate('h',3)
k.gate('h',4)
k.gate('h',5)
k.gate('h',6)
k.gate('h',7)
k.gate('h',8)
k.gate('h',9)
k.gate('h',10)
k.gate('h',11)
k.gate('h',12)
k.gate('h',13)
k.gate('h',14)
k.gate('h',15)
k.gate('h',16)
k.gate('h',17)
k.gate('h',18)
k.gate('h',19)
k.gate('h',20)
k.gate('h',21)
k.gate('h',22)
k.gate('h',23)
p.add_kernel(k)
p.compile(optimize=False)
| [
"danielmoremanza@gmail.com"
] | danielmoremanza@gmail.com |
0199368418346d8e9f8d077120096ec470eb55f7 | 80d50ea48e10674b1b7d3f583a1c4b7d0b01200f | /src/datadog_api_client/v1/model/service_level_objective_query.py | ee11656739b4377458798b17a374a2d58d95f8a3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"MPL-2.0"
] | permissive | DataDog/datadog-api-client-python | 3e01fa630278ad0b5c7005f08b7f61d07aa87345 | 392de360e7de659ee25e4a6753706820ca7c6a92 | refs/heads/master | 2023-09-01T20:32:37.718187 | 2023-09-01T14:42:04 | 2023-09-01T14:42:04 | 193,793,657 | 82 | 36 | Apache-2.0 | 2023-09-14T18:22:39 | 2019-06-25T22:52:04 | Python | UTF-8 | Python | false | false | 1,316 | py | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import annotations
from datadog_api_client.model_utils import (
ModelNormal,
cached_property,
)
class ServiceLevelObjectiveQuery(ModelNormal):
@cached_property
def openapi_types(_):
return {
"denominator": (str,),
"numerator": (str,),
}
attribute_map = {
"denominator": "denominator",
"numerator": "numerator",
}
def __init__(self_, denominator: str, numerator: str, **kwargs):
"""
A metric-based SLO. **Required if type is metric**. Note that Datadog only allows the sum by aggregator
to be used because this will sum up all request counts instead of averaging them, or taking the max or
min of all of those requests.
:param denominator: A Datadog metric query for total (valid) events.
:type denominator: str
:param numerator: A Datadog metric query for good events.
:type numerator: str
"""
super().__init__(kwargs)
self_.denominator = denominator
self_.numerator = numerator
| [
"noreply@github.com"
] | DataDog.noreply@github.com |
f002b030cd2969fee5c668a5cae035d7f1da8581 | dbd603f5246059ce5fe3d426b4e7f5f98b944a78 | /mlmath/vector.py | 589999452c6de70857b5b5b0536d879990f1dc7c | [] | no_license | chyld/mlmath | f81522644b30e1a186032a6edbb908891730d9a7 | 7ce5606f02111d49f893f0f35e57bc72fb40cdcf | refs/heads/master | 2020-04-18T21:41:56.910701 | 2019-01-27T05:12:48 | 2019-01-27T05:12:48 | 167,772,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | class Vector:
def __init__(self, *elements):
self.elements = elements
def scale(self, amount):
return Vector(*(e * amount for e in self.elements))
def norm(self):
return sum((e ** 2 for e in self.elements)) ** 0.5
def __add__(self, other):
return Vector(*(a + b for a, b in zip(self.elements, other.elements)))
| [
"chyld.medford@gmail.com"
] | chyld.medford@gmail.com |
d8b482999535ede7ae3f2ba3547c471e1c6f9eb1 | 1b8d87b37cc6de4b0ffaedf0d5dc3877888865c3 | /fhirclient/r4models/messagedefinition_tests.py | 68e8854a920536410beae208bafde5623997ea6e | [] | no_license | Healthedata1/Flask-Alerts-Sender | d222e689de01daaa59d51aea2054d538db231cf9 | 0637cb1bb2c8af18243fce3aecc09723c2fdd155 | refs/heads/master | 2022-12-12T14:14:04.708052 | 2021-05-05T20:52:49 | 2021-05-05T20:52:49 | 231,147,534 | 1 | 0 | null | 2022-12-08T03:22:29 | 2019-12-31T21:20:13 | Python | UTF-8 | Python | false | false | 2,376 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import messagedefinition
from .fhirdate import FHIRDate
class MessageDefinitionTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("MessageDefinition", js["resourceType"])
return messagedefinition.MessageDefinition(js)
def testMessageDefinition1(self):
inst = self.instantiate_from("messagedefinition-example.json")
self.assertIsNotNone(inst, "Must have instantiated a MessageDefinition instance")
self.implMessageDefinition1(inst)
js = inst.as_json()
self.assertEqual("MessageDefinition", js["resourceType"])
inst2 = messagedefinition.MessageDefinition(js)
self.implMessageDefinition1(inst2)
def implMessageDefinition1(self, inst):
self.assertEqual(inst.category, "notification")
self.assertEqual(inst.contact[0].telecom[0].system, "url")
self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org")
self.assertEqual(inst.date.date, FHIRDate("2016-11-09").date)
self.assertEqual(inst.date.as_json(), "2016-11-09")
self.assertEqual(inst.eventCoding.code, "admin-notify")
self.assertEqual(inst.eventCoding.system, "http://example.org/fhir/message-events")
self.assertTrue(inst.experimental)
self.assertEqual(inst.id, "example")
self.assertEqual(inst.name, "EXAMPLE")
self.assertEqual(inst.publisher, "Health Level Seven, Int'l")
self.assertEqual(inst.purpose, "Defines a base example for other MessageDefinition instances.")
self.assertEqual(inst.status, "draft")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Message definition base example</div>")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.title, "Message definition base example")
self.assertEqual(inst.url, "http://hl7.org/fhir/MessageDefinition/example")
| [
"ehaas@healthedatainc.com"
] | ehaas@healthedatainc.com |
d754a968e1f0df26c599e84104d0e8b552cb60ae | 0d15c599ec1fed05d7acdb31cebe37a40d38c2e3 | /setup.py | 482189a2a087726c6040c10024b6ef6344ccf911 | [] | no_license | VoteIT/voteit.notes | 34753f6ed45aa6c015e9aa015d679f4ee132e4ff | ecdb9b945d7cdab0cd566b22c879b71ba3c23b75 | refs/heads/master | 2021-07-13T07:41:39.347100 | 2020-05-16T12:26:57 | 2020-05-16T12:26:57 | 133,809,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
CONTRIBUTORS = open(os.path.join(here, 'CONTRIBUTORS.rst')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
requires = ('voteit.core',
'betahaus.viewcomponent',
'pyramid',
'colander',
'deform',
'fanstatic',)
setup(name='voteit.notes',
version='0.1dev',
description='Personal notes on proposals for VoteIT',
long_description=README + '\n\n' + CONTRIBUTORS + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pylons",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='VoteIT development team and contributors',
author_email='info@voteit.se',
url='http://www.voteit.se',
keywords='web pyramid pylons voteit',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="voteit.notes",
entry_points = {
'fanstatic.libraries': [
'voteit_notes_lib = voteit.notes.fanstaticlib:voteit_notes_lib'
],
},
)
| [
"robin@betahaus.net"
] | robin@betahaus.net |
0b3a3a34c6f90fed16abed7da992f7be4a7df450 | c89543dd926c1787c40616ed174a3d1371c54449 | /superset/databases/commands/export.py | 4d3bb7f99f251d041f0dff7b5e8faa99d90b2d68 | [
"Apache-2.0",
"OFL-1.1"
] | permissive | j420247/incubator-superset | 7c7bff330393f0e91f5e67782f35efe8c735250a | c9b9b7404a2440a4c9d3173f0c494ed40f7fa2bd | refs/heads/master | 2023-03-11T21:53:16.827919 | 2023-02-03T19:04:17 | 2023-02-03T19:04:17 | 157,780,350 | 1 | 1 | Apache-2.0 | 2023-03-07T00:14:51 | 2018-11-15T22:24:29 | TypeScript | UTF-8 | Python | false | false | 4,254 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
import json
import logging
from typing import Any, Dict, Iterator, Tuple
import yaml
from superset.databases.commands.exceptions import DatabaseNotFoundError
from superset.databases.dao import DatabaseDAO
from superset.commands.export.models import ExportModelsCommand
from superset.models.core import Database
from superset.utils.dict_import_export import EXPORT_VERSION
from superset.utils.file import get_filename
logger = logging.getLogger(__name__)
def parse_extra(extra_payload: str) -> Dict[str, Any]:
try:
extra = json.loads(extra_payload)
except json.decoder.JSONDecodeError:
logger.info("Unable to decode `extra` field: %s", extra_payload)
return {}
# Fix for DBs saved with an invalid ``schemas_allowed_for_csv_upload``
schemas_allowed_for_csv_upload = extra.get("schemas_allowed_for_csv_upload")
if isinstance(schemas_allowed_for_csv_upload, str):
extra["schemas_allowed_for_csv_upload"] = json.loads(
schemas_allowed_for_csv_upload
)
return extra
class ExportDatabasesCommand(ExportModelsCommand):
dao = DatabaseDAO
not_found = DatabaseNotFoundError
@staticmethod
def _export(
model: Database, export_related: bool = True
) -> Iterator[Tuple[str, str]]:
db_file_name = get_filename(model.database_name, model.id, skip_id=True)
file_path = f"databases/{db_file_name}.yaml"
payload = model.export_to_dict(
recursive=False,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
# https://github.com/apache/superset/pull/16756 renamed ``allow_csv_upload``
# to ``allow_file_upload`, but we can't change the V1 schema
replacements = {"allow_file_upload": "allow_csv_upload"}
# this preserves key order, which is important
payload = {replacements.get(key, key): value for key, value in payload.items()}
# TODO (betodealmeida): move this logic to export_to_dict once this
# becomes the default export endpoint
if payload.get("extra"):
extra = payload["extra"] = parse_extra(payload["extra"])
# ``schemas_allowed_for_csv_upload`` was also renamed to
# ``schemas_allowed_for_file_upload``, we need to change to preserve the
# V1 schema
if "schemas_allowed_for_file_upload" in extra:
extra["schemas_allowed_for_csv_upload"] = extra.pop(
"schemas_allowed_for_file_upload"
)
payload["version"] = EXPORT_VERSION
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_path, file_content
if export_related:
for dataset in model.tables:
ds_file_name = get_filename(
dataset.table_name, dataset.id, skip_id=True
)
file_path = f"datasets/{db_file_name}/{ds_file_name}.yaml"
payload = dataset.export_to_dict(
recursive=True,
include_parent_ref=False,
include_defaults=True,
export_uuids=True,
)
payload["version"] = EXPORT_VERSION
payload["database_uuid"] = str(model.uuid)
file_content = yaml.safe_dump(payload, sort_keys=False)
yield file_path, file_content
| [
"noreply@github.com"
] | j420247.noreply@github.com |
8d2ad9a8b6c480e830f3ac604361cacec07ae068 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-gsn-edf.0/gsn-edf_ut=3.5_rd=0.5_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=12/sched.py | 5807deb25f4484e9338877651fe134cfb937907d | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | -X FMLP -Q 0 -L 5 100 300
-X FMLP -Q 0 -L 5 96 300
-X FMLP -Q 1 -L 5 88 300
-X FMLP -Q 1 -L 5 84 400
-X FMLP -Q 2 -L 3 64 300
-X FMLP -Q 2 -L 3 55 250
-X FMLP -Q 3 -L 2 49 300
-X FMLP -Q 3 -L 2 48 175
47 175
35 200
33 100
31 150
26 175
24 150
18 200
18 200
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
207423db38088cf820dfc1c434376eec194dd38d | 7520e14426f46525605d87f6104b6100c0724d84 | /examples/finite_differences.py | 871c43dee817896f653bba1adcaf93428618e26f | [] | no_license | shanhaiying/pysketcher | 1b40b1b230af429f93173cb6765d0b96d9806535 | 1007b71c64b3c812d301caa3b422b5308dcf87db | refs/heads/master | 2021-01-17T07:55:17.362970 | 2015-01-28T18:09:09 | 2015-01-28T18:09:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,224 | py | """
Illustrate forward, backward and centered finite differences
in four figures.
"""
from pysketcher import *
#test_test()
xaxis = 2
drawing_tool.set_coordinate_system(0, 7, 1, 6, axis=False)
f = SketchyFunc1('$u(t)$')
x = 3 # center point where we want the derivative
xb = 2 # x point used for backward difference
xf = 4 # x point used for forward difference
p = (x, f(x)) # center point
pf = (xf, f(xf)) # forward point
pb = (xb, f(xb)) # backward point
r = 0.1 # radius of circles placed at key points
c = Circle(p, r).set_linecolor('blue')
cf = Circle(pf, r).set_linecolor('red')
cb = Circle(pb, r).set_linecolor('green')
# Points in the mesh
p0 = point(x, xaxis) # center point
pf0 = point(xf, xaxis) # forward point
pb0 = point(xb, xaxis) # backward point
tick = 0.05
# 1D mesh with three points
mesh = Composition({
'tnm1': Text('$t_{n-1}$', pb0 - point(0, 0.3)),
'tn': Text('$t_{n}$', p0 - point(0, 0.3)),
'tnp1': Text('$t_{n+1}$', pf0 - point(0, 0.3)),
'axis': Composition({
'hline': Line(pf0-point(3,0), pb0+point(3,0)).\
set_linecolor('black').set_linewidth(1),
'tick_m1': Line(pf0+point(0,tick), pf0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_n': Line(p0+point(0,tick), p0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_p1': Line(pb0+point(0,tick), pb0-point(0,tick)).\
set_linecolor('black').set_linewidth(1)}),
})
# 1D mesh with three points for Crank-Nicolson
mesh_cn = Composition({
'tnm1': Text('$t_{n}$', pb0 - point(0, 0.3)),
'tn': Text(r'$t_{n+\frac{1}{2}}$', p0 - point(0, 0.3)),
'tnp1': Text('$t_{n+1}$', pf0 - point(0, 0.3)),
'axis': Composition({
'hline': Line(pf0-point(3,0), pb0+point(3,0)).\
set_linecolor('black').set_linewidth(1),
'tick_m1': Line(pf0+point(0,tick), pf0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_n': Line(p0+point(0,tick), p0-point(0,tick)).\
set_linecolor('black').set_linewidth(1),
'tick_p1': Line(pb0+point(0,tick), pb0-point(0,tick)).\
set_linecolor('black').set_linewidth(1)}),
})
# Vertical dotted lines at each mesh point
vlinec = Line(p, p0).set_linestyle('dotted').\
set_linecolor('blue').set_linewidth(1)
vlinef = Line(pf, pf0).set_linestyle('dotted').\
set_linecolor('red').set_linewidth(1)
vlineb = Line(pb, pb0).set_linestyle('dotted').\
set_linecolor('green').set_linewidth(1)
# Compose vertical lines for each type of difference
forward_lines = Composition({'center': vlinec, 'right': vlinef})
backward_lines = Composition({'center': vlinec, 'left': vlineb})
centered_lines = Composition({'left': vlineb, 'right': vlinef})
centered_lines2 = Composition({'left': vlineb, 'right': vlinef,
'center': vlinec})
# Tangents illustrating the derivative
domain = [1, 5]
domain2 = [2, 5]
forward_tangent = Line(p, pf).new_interval(x=domain2).\
set_linestyle('dashed').set_linecolor('red')
backward_tangent = Line(pb, p).new_interval(x=domain).\
set_linestyle('dashed').set_linecolor('green')
centered_tangent = Line(pb, pf).new_interval(x=domain).\
set_linestyle('dashed').set_linecolor('blue')
h = 1E-3 # h in finite difference approx used to compute the exact tangent
exact_tangent = Line((x+h, f(x+h)), (x-h, f(x-h))).\
new_interval(x=domain).\
set_linestyle('dotted').set_linecolor('black')
forward = Composition(
dict(tangent=forward_tangent,
point1=c, point2=cf, coor=forward_lines,
name=Text('forward',
forward_tangent.geometric_features()['end'] + \
point(0.1,0), alignment='left')))
backward = Composition(
dict(tangent=backward_tangent,
point1=c, point2=cb, coor=backward_lines,
name=Text('backward',
backward_tangent.geometric_features()['end'] + \
point(0.1,0), alignment='left')))
centered = Composition(
dict(tangent=centered_tangent,
point1=cb, point2=cf, point=c, coor=centered_lines2,
name=Text('centered',
centered_tangent.geometric_features()['end'] + \
point(0.1,0), alignment='left')))
exact = Composition(dict(graph=f, tangent=exact_tangent))
forward = Composition(dict(difference=forward, exact=exact)).\
set_name('forward')
backward = Composition(dict(difference=backward, exact=exact)).\
set_name('backward')
centered = Composition(dict(difference=centered, exact=exact)).\
set_name('centered')
all = Composition(
dict(exact=exact, forward=forward, backward=backward,
centered=centered)).set_name('all')
for fig in forward, backward, centered, all:
drawing_tool.erase()
fig.draw()
mesh.draw()
drawing_tool.display()
drawing_tool.savefig('fd_'+fig.get_name())
# Crank-Nicolson around t_n+1/2
drawing_tool.erase()
centered.draw()
mesh_cn.draw()
drawing_tool.display()
drawing_tool.savefig('fd_centered_CN')
raw_input()
| [
"hpl@simula.no"
] | hpl@simula.no |
f6531f7cfb43c1e68e3acf5fc0ab5dfd6c670dad | 71b11008ab0455dd9fd2c47107f8a27e08febb27 | /09、UI自动化测试及黑马头条项目实战/day16/03代码/scripts/mp/test_publish_article.py | bb0c9fc390b7a3689f3c4dbcddb3c679b9e20b14 | [] | no_license | zmh19941223/heimatest2021 | 49ce328f8ce763df0dd67ed1d26eb553fd9e7da4 | 3d2e9e3551a199bda9945df2b957a9bc70d78f64 | refs/heads/main | 2023-08-25T17:03:31.519976 | 2021-10-18T05:07:03 | 2021-10-18T05:07:03 | 418,348,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | # 定义测试类
import logging
import allure
import pytest
from config import BaseDir
from page.mp.home_page import HomeProxy
from page.mp.login_page import LoginProxy
from page.mp.publish_page import PublishProxy
from utils import UtilsDriver, is_exist, get_case_data
case_data = get_case_data(BaseDir + "/data/mp/test_login_data.json")
@pytest.mark.run(order=1)
class TestPublishArticle:
# 定义类级别的fixture初始化操作方法
def setup_class(self):
self.login_proxy = LoginProxy()
self.home_proxy = HomeProxy()
self.publish_proxy = PublishProxy()
# 定义类级别的fixture销毁操作方法
def teardown_class(self):
UtilsDriver.quit_mp_driver()
# 定义登录的测试用例
@pytest.mark.parametrize("username, code, expect", case_data)
@allure.severity(allure.severity_level.CRITICAL)
def test_login(self, username, code, expect):
logging.info("用例的数据如下:用户名:{}, 验证码:{}, 预期结果:{}".format(username,
code, expect))
print(username, code)
self.login_proxy.login(username, code) # 登录
allure.attach(UtilsDriver.get_mp_driver().get_screenshot_as_png(), "登录截图", allure.attachment_type.PNG)
username = self.home_proxy.get_username_msg() # 获取登录后的用户名信息
assert expect == username # 根据获取到的用户名进行断言
# 定义测试方法
@allure.severity(allure.severity_level.CRITICAL)
def test_publish_article(self):
self.home_proxy.go_publish_page() # 跳转到发布文章页面
self.publish_proxy.publish_article("发布文章_0828_15", "发布文章_0710_14发布文章_0710_14", "数据库")
assert is_exist(UtilsDriver.get_mp_driver(), "新增文章成功")
| [
"1780858508@qq.com"
] | 1780858508@qq.com |
806afa96acf010a531f3f9ea7f1949f08b8aed32 | 0bd00c67608b1ce6c5e76d77b3ced4cce64ee5a0 | /python/problem57.py | 1052c9808248f12fd2498667f322d48b963d14b6 | [] | no_license | patrickdean/euler | 420ef8b268764a7e47120c5879063a7d9164fa09 | d42e94db3713bbbb65cb27e87ce1775211f53790 | refs/heads/master | 2020-05-18T17:44:18.738482 | 2014-05-20T00:48:51 | 2014-05-20T00:54:17 | 4,303,268 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
# Patrick Dean
# Project Euler: #057
# In the first one-thousand expansions of the continued fraction of sqrt(2), find the number fractions contain a numerator with more digits than denominator
def two_conv_gen(n):
s= [2] * n
return [1] + s
def convergent(lst):
if len(lst) == 1:
return (lst[0], 1)
x = lst[-1:]
i = 1
num, den = x[0], 1
while i < len(lst):
i += 1
num, den = x[0] * num + den, num
x = lst[-(i+1):]
return (num, den)
l = two_conv_gen(1000)
x = [convergent(l[:i]) for i in range(1, len(l))]
print sum(1 for a, b in x if len(str(a)) > len(str(b))) | [
"="
] | = |
9962f987b25843ee987ebf75bc28b9e9c9c1dc90 | c2fd315faa3d4ad91474197e0c55526f0db83e3f | /nonrecursive_tree_search.py | a1eaef60ee1e98a2b33bf5a79d30dae926a947c2 | [] | no_license | zackmdavis/Standard_Algorithms | 8db9f912bddcb5bf14756d21ce67745ddbcc69c9 | 655309a4440c7baea26de291f9881494b4695827 | refs/heads/master | 2021-01-17T08:58:52.214180 | 2016-04-03T03:01:03 | 2016-04-03T03:01:03 | 4,513,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,946 | py | #!/usr/bin/env python3
# I usually think of depth-first tree search in terms of recursive function
# invocations, but we should be able to manage the node-stack ourselves instead
# of leaving it implicit in the call stack!
# Let's consider a tree where the leaf nodes have values, and, given a tree, we
# want to find the greatest value contained amongst its leaves.
import inspect
import logging
import sys
import unittest
class Node:
def __init__(self, value, children=None):
# assert that leaves have initial values and internal nodes do not
if children is None:
assert value is not None
else:
assert value is None
self.value = value
# default may be changed by parent Node's __init__ializer!
self.parent = None
if children is None:
self.children = []
else:
self.children = children
for child in children:
child.parent = self
def __repr__(self):
return "<Node: id={} value={} ({} children)>".format(
id(self), self.value, len(self.children))
def recursive_search(node,
# Yes, I know the gotcha about mutable default
# values. Wait for it ...
visit_order=[]):
visit_order.append(node)
if not node.children:
return node.value
else:
return max(recursive_search(child)
for child in node.children)
def stack_search(root,
# Wait for it ...
visit_order=[]):
stack = [root]
while stack:
node = stack.pop()
visit_order.append(node)
if not node.children:
# propagate what we've learned up the tree
messenger = node
while (messenger.parent is not None and
(messenger.parent.value is None or
messenger.parent.value < messenger.value)):
logging.debug(
"setting value of {} to {} because of child {}".format(
messenger.parent, messenger.value, messenger))
messenger.parent.value = messenger.value
messenger = messenger.parent
else:
for child in reversed(node.children):
stack.append(child)
return root.value
our_tree = Node(None,
[Node(None,
[Node(None,
[Node(1),
Node(2),
Node(None,
[Node(3),
Node(None,
[Node(4),
Node(None,
[Node(3)]),
Node(2)])])])]),
Node(None,
[Node(None,
[Node(None,
[Node(1),
Node(2),
Node(None,
[Node(3)])])])])])
class RecursiveSearchTestCase(unittest.TestCase):
def test_equivalence(self):
search_methods = [recursive_search, stack_search]
for search_method in search_methods:
self.assertEqual(4, search_method(our_tree))
self.assertEqual(
# We have fun around here.
*[inspect.signature(
search_method).parameters['visit_order'].default
for search_method in search_methods]
)
if __name__ == "__main__":
if sys.argv[1:]:
arg, *_rest = sys.argv[1:]
else:
arg = None
if arg == "debug":
logging_kwargs = {'level': logging.DEBUG}
else:
logging_kwargs = {}
sys.argv[1:] = []
logging.basicConfig(**logging_kwargs)
unittest.main()
| [
"code@zackmdavis.net"
] | code@zackmdavis.net |
76547e9354f9b0bb944103b940eec27806a919ec | 1faf574fc2592e8c65043021762ed6eab441feb5 | /프로그래머스/카카오_겨울_인턴_호텔방배정.py | f0d126466c2a7c547b29d692c9b9132de753635c | [] | no_license | YooGunWook/coding_test | 4e9c8851a025271f79408fd7a0f097f07351b1e7 | 2806ecf5db3714d1a4b5dbfaa2b584bb8b6166a0 | refs/heads/master | 2022-10-15T03:37:46.129384 | 2022-09-26T13:45:53 | 2022-09-26T13:45:53 | 240,265,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | import bisect
import collections
import copy
# Only 정확성
def solution(k, room_number):
rooms = {} # 방 정보
answer = [] # 배정 정보
for i in range(1, k + 1):
rooms[i] = 0 # 각 방별로 정보를 넣어준다.
room_num = list(rooms.keys()) # 이진탐색용
for idx, room in enumerate(room_number):
if rooms[room] == 0: # 방에 사람이 없으면 바로 넣어줌
rooms[room] = 1
answer.append(room)
else: # 아니면 이진 탐색으로 가장 가까운 방에 배정
while True:
cand = bisect.bisect_right(room_num, room) # 오른쪽 기준 탐색
if rooms[room_num[cand]] == 0: # 비어있으면 배정
rooms[room_num[cand]] = 1
answer.append(room_num[cand])
break
room = room_num[cand] # 다음 방으로 탐색
return answer
# 정확성 + 효율성
def solution2(k, room_number):
rooms = collections.defaultdict(int) # 방 정보 저장 + 방문 기록
answer = []
for room in room_number:
n = room # 방문 기록으로 만든다.
visit = [n] # 방문 기록들
while n in rooms: # 각 방별로 조회
n = rooms[n] # 새로운 n을 설정
visit.append(n) # 이걸 통해 빈 방을 찾을 때까지 조회한다
answer.append(n) # 정답 넣기
for vi in visit: # 방문 기록을 저장한다.
rooms[vi] = n + 1 # 다음으로 가야될 방을 이걸로 저장해주는 것!
return answer
room_number = [1, 3, 4, 1, 3, 1]
k = 10
print(solution2(k, room_number)) | [
"gunwook0307@yonsei.ac.kr"
] | gunwook0307@yonsei.ac.kr |
a1ac33d8d408048b216a7e510c1fb0621d89afe5 | 890d11bd06222125b4b4f4af7cea814544755403 | /graff/db.py | 6d91204e1996641bee36be6458a2f7e6cc6b3cea | [] | no_license | eklitzke/graff | 8f80f4a5ea015fa320c9dbd46dedbb6067d7ce2e | 8146c4a4397d2ee481549c407fb976c0c763fcc8 | refs/heads/master | 2021-01-01T05:49:33.717421 | 2011-12-26T21:28:04 | 2011-12-26T21:28:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,070 | py | import datetime
import hashlib
import os
from sqlalchemy import create_engine, func, Column, ForeignKey
from sqlalchemy.types import Integer, String, Float, DateTime, Boolean
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.ext.declarative import declarative_base
import warnings
from graff import config
from graff import crypto
from graff import geo
if config.get('memory', True):
engine = create_engine('sqlite:///:memory:')
now = func.datetime()
else:
engine = create_engine('mysql+mysqldb://' +
config.get('db_user', 'graff') + ':' +
config.get('db_pass', 'gr4ff') + '@' +
config.get('db_host', '127.0.0.1') + '/' +
config.get('db_schema', 'graff'), pool_recycle=3600)
now = func.now()
Session = sessionmaker(bind=engine)
class _Base(object):
@property
def encid(self):
if hasattr(self, 'secret_key'):
return crypto.encid(self.id, self.secret_key)
else:
return crypto.encid(self.id)
@classmethod
def create(cls, session, **kw):
obj = cls(**kw)
session.add(obj)
return obj
@classmethod
def by_id(cls, session, row_id):
return session.query(cls).filter(cls.id == row_id).first()
@classmethod
def from_encid(cls, session, encid):
if hasattr(cls, 'secret_key'):
row_id = crypto.decid(encid, cls.secret_key)
else:
row_id = crypto.decid(encid)
return cls.by_id(session, row_id)
@classmethod
def most_recent(cls, session, limit):
return session.query(cls).order_by(cls.id.desc()).limit(limit)
Base = declarative_base(cls=_Base)
GEOHASH_PRECISION = 12
class Photo(Base):
__tablename__ = 'photo'
id = Column(Integer, primary_key=True)
body_hash = Column(String(40), nullable=False)
content_type = Column(String(64), nullable=False)
fsid = Column(String(32), nullable=False)
latitude = Column(Float)
longitude = Column(Float)
geohash = Column(String(GEOHASH_PRECISION))
make = Column(String(128))
model = Column(String(128))
photo_time = Column(DateTime, nullable=False, default=now)
photo_height = Column(Integer, nullable=False)
photo_width = Column(Integer, nullable=False)
remote_ip = Column(Integer, nullable=False)
sensor = Column(Boolean)
time_created = Column(DateTime, nullable=False, default=now)
user_id = Column(Integer, ForeignKey('user.id'))
user = relationship('User', backref=backref('photos', order_by=id))
@property
def time_ago(self):
delta = datetime.datetime.now() - self.time_created
if delta < datetime.timedelta(seconds=30):
return 'a moment ago'
elif delta < datetime.timedelta(seconds=120):
return '1 minute ago'
elif delta < datetime.timedelta(seconds=59 * 60):
return '%d minutes ago' % (int(delta.total_seconds() / 60.0),)
elif delta < datetime.timedelta(seconds=120 * 60):
return '1 hour ago'
elif delta < datetime.timedelta(seconds=24 * 60 * 60):
return '%d hours ago' % (int(delta.total_seconds() / 3600.0),)
elif delta < datetime.timedelta(seconds=2 * 86400):
return '1 day ago'
else:
return '%d days ago' % (int(delta.total_seconds() / 84600.0),)
@classmethod
def get_nearby(cls, session, limit=None, user=None, bounds=None):
"""Get all of the photos in an area (possibly unbounded). Results are
returned in descending order of age (i.e. newest photos first).
"""
assert limit is not None
q = session.query(cls)
if bounds:
hashcode = geo.get_bounding_geohash(bounds['n'], bounds['w'], bounds['s'], bounds['e'])
q.filter(cls.geohash.like(hashcode + '%')).filter(cls.latitude <= bounds['n']).filter(cls.latitude >= bounds['s']).filter(cls.longitude >= bounds['w']).filter(cls.longitude <= bounds['e'])
if user:
u = User.by_name(session, user)
q.filter(cls.user_id == u.id)
return q.order_by(cls.time_created.desc()).limit(limit)
def to_json(self):
return {
'id': self.encid,
'latitude': self.latitude,
'longitude': self.longitude,
'time_ago': self.time_ago,
'time_created': int(self.time_created.strftime('%s')),
'user': self.user.name if self.user_id else None
}
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
pw_hash = Column(String(56), nullable=False)
email = Column(String)
location = Column(String)
signup_ip = Column(Integer, nullable=False)
login_ip = Column(Integer, nullable=False)
time_created = Column(DateTime, nullable=False, default=now)
@classmethod
def create(cls, session, **kwargs):
if session.query(cls).filter(cls.name == kwargs['name']).first() is not None:
return 'That username has already been taken'
if kwargs['email'] and session.query(cls).filter(cls.email == kwargs['email']).first() is not None:
return 'That email has already been registered'
with open('/dev/random', 'rb') as devrandom:
salt = devrandom.read(8)
hashval = hashlib.sha1(salt + kwargs.pop('password').encode('ascii')).digest()
kwargs['pw_hash'] = (salt + hashval).encode('hex')
kwargs['signup_ip'] = kwargs['login_ip'] = kwargs.pop('remote_ip')
return super(User, cls).create(session, **kwargs)
@classmethod
def authenticate(cls, session, name, password, remote_ip):
row = session.query(cls).filter(cls.name == name).first()
if row is None:
return None
row_hash = row.pw_hash.decode('hex')
if hashlib.sha1(str(row_hash[:8]) + password.encode('ascii')).digest() == row_hash[8:]:
row.login_ip = remote_ip
return row
return None
@classmethod
def by_name(cls, session, name):
return session.query(cls).filter(cls.name == name).first()
# set up encryption keys
g = globals()
crypto_keys = set()
for k, v in g.items():
find_key = False
try:
if issubclass(v, Base) and v is not Base:
find_key = True
except TypeError:
continue
if find_key:
if 'secret_key' in v.__dict__:
warnings.warn('static key set for %s' % (v,))
elif config.get('key_' + v.__name__) is not None:
v.secret_key = config.get('key_' + v.__name__)
elif config.get('memory'):
v.secret_key = os.urandom(16)
else:
v.secret_key = '?' * 16
if v.secret_key in crypto_keys:
warnings.warn('re-using crypto key for %s' % (v,))
crypto_keys.add(v.secret_key)
del crypto_keys
del g
if config.get('memory', True):
Base.metadata.create_all(engine)
| [
"evan@eklitzke.org"
] | evan@eklitzke.org |
8c49e8ae73abdd7a66789562176f376394c27e17 | 64f9f39485900853d64d1f727a80e097a6836053 | /dabi/dabi/items.py | 71177c9d7eedfa5c63ee957c1921f527c8b53231 | [] | no_license | timbortnik/dabi-scraper | 9f09e6f2e639b388106725f2a883b15fb7d7eb21 | 4262714b9ac62359b938b417abafae62cb76cfe3 | refs/heads/master | 2021-04-03T07:41:28.409473 | 2018-03-08T20:28:39 | 2018-03-08T20:28:39 | 124,365,911 | 0 | 0 | null | 2018-03-08T16:28:37 | 2018-03-08T09:07:56 | Python | UTF-8 | Python | false | false | 251 | py | from scrapy.contrib.loader import ItemLoader
from scrapy.contrib.loader.processor import TakeFirst, MapCompose
class TakeFirstItemLoader(ItemLoader):
default_output_processor = TakeFirst()
default_input_processor = MapCompose(unicode.strip)
| [
"dchaplinsky@conversionscience.co.uk"
] | dchaplinsky@conversionscience.co.uk |
e9d28e0e53301a24a36450014edba54f0c72b8ff | e53aa5ebfff14c484942cb6ae57db98c15000ee7 | /test/integration/ggrc/services/test_collection_post.py | 6b2579c1484b4853f4b43db50c36186275fe8969 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dondublon/ggrc-core | bde266be2b8918afb85e7f659a561e63f49bd748 | ea8258f0eb58a4b04f8c7b85c9ab9ae1e87cd228 | refs/heads/release/0.10-Raspberry | 2021-01-21T18:21:53.202351 | 2017-05-21T21:55:22 | 2017-05-21T21:55:22 | 92,039,199 | 0 | 0 | null | 2017-05-22T10:03:43 | 2017-05-22T10:03:43 | null | UTF-8 | Python | false | false | 7,377 | py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for collection post service."""
import json
from ggrc import db
from ggrc import models
from integration.ggrc.services import TestCase
class TestCollectionPost(TestCase):
"""Test for collection post api calls."""
@staticmethod
def get_location(response):
"""Ignore the `http://localhost` prefix of the Location"""
return response.headers['Location'][16:]
@staticmethod
def headers(*args, **kwargs):
"""Get request headers."""
ret = list(args)
ret.append(('X-Requested-By', 'Unit Tests'))
ret.extend(kwargs.items())
return ret
def test_collection_post_successful(self):
"""Test normal successful collection post call."""
data = json.dumps(
{'services_test_mock_model': {'foo': 'bar', 'context': None}})
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assertStatus(response, 201)
self.assertIn('Location', response.headers)
response = self.client.get(
self.get_location(response), headers=self.headers())
self.assert200(response)
self.assertIn('Content-Type', response.headers)
self.assertEqual('application/json', response.headers['Content-Type'])
self.assertIn('services_test_mock_model', response.json)
self.assertIn('foo', response.json['services_test_mock_model'])
self.assertEqual('bar', response.json['services_test_mock_model']['foo'])
# check the collection, too
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
1, len(response.json['test_model_collection']['test_model']))
self.assertEqual(
'bar', response.json['test_model_collection']['test_model'][0]['foo'])
def test_successful_single_array(self):
"""Test collection post successful single array."""
data = json.dumps(
[{'services_test_mock_model': {'foo': 'bar', 'context': None}}])
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
self.assertEqual(type(response.json), list)
self.assertEqual(len(response.json), 1)
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
1, len(response.json['test_model_collection']['test_model']))
self.assertEqual(
'bar', response.json['test_model_collection']['test_model'][0]['foo'])
def test_successful_multiple(self):
"""Test collection post successful multiple."""
data = json.dumps([
{'services_test_mock_model': {'foo': 'bar1', 'context': None}},
{'services_test_mock_model': {'foo': 'bar2', 'context': None}},
])
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
self.assertEqual(type(response.json), list)
self.assertEqual(len(response.json), 2)
self.assertEqual(
'bar1', response.json[0][1]['services_test_mock_model']['foo'])
self.assertEqual(
'bar2', response.json[1][1]['services_test_mock_model']['foo'])
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
2, len(response.json['test_model_collection']['test_model']))
def test_multiple_with_errors(self):
"""Test collection post successful multiple with errors."""
data = json.dumps([
{'services_test_mock_model':
{'foo': 'bar1', 'code': 'f1', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar1', 'code': 'f1', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar2', 'code': 'f2', 'context': None}},
{'services_test_mock_model':
{'foo': 'bar2', 'code': 'f2', 'context': None}},
])
self.client.get("/login")
response = self.client.post(
self.mock_url(),
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assertEqual(400, response.status_code)
self.assertEqual([400], [i[0] for i in response.json])
response = self.client.get(self.mock_url(), headers=self.headers())
self.assert200(response)
self.assertEqual(
0, len(response.json['test_model_collection']['test_model']))
def test_post_bad_request(self):
"""Test collection post with invalid content."""
response = self.client.post(
self.mock_url(),
content_type='application/json',
data='This is most definitely not valid content.',
headers=self.headers(),
)
self.assert400(response)
def test_bad_content_type(self):
"""Test post with bad content type."""
response = self.client.post(
self.mock_url(),
content_type='text/plain',
data="Doesn't matter, now does it?",
headers=self.headers(),
)
self.assertStatus(response, 415)
def test_post_relationship(self):
"""Test integrity error on relationship collection post.
Posting duplicate relationships should have a mechanism for removing
duplicates from the post request and fixing unique integrity errors.
"""
db.session.add(models.Policy(id=144, title="hello"))
db.session.add(models.Policy(id=233, title="world"))
db.session.add(models.Policy(id=377, title="bye"))
db.session.commit()
self.client.get("/login")
data = json.dumps([{
"relationship": {
"source": {"id": 144, "type": "Policy"},
"destination": {"id": 233, "type": "Policy"},
"context": None,
},
}])
response = self.client.post(
"/api/relationships",
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
relationships = models.Relationship.eager_query().all()
self.assertEqual(len(relationships), 1)
rel1 = relationships[0]
self.assertEqual({144, 233}, {rel1.source.id, rel1.destination.id})
data = json.dumps([{
"relationship": { # this should be ignored
"source": {"id": 144, "type": "Policy"},
"destination": {"id": 233, "type": "Policy"},
"context": None,
},
}, {
"relationship": {
"source": {"id": 377, "type": "Policy"},
"destination": {"id": 144, "type": "Policy"},
"context": None,
},
}, {
"relationship": { # Refactored api will ignore this
"source": {"id": 144, "type": "Policy"},
"destination": {"id": 377, "type": "Policy"},
"context": None,
},
}])
response = self.client.post(
"/api/relationships",
content_type='application/json',
data=data,
headers=self.headers(),
)
self.assert200(response)
relationships = models.Relationship.eager_query().all()
self.assertEqual(len(relationships), 3) # This should be 2
rel1 = relationships[0]
| [
"zidarsk8@gmail.com"
] | zidarsk8@gmail.com |
2dadad188a8092ba4e9d3982cd96b444bb4ea81e | 3e5fc180f90d63bcff54eab1cea310f3a2bbea10 | /manage.py | d4baaecbe6653ea8773bc39d8cb8bc5c57dddba2 | [] | no_license | eht16/dpaste.de | fbb9aa24c0fdf8373350d1d1f162f7bd596e60a6 | 2b9797ae125808567cfdcac9292a3d5f58d42aab | refs/heads/master | 2021-01-17T03:42:55.691283 | 2013-03-31T09:20:01 | 2013-03-31T09:20:01 | 2,736,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | #!/usr/bin/env python
import os
import sys
from django.core.management import execute_from_command_line
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pastebin.conf.local.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"enrico.troeger@uvena.de"
] | enrico.troeger@uvena.de |
b26774f0589d15eb68e5fa29c1c34e7a62e3d687 | 43a07c7e2b7f46e95e4693afa11ddbcce195c262 | /yatra/urls.py | 1320df4d2f2937954e47acdf2295fde68c91fb2a | [] | no_license | ashokkuikel/yatra | 217ae9432852e164cec12c7a3aca55440cebfb91 | 3a1a5d7df6f754a1d5de1b07172f28496cd4beec | refs/heads/master | 2020-03-07T00:53:40.816789 | 2016-07-03T03:16:33 | 2016-07-03T03:16:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | """yatra URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from tour.views import *
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$', HomeView.as_view(), name="home"),
url(r'^logout/$', LogoutView.as_view(), name="logout"),
url(r'^dashboard/$', DashboardView.as_view(), name="dashboard"),
url(r'^search/$', SearchView.as_view(), name="search"),
url(r'^plan/(?P<pk>\d+)/$', PlanView.as_view(), name="plan"),
url(r'^visualize/$', VisualizeView.as_view(), name="visualize"),
]
| [
"bibekdahal.bd16@gmail.com"
] | bibekdahal.bd16@gmail.com |
d810b2f8cd0199f8b3c9e947cef421163bf2a574 | 5a142fb7312fedd4a0386247337b0188112b426e | /project2/face-reco/reconnaissance avec sqlite/trainner.py | 4ceedbed3e17fbc06e80d1f7ae6c1d751ae9b94a | [] | no_license | rolandus10/Projet_miroir | aece30aabe543d0165554476938c728a4e341af1 | 71cbca1fe2a0863b108f045f2f6f84e5eda45705 | refs/heads/master | 2020-04-25T16:54:23.361658 | 2019-05-10T14:56:19 | 2019-05-10T14:56:19 | 172,928,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py | import os
import cv2
import numpy as np
from PIL import Image
recognizer= cv2.face.createLBPHFaceRecognizer()
path='dataSet'
def getImages_And_ID (path):
''' crée une liste avec les chemin relatif des différentes images '''
imagePaths=[os.path.join(path,f) for f in os.listdir(path)]
faces=[]
IDs=[]
for imPath in imagePaths:
#faceImg est une image au format PIL ==> on doit la convertir en numpy car cv2 ne travail qu'avec des format numpy
faceImg=Image.open(imPath).convert('L')
faceNp=np.array(faceImg,'uint8')
id=int(os.path.split(imPath)[-1].split('.')[1])
# on rempli les listes
faces.append(faceNp)
IDs.append(id)
cv2.imshow("training",faceNp)
cv2.waitKey(10)
return np.array(IDs),faces
IDs,faces=getImages_And_ID (path)
recognizer.train(faces,IDs)
# le fichier recognizer doit exister!
recognizer.save('recognizer/trainingData.yml')
cv2.destroyAllWindows()
os.system("pause")
| [
"pi@raspberrypi"
] | pi@raspberrypi |
cd1abe48746f3e5e7806bdad54ba17455c93c632 | 9a206d604ea4bb976c35e8ea2a20abc20e2086aa | /scripts/RNN_217.py | 10290816a41409706b0eb01e4986b0b5a5f2d296 | [
"MIT"
] | permissive | ShepherdCode/BuildingEnergy | 24c9e35bc26a9fba2dc5aa697fd8d0f5a6e46051 | a2b5a260fed1a0adb57ffe373d3971099b1db66b | refs/heads/main | 2023-06-04T02:50:57.513211 | 2021-05-06T11:11:00 | 2021-05-06T11:11:00 | 349,497,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,487 | py | #!/usr/bin/env python
# coding: utf-8
# # RNN
# Compare to RNN_218. No smoothing. Predictors = hour-of-day, day-of-year. Given 12 hrs, predict 12 hr starting 24 hr ahead. Train on year 1, test on year 2.
#
# In[1]:
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import SimpleRNN
from keras.layers import LSTM
from keras.layers import GRU
from keras.layers import TimeDistributed
from keras.layers import Dense
from keras.losses import MeanSquaredError
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
# In[2]:
# Constants
EPOCHS=50 # use 5 for software testing, 50 for model testing
SITE = 'Eagle'
PREDICTORS = ['hour','month','doy','meter','cloudCoverage', 'airTemperature', 'dewTemperature', 'precipDepth1HR', 'precipDepth6HR', 'seaLvlPressure', 'windDirection', 'windSpeed']
PREDICTORS = ['hour','doy'] # short list for testing
NUM_PREDICTORS=len(PREDICTORS)
print("PREDICTORS=",NUM_PREDICTORS,PREDICTORS)
PREDICTED_VARIABLE = 'meter'
STEPS_HISTORY = 24
STEPS_FORWARD = 12
STEPS_FUTURE = 12
METER_FILE='steam.csv'
WEATHER_FILE='weather.csv'
EXAMPLE='Eagle_lodging_Edgardo'
SITE_BUILDINGS = None
# In[3]:
DATAPATH=''
try:
# On Google Drive, set path to my drive / data directory.
from google.colab import drive
IN_COLAB = True
PATH='/content/drive/'
drive.mount(PATH)
DATAPATH=PATH+'My Drive/data/' # must end in "/"
except:
# On home computer, set path to local data directory.
IN_COLAB = False
DATAPATH='data/' # must end in "/"
ZIP_FILE='BuildingData.zip'
ZIP_PATH = DATAPATH+ZIP_FILE
MODEL_FILE='Model' # will be used later to save models
# In[4]:
def read_zip_to_panda(zip_filename,csv_filename):
zip_handle = ZipFile(zip_filename)
csv_handle = zip_handle.open(csv_filename)
panda = pd.read_csv(csv_handle)
return panda
def fix_date_type(panda):
# Convert the given timestamp column to the pandas datetime data type.
panda['timestamp'] = pd.to_datetime(panda['timestamp'], infer_datetime_format = True)
indexed = panda.set_index(['timestamp'])
return indexed
# In[5]:
def load_weather_for_site(site):
wet_df = read_zip_to_panda(ZIP_PATH,WEATHER_FILE)
wet_df = fix_date_type(wet_df)
site_df = wet_df.loc[wet_df['site_id'] == site]
# Drop the site, which is constant (we selected for one site).
site_df = site_df.drop(['site_id'],axis=1)
site_df.insert(0,'hour',0)
site_df.insert(1,'month',0)
site_df.insert(2,'doy',0)
L=len(site_df)
for i in range(0,L):
dt=site_df.index[i]
hour=dt.hour
month=dt.month
doy=dt.dayofyear
site_df.iat[i,0] = hour
site_df.iat[i,1] = month
site_df.iat[i,2] = doy
return site_df
one_site_weather = load_weather_for_site(SITE)
one_site_weather.tail()
# In[6]:
def load_meter_for_building(bldg):
all_df = read_zip_to_panda(ZIP_PATH,METER_FILE)
all_df = fix_date_type(all_df)
global SITE_BUILDINGS
SITE_BUILDINGS = [x for x in all_df.columns if x.startswith(SITE)]
site_series = all_df[bldg]
site_df = site_series.to_frame()
#site_df = all_df.loc[all_df['site_id'] == site]
# Change column name from building name to meter.
site_df = site_df.rename(columns={bldg : PREDICTED_VARIABLE})
return site_df
one_bldg_meter = load_meter_for_building(EXAMPLE)
print(type(one_bldg_meter))
one_bldg_meter.tail()
# In[7]:
def prepare_for_learning(wdf,mdf):
# Concatenate weather and meter.
df = pd.concat([wdf,mdf],axis=1)
num_samples = len(df) - STEPS_FUTURE - STEPS_HISTORY
X_shape = (num_samples,STEPS_FUTURE,NUM_PREDICTORS)
Y_shape = (num_samples,STEPS_FUTURE)
X=np.zeros(X_shape)
y=np.zeros(Y_shape)
predictor_series = df[PREDICTORS].values # selected features
predicted_series = df[PREDICTED_VARIABLE].values # meter
# TO DO: can we take predicted from mdf instead?
for sam in range (0,num_samples):
prev_val = 0
one_sample = predictor_series[sam:sam+STEPS_FORWARD]
for time in range (0,STEPS_FORWARD):
one_period = one_sample[time]
for feat in range (0,NUM_PREDICTORS):
val = one_period[feat]
if np.isnan(val):
val = prev_val
else:
prev_val = val
X[sam,time,feat] = val
for time in range (0,STEPS_FUTURE):
y[sam,time]=predicted_series[sam+STEPS_HISTORY+time]
return X,y
X,y = prepare_for_learning(one_site_weather,one_bldg_meter)
print("X shape:",X.shape)
print("y shape:",y.shape)
# In[8]:
print("X columns:",PREDICTORS)
print("X example:\n",X[100].astype(int))
print("y example:\n",y[100].astype(int))
# In[9]:
def make_RNN():
# The GRU in Keras is optimized for speed on CoLab GPU.
rnn = Sequential([
GRU(16,return_sequences=True,
input_shape=(STEPS_FORWARD,NUM_PREDICTORS)),
GRU(16,return_sequences=True),
GRU(16,return_sequences=False),
Dense(STEPS_FUTURE)
])
rnn.compile(optimizer='adam',loss=MeanSquaredError())
return rnn
# In[12]:
cors = []
one_site_weather = load_weather_for_site(SITE)
for BLDG in SITE_BUILDINGS:
print("Building",BLDG)
one_bldg_meter = load_meter_for_building(BLDG)
count_bad = one_bldg_meter[PREDICTED_VARIABLE].isna().sum()
MAX_BAD = 500
if count_bad<=MAX_BAD:
# Must get rid of Nan labels, else loss hits NaN during training.
print(" Count bad values before:",count_bad)
pseudovalue = one_bldg_meter[PREDICTED_VARIABLE].mean()
one_bldg_meter = one_bldg_meter.fillna(pseudovalue)
count_bad = one_bldg_meter[PREDICTED_VARIABLE].isna().sum()
print(" Count bad values after:",count_bad)
#
X,y = prepare_for_learning(one_site_weather,one_bldg_meter)
split = len(X)//2 # year 1 vs year 2
X_train = np.asarray(X[0:split])
y_train = np.asarray(y[0:split])
X_test = np.asarray(X[split:])
y_test = np.asarray(y[split:])
model = make_RNN()
print(model.summary())
#print("Example X train:\n",X_train[example].astype(int))
example=411
print("Example y train:\n",y_train[example].astype(int))
model.fit(X_train,y_train,epochs=EPOCHS)
# Keep a table for reporting later.
y_pred = model.predict(X_test)
rmse = mean_squared_error(y_test,y_pred,squared=False)
mean = one_bldg_meter[PREDICTED_VARIABLE].mean()
cors.append([mean,rmse,rmse/mean,BLDG])
print("mean,rmse,rmse/mean,bldg:",mean,rmse,rmse/mean,BLDG)
for hr in range(0,24,2):
print("Example prediction:\n",hr,y_pred[example+hr].astype(int))
print()
print("History",STEPS_HISTORY,"Future",STEPS_FUTURE)
print("Column 1: Mean usage.")
print("Column 2: RMSE of LinearRegression(X=Weather, y=Usage).")
print("Column 3: RMSE/mean normalized to help understand RMSE.")
print("Column 4: Building.")
for cor in sorted(cors):
print("%10.2f %10.2f %5.2f %s"%(cor[0],cor[1],cor[2],cor[3]))
# In[12]:
# In[12]:
| [
"jmill02@shepherd.edu"
] | jmill02@shepherd.edu |
ebac8572ad03da254a425a4a75b0e61974e3b761 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03385/s603024800.py | 7a526ca41c02972a2a71f7357059eef123871f20 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | s = input()
ans = True
if "a" not in s:
ans = False
if "b" not in s:
ans = False
if "c" not in s:
ans = False
if ans:
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
973c0bedba67067c54281cdc095a2919b8cf0883 | bd696223aaf5404987df11832b4c17c916b9690f | /nlp_sample/deep_zero_nlp/ch07/train_better_seq2seq.py | 50920b13d185794e98201df4a04b802eb0f7c886 | [] | no_license | wararaki718/scrapbox3 | 000a285477f25c1e8a4b6017b6ad06c76f173342 | 9be5dc879a33a1988d9f6611307c499eec125dc2 | refs/heads/master | 2023-06-16T08:46:32.879231 | 2021-07-17T14:12:54 | 2021-07-17T14:12:54 | 280,590,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | import numpy as np
from dataset import sequence
from optimizer import Adam
from trainer import Trainer
from util import eval_seq2seq
from peeky_seq2seq import PeekySeq2seq
def main() -> None:
(x_train, t_train), (x_test, t_test) = sequence.load_data('addition.txt')
x_train, x_test = x_train[:, ::-1], x_test[:, ::-1]
char_to_id, id_to_char = sequence.get_vocab()
vocab_size = len(char_to_id)
wordvec_size = 16
hidden_size = 128
batch_size = 128
max_epoch = 25
max_grad = 5.0
model = PeekySeq2seq(vocab_size, wordvec_size, hidden_size)
optimizer = Adam()
trainer = Trainer(model, optimizer)
acc_list = []
for epoch in range(1, max_epoch+1):
trainer.fit(x_train, t_train, max_epoch=1, batch_size=batch_size, max_grad=max_grad)
correct_num = 0
for i in range(len(x_test)):
question, correct = x_test[[i]], t_test[[i]]
verbose = i < 10
correct_num += eval_seq2seq(model, question, correct, id_to_char, verbose)
acc = float(correct_num) / len(x_test)
acc_list.append(acc)
print(f'val acc {acc*100}%')
print('DONE')
if __name__ == '__main__':
main()
| [
"ky7.ott.w@gmail.com"
] | ky7.ott.w@gmail.com |
fa413f4d8bb5301fad59fb5a048130fde9b3e8b9 | bfa44aa7f6a54a9b60c5ff545897787b15e13c61 | /webcart/users/signals.py | 65e2687c2a7bb1d6aabb4b8ac6bc39735bdb8bb6 | [] | no_license | BabGee/foodcart | e26ef7dbb19df9019c45c623d39e6612f56c8e27 | 30c213007313349af20e6efd650e1c71696ee14a | refs/heads/master | 2022-11-27T12:24:11.279614 | 2020-02-22T10:41:41 | 2020-02-22T10:41:41 | 223,042,865 | 1 | 0 | null | 2022-11-22T04:51:01 | 2019-11-20T22:56:50 | Tcl | UTF-8 | Python | false | false | 401 | py | from django.contrib.auth.models import User
from django.db.models import post_save
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
instance.profile.save()
| [
"you@example.com"
] | you@example.com |
768c574e336ec16dd3c35ce7b0d914c8cd847468 | e46e8ae7c95d16ce69bd21159335ab86bc2415cf | /Chapter2 recurrence/all_range.py | 30f51a965f3127acd33384f06420cb5e435672ea | [] | no_license | HuichuanLI/play-with-data-structure-python | 7f15ca1e679c1f2287a147e6472ebcebd96b235f | b36436efc6af4b1865dacc79db5fa7160eab9d6e | refs/heads/master | 2023-05-25T19:08:01.745918 | 2023-05-16T17:30:40 | 2023-05-16T17:30:40 | 210,399,837 | 32 | 8 | null | null | null | null | UTF-8 | Python | false | false | 578 | py | data_list = [1, 2, 3, 4]
arranges = []
total = 0
def search(depth, datas):
if depth == len(data_list) + 1:
print(arranges)
global total
total += 1
else:
for element in datas:
# 1.设置现场
arranges.append(element)
next_datas = datas[:]
next_datas.remove(element)
# 2.递归
search(depth + 1, next_datas)
# 3.恢复现场
arranges.pop()
if __name__ == "__main__":
search(1, data_list)
print("有{}排列方式".format(total))
| [
"lhc14124908@163.com"
] | lhc14124908@163.com |
16991783d0b774c9275a5c191e4b92ce33c85c6d | e79b7882427836346ed6b5fe07506ab6210228c8 | /seq2class/data_input.py | 35006e6f6688c814c51c8c88fa841fffd8d76517 | [] | no_license | feizhihui/standard-logistic-regression | d6ecc03a717dfcac2dd93a78bb5b61c24a755cfa | f54f758f9122e1a0668525a1f395e5aac2d5e0ba | refs/heads/master | 2021-09-04T20:08:54.907030 | 2018-01-22T02:33:02 | 2018-01-22T02:33:02 | 108,926,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,113 | py | # encoding=utf-8
import numpy as np
class DataMaster(object):
# ==============
def __init__(self, train_mode=True):
if train_mode:
filename = '../Data/ecoli_modifications.gff'
# filename = '../Data/lambda_modifications.gff'
cat_idx, seq_idx = 4, 10
# cat_idx, seq_idx = 2, 8
else:
filename = '../Data/lambda_modifications.gff'
# filename = '../Data/ecoli_modifications.gff'
cat_idx, seq_idx = 2, 8
# cat_idx, seq_idx = 4, 10
with open(filename, 'r') as file:
train_x, train_y, train_c = [], [], []
for row in file.readlines()[4:]:
cols = row.split()
cat, seq = cols[cat_idx], cols[seq_idx].split(";")[1][-41:]
if seq.endswith("N"):
continue
# print(seq, cat)
# assert seq[20] == "A" or seq[20] == "C", "Error:" + seq[20]
train_x.append(self.seq2matrix(seq))
if cat == "modified_base":
train_y.append(0)
else:
train_y.append(1)
train_c.append(cat)
print('Data input completed filename=', filename)
self.datasets = np.array(train_x, dtype=np.float32)
self.datalabels = np.array(train_y, dtype=np.int32)
self.datacat = np.array(train_y, dtype=np.str)
print("availabel data numbers", str(len(self.datalabels)))
if train_mode:
self.pos_idx = (self.datalabels == 1).reshape(-1)
self.neg_idx = (self.datalabels == 0).reshape(-1)
self.datasize = len(self.datalabels[self.pos_idx]) * 2
print("positive data numbers", str(self.datasize // 2))
else:
self.datasize = len(self.datalabels)
# AGCT=>0123
def seq2matrix(self, line):
seq_arr = np.zeros([41])
for j, c in enumerate(line):
if c == 'A':
seq_arr[j] = 0
elif c == 'G':
seq_arr[j] = 1
elif c == 'C':
seq_arr[j] = 2
elif c == 'T':
seq_arr[j] = 3
else:
raise BaseException("Character Exceptin:" + c)
return seq_arr
def shuffle(self):
mark = list(range(self.datasize // 2))
np.random.shuffle(mark)
self.train_x = np.concatenate([self.datasets[self.pos_idx], self.datasets[self.neg_idx][mark]])
self.train_y = np.concatenate([self.datalabels[self.pos_idx], self.datalabels[self.neg_idx][mark]])
self.train_c = np.concatenate([self.datacat[self.pos_idx], self.datacat[self.neg_idx][mark]])
mark = list(range(self.datasize))
np.random.shuffle(mark)
self.train_x = self.train_x[mark]
self.train_y = self.train_y[mark]
self.train_c = self.train_c[mark]
# marks = list(range(len(self.datasets)))
# self.train_x = self.datasets[marks]
# self.train_y = self.datalabels[marks]
if __name__ == '__main__':
DataMaster()
| [
"helloworld@csu.edu.cn"
] | helloworld@csu.edu.cn |
22d2c3c3f83ddb99fb4cac35a591c5d7abff3324 | 60cf5de97160c0c104b447879edd0ea1ca9724e8 | /q9.py | 1c3aa54c1c05b472f9496d5158c1dc41c9d979cc | [] | no_license | VinayHaryan/String | 6f6b7924ab87ac8ea5509edefaa3aeda795b0de0 | 089dcf02a8d26afcae0ac2b23c640be5a6079095 | refs/heads/main | 2023-05-27T22:15:31.792837 | 2021-06-17T08:39:42 | 2021-06-17T08:39:42 | 377,736,749 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | '''
CONVERT A LIST OF CHARACTERS INTO A STRING
Given a list of characters, merge all of them into a string.
Examples:
Input : ['g', 'e', 'e', 'k', 's', 'f', 'o',
'r', 'g', 'e', 'e', 'k', 's']
Output : geeksforgeeks
Input : ['p', 'r', 'o', 'g', 'r', 'a', 'm',
'm', 'i', 'n', 'g']
Output : programming
'''
S = ['g', 'e', 'e', 'k', 's', 'f', 'o', 'r', 'g', 'e', 'e', 'k', 's']
print(''.join(S)) | [
"noreply@github.com"
] | VinayHaryan.noreply@github.com |
892b7d8bf3495d401e44eb57611f36916d1b43a7 | fbfd4efc9f879f90c194aaefe6217a314737483e | /lib/python3.6/site-packages/pytablereader/spreadsheet/excelloader.py | 4c7a6c47870aa80cc986c122a450bdcbc578913c | [] | no_license | EricSchles/pshtt_command | f3519da6def9b42afd7be274f6910be5086a0c6d | a1a01c95aad54fbf124483e3b34cce02ce8eb1d7 | refs/heads/master | 2021-01-20T11:38:32.529084 | 2017-08-28T18:56:34 | 2017-08-28T18:56:34 | 101,676,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,665 | py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import xlrd
from six.moves import range
from .._logger import FileSourceLogger
from .._validator import FileValidator
from ..error import InvalidDataError
from ..error import OpenError
from ..tabledata import TableData
from .core import SpreadSheetLoader
class ExcelTableFileLoader(SpreadSheetLoader):
"""
A file loader class to extract tabular data from Microsoft Excel |TM|
files.
:param str file_path: Path to the loading Excel workbook file.
.. py:attribute:: table_name
Table name string. Defaults to ``%(sheet)s``.
.. py:attribute:: start_row
The first row to search header row.
"""
@property
def format_name(self):
return "excel"
@property
def _sheet_name(self):
return self._worksheet.name
@property
def _row_count(self):
return self._worksheet.nrows
@property
def _col_count(self):
return self._worksheet.ncols
def __init__(self, file_path=None):
super(ExcelTableFileLoader, self).__init__(file_path)
self._validator = FileValidator(file_path)
self._logger = FileSourceLogger(self)
def load(self):
"""
Extract tabular data as |TableData| instances from an Excel file.
|spreadsheet_load_desc|
:return:
Loaded |TableData| iterator.
|TableData| created for each sheet in the workbook.
|load_table_name_desc|
=================== ====================================
Format specifier Value after the replacement
=================== ====================================
``%(filename)s`` Filename of the workbook
``%(sheet)s`` Name of the sheet
``%(format_name)s`` ``"spreadsheet"``
``%(format_id)s`` |format_id_desc|
``%(global_id)s`` |global_id|
=================== ====================================
:rtype: |TableData| iterator
:raises pytablereader.error.InvalidDataError:
If the header row is not found.
:raises pytablereader.error.OpenError:
If failed to open the source file.
"""
self._validate()
self._logger.logging_load()
try:
workbook = xlrd.open_workbook(self.source)
except xlrd.biffh.XLRDError as e:
raise OpenError(e)
for worksheet in workbook.sheets():
self._worksheet = worksheet
if self._is_empty_sheet():
continue
self.__extract_not_empty_col_idx()
try:
start_row_idx = self._get_start_row_idx()
except InvalidDataError:
continue
header_list = self.__get_row_values(start_row_idx)
record_list = [
self.__get_row_values(row_idx)
for row_idx in range(start_row_idx + 1, self._row_count)
]
self.inc_table_count()
yield TableData(
self._make_table_name(), header_list, record_list,
is_strip_quote=True)
def _is_empty_sheet(self):
return any([
self._col_count == 0,
self._row_count <= 1,
# nrows == 1 means exists header row only
])
def _get_start_row_idx(self):
for row_idx in range(self.start_row, self._row_count):
if self.__is_header_row(row_idx):
break
else:
raise InvalidDataError("header row not found")
return row_idx
def __is_header_row(self, row_idx):
cell_type_list = self._worksheet.row_types(
row_idx, self._start_col_idx, self._end_col_idx + 1)
return xlrd.XL_CELL_EMPTY not in cell_type_list
@staticmethod
def __is_empty_cell_type_list(cell_type_list):
return all([
cell_type == xlrd.XL_CELL_EMPTY
for cell_type in cell_type_list
])
def __extract_not_empty_col_idx(self):
col_idx_list = [
col_idx
for col_idx in range(self._col_count)
if not self.__is_empty_cell_type_list(
self._worksheet.col_types(col_idx))
]
self._start_col_idx = min(col_idx_list)
self._end_col_idx = max(col_idx_list)
def __get_row_values(self, row_idx):
return self._worksheet.row_values(
row_idx, self._start_col_idx, self._end_col_idx + 1)
| [
"ericschles@gmail.com"
] | ericschles@gmail.com |
44cc6301ab5207e6ca94e5f9b3f11c3fa2c5d5df | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02661/s571444851.py | 19bb23426d999efd3a0308fcee53bb7a6337cc35 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | N = int(input())
A = []
B = []
for i in range(N):
a, b = list(map(int, input().split()))
A.append(a)
B.append(b)
A = sorted(A)
B = sorted(B)
if N % 2:
ans = B[N//2] - A[N//2] + 1
else:
l = A[N//2-1] + A[N//2]
r = B[N//2-1] + B[N//2]
ans = r-l+1
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
0885f9deabdbc6da3ce5bc00b3bdbd05693f715c | 76a8ea60480331f0f61aeb61de55be9a6270e733 | /downloadable-site-packages/statsmodels/tsa/arima/estimators/burg.py | 13bff501b0145d03d78b3770b04644aef1090db9 | [
"MIT"
] | permissive | bhagyas/Pyto | cd2ec3f35bec703db4ac29b56d17abc4bf03e375 | 907024a9b3e04a2a9de54976778c0e1a56b7b83c | refs/heads/master | 2022-11-19T13:05:07.392454 | 2020-07-21T17:33:39 | 2020-07-21T17:33:39 | 281,886,535 | 2 | 0 | MIT | 2020-07-23T07:48:03 | 2020-07-23T07:48:02 | null | UTF-8 | Python | false | false | 2,286 | py | """
Burg's method for estimating AR(p) model parameters.
Author: Chad Fulton
License: BSD-3
"""
import numpy as np
from statsmodels.tools.tools import Bunch
from statsmodels.regression import linear_model
from statsmodels.tsa.arima.specification import SARIMAXSpecification
from statsmodels.tsa.arima.params import SARIMAXParams
def burg(endog, ar_order=0, demean=True):
"""
Estimate AR parameters using Burg technique.
Parameters
----------
endog : array_like or SARIMAXSpecification
Input time series array, assumed to be stationary.
ar_order : int, optional
Autoregressive order. Default is 0.
demean : bool, optional
Whether to estimate and remove the mean from the process prior to
fitting the autoregressive coefficients.
Returns
-------
parameters : SARIMAXParams object
Contains the parameter estimates from the final iteration.
other_results : Bunch
Includes one component, `spec`, which is the `SARIMAXSpecification`
instance corresponding to the input arguments.
Notes
-----
The primary reference is [1]_, section 5.1.2.
This procedure assumes that the series is stationary.
This function is a light wrapper around `statsmodels.linear_model.burg`.
References
----------
.. [1] Brockwell, Peter J., and Richard A. Davis. 2016.
Introduction to Time Series and Forecasting. Springer.
"""
spec = SARIMAXSpecification(endog, ar_order=ar_order)
endog = spec.endog
# Workaround for statsmodels.tsa.stattools.pacf_burg which doesn't work
# on integer input
# TODO: remove when possible
if np.issubdtype(endog.dtype, np.dtype(int)):
endog = endog * 1.0
if not spec.is_ar_consecutive:
raise ValueError('Burg estimation unavailable for models with'
' seasonal or otherwise non-consecutive AR orders.')
p = SARIMAXParams(spec=spec)
if ar_order == 0:
p.sigma2 = np.var(endog)
else:
p.ar_params, p.sigma2 = linear_model.burg(endog, order=ar_order,
demean=demean)
# Construct other results
other_results = Bunch({
'spec': spec,
})
return p, other_results
| [
"adrilabbelol@gmail.com"
] | adrilabbelol@gmail.com |
96580f873da4af76eeaa0a7a2437894bdc7df269 | 67ed96b8b4762c78c8f96d16bae2076e30dc184d | /CMSSW_5_3_20/src/flowAnalysis/SkimTrack/test/crab/tracktc/anaM185150/crab_pPbrereco.py | 8cd44ec51eb369b9e8a3f23ae7c6680aa1c168ef | [] | no_license | XuQiao/HI | 4dae1dcf600d0ea64ea26403197135790ba3c4a2 | e963cd9a5a393480e83e697b37327f4f7c4de8d4 | refs/heads/master | 2020-12-25T16:53:40.474495 | 2017-02-16T06:00:17 | 2017-02-16T06:00:17 | 64,085,142 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | from WMCore.Configuration import Configuration
config = Configuration()
config.section_('General')
config.General.transferOutputs = True
config.General.requestName = 'FlowLYZHMpPbReReco'
config.section_('JobType')
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'skimtrack_185150_cfi.py'
config.JobType.outputFiles = ['skimTreeTrack.root']
config.section_('Data')
config.Data.inputDBS = 'phys03'
config.Data.inputDataset = '/PAHighPt/davidlw-PA2013_FlowCorr_PromptReco_TrkHM_Gplus_Rereco_ReTracking_v18-28b2b9cce04ec3f20baeb96fbd2295a8/USER'
#config.Data.lumiMask = ''
config.Data.splitting = 'LumiBased'
config.Data.unitsPerJob = 50
#config.Data.runRange = '193093-193999'
#config.Data.publishDBS = 'https://cmsweb.cern.ch/dbs/prod/phys03/DBSWriter/'
#config.Data.inputDBS = 'https://cmsweb.cern.ch/dbs/prod/global/DBSReader/'
config.Data.publication = False
#config.Data.publishDataName = ''
config.Data.outLFNDirBase = '/store/user/qixu/flow/PACorrHM/skim/tracktc/multiM185150/FlowLYZHMpPbReReco'
config.section_('Site')
config.Site.storageSite = 'T2_US_Vanderbilt'
| [
"qixu@cern.ch"
] | qixu@cern.ch |
4ec1cd9429ade9f0f19dfd8668f57e95449097d2 | 214c4a79fd77008bf688aa2fc8bafdff8a80780b | /src/spring/azext_spring/vendored_sdks/appplatform/v2022_04_01/_configuration.py | c776f7eae7d56275c15a1602afefcb70b5d0b14a | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | openapi-env-test/azure-cli-extensions | abb8bcf005a5b75ee47a12745a958bcb64996a44 | d4ffc4100d82af66c4f88e9401397c66b85bd501 | refs/heads/master | 2022-09-15T18:08:46.848581 | 2022-08-09T06:09:14 | 2022-08-09T06:09:14 | 243,698,123 | 0 | 1 | MIT | 2020-11-10T06:59:47 | 2020-02-28T06:53:57 | Python | UTF-8 | Python | false | false | 3,668 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class AppPlatformManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for AppPlatformManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Gets subscription ID which uniquely identify the Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(AppPlatformManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2022-04-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-appplatform/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| [
"noreply@github.com"
] | openapi-env-test.noreply@github.com |
6d4b092383c9317c609ea382bc8fb36cc34e6ff7 | d9e7bd5f582dd3d1a63fb10197896d462ce49027 | /numpy/arrays1.py | cd7171f3ef68e24bfb1c1eb73f84987129ff02d1 | [] | no_license | abhinai96/Python_conceptual_based_programs | 137aa8d4c1354ba7586f7ec2dea6683109cf9393 | 795883b28389ae2b0c46ddacea493530f40774a6 | refs/heads/master | 2022-12-15T11:57:28.862114 | 2020-09-15T03:10:35 | 2020-09-15T03:10:35 | 295,593,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | """import numpy as np
a=np.array([[1,2,3],[5,2,1],[3,4,5]])
print(a)"""
"""import numpy as np
a=np.array([[1,2,3],[4,5,6]])
b=a.diagonal()
print(b)"""
"""import numpy as np
a=np.array([[1,2,3],[4,5,6]])
b=np.delete(a,1,axis=1)
print(b)"""
"""# A basic code for matrix input from user
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
# Initialize matrix
matrix = []
print("Enter the entries rowwise:")
# For user input
for i in range(R): # A for loop for row entries
a =[]
for j in range(C): # A for loop for column entries
a.append(int(input()))
matrix.append(a)
# For printing the matrix
for i in range(R):
for j in range(C):
print(matrix[i][j], end = " ")
print()"""
| [
"noreply@github.com"
] | abhinai96.noreply@github.com |
55fa7227b63e17cf878b0681f01929bab06e011c | ec00584ab288267a7cf46c5cd4f76bbec1c70a6b | /Python/__Data structure/__List/__sorting/is_palindrome.py | 4bf8f9ced8a4ee03328a9eb29a0597957fff6ee3 | [] | no_license | rahuldbhadange/Python | b4cc806ff23953389c9507f43d817b3815260e19 | 7e162117f1acc12537c7eeb36d6983d804122ff3 | refs/heads/master | 2021-06-23T05:04:20.053777 | 2020-01-28T10:34:28 | 2020-01-28T10:34:28 | 217,307,612 | 0 | 0 | null | 2021-06-10T22:44:11 | 2019-10-24T13:35:42 | Python | UTF-8 | Python | false | false | 513 | py | def is_palindrome(word):
q = len(word) - 1
# print(word)
for p in word:
# print("p:", p)
# print("q:", word[q])
if p == word[q]:
# print(p, word[q])
q = q - 1
# print("Yes !!!")
else:
# print("No !!!", p, word[q])
return "No !!!", p, word[q] # ***important
break
# return "Yes !!!"
ans, p, q = is_palindrome('delevelid')
if ans == None:
print("Yes !!!")
else:
print(ans, p, q)
| [
"46024570+rahuldbhadange@users.noreply.github.com"
] | 46024570+rahuldbhadange@users.noreply.github.com |
ef9b3d8b1e318bffc12724be8704c0874cabf335 | 2fac796fa58c67fb5a4a95a6e7f28cbef169318b | /python/drop-eggs.py | 8f4ad841c4590fea0575ac1b7cbb9419816ca206 | [] | no_license | jwyx3/practices | f3fe087432e79c8e34f3af3a78dd10278b66dd38 | 6fec95b9b4d735727160905e754a698513bfb7d8 | refs/heads/master | 2021-03-12T20:41:59.816448 | 2019-04-14T06:47:30 | 2019-04-14T06:47:30 | 18,814,777 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | class Solution:
# @param {int} n an integer
# @return {int} an integer
# NOTE: remember solution!!
def dropEggs(self, n):
# Write your code here
ans = 0
while ans * (ans + 1) / 2 < n:
ans += 1
return ans
def dropEggs(self, n):
import math
# the min integer meet: ans * (ans + 1) / 2 >= n
return int(math.ceil(math.sqrt(2 * n + 0.25) - 0.5))
# another solution is DP which will also apply to dropEggs II
| [
"jwyx88003@gmail.com"
] | jwyx88003@gmail.com |
efee169b60626091e6edbb58979c264141b84775 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_6_mask_unet/mask_5_7a_tv_sobel/tv_s004_sobel_k5_s080/step12 .py | 3efac9d72a8c90fca1e3e55139426debea4e8184 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,248 | py | '''
目前只有 step12 一定需要切換資料夾到 該komg_model所在的資料夾 才能執行喔!
'''
if(__name__ == "__main__"):
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
import os
code_exe_path = os.path.realpath(__file__) ### 目前執行 step10_b.py 的 path
code_exe_path_element = code_exe_path.split("\\") ### 把 path 切分 等等 要找出 kong_model 在第幾層
kong_layer = code_exe_path_element.index("kong_model2") ### 找出 kong_model2 在第幾層
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### 定位出 kong_model2 的 dir
import sys ### 把 kong_model2 加入 sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step12_result_analyzer import Row_col_exps_analyzer
from step11 import *
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### 中間 -1 是為了長度轉index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] 是為了去掉 step1x_, 後來覺得好像改有意義的名字不去掉也行所以 改 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] 是為了去掉 mask_ ,前面的 mask_ 是為了python 的 module 不能 數字開頭, 隨便加的這樣子, 後來覺得 自動排的順序也可以接受, 所以 改0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)
##########################################################################################################################################################################################################################################################################################
ana_dir = template_dir
##########################################################################################################################################################################################################################################################################################
"""
以下留下一些example這樣子
core_amount == 7 是因為 目前 see_amount == 7 ,想 一個core 一個see
task_amount == 7 是因為 目前 see_amount == 7
single_see_multiprocess == True 代表 see內 還要 切 multiprocess,
single_see_core_amount == 2 代表切2分
所以總共會有 7*2 = 14 份 process 要同時處理,
但建議不要用,已經測過,爆記憶體了
"""
### 直接看 dtd_hdr_mix 的狀況
#################################################################################################################################################################################################################
ana_name = "1_ch"
ch_analyze = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what="mask",
row_col_results=[mask_tv_s004_sobel_k5_s080_ch[:4],
mask_tv_s004_sobel_k5_s080_ch[4:]], show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)
############################################
ana_name = "2_ep"
ep_analyze = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what="mask",
row_col_results=[mask_tv_s004_sobel_k5_s080_ep[:8],
mask_tv_s004_sobel_k5_s080_ep[8:]], show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)
############################################
ana_name = "3_noC"
noC_and_add_analyze = Row_col_exps_analyzer(ana_describe=f"{ana_dir}/0_ana_{ana_name}",
ana_what="mask",
row_col_results=[mask_tv_s004_sobel_k5_s080_noC_and_add[:3] + [mask_tv_s004_sobel_k5_s080_ch[2]],
mask_tv_s004_sobel_k5_s080_noC_and_add[3:] + [mask_tv_s004_sobel_k5_s080_ch[2]]], show_in_img=False, show_gt_img=False, bgr2rgb=True, add_loss=False, img_h=512, img_w=512)\
# .analyze_row_col_results_all_single_see(single_see_multiprocess=True, single_see_core_amount=6)
################################################################################################################################################################################################################# | [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
ad27de88b1a43ef76f350d6be322d8b7a45a4a3f | f5d77defeaa244ed8df517d13f21cd6f073adebc | /programas/Laboratorios/8_Archivos/uneArchivos.py | 2b1e581415d9988aa9a5f139b75f649a06d7766b | [] | no_license | lizetheP/PensamientoC | 02a02cf6d537e1637a933a4f3957995f6690d7d6 | 5d5740e0afa4fc487fdc5f2c466df63e9b4a664f | refs/heads/master | 2023-08-10T08:07:09.604983 | 2023-08-08T16:53:10 | 2023-08-08T16:53:10 | 200,893,283 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | def uneArchivos(origen, destino):
file1 = open(origen, "r")
file2 = open(destino, "a")
while True:
letra = file1.read(1)
file2.write(letra)
if not letra:
break
file1.close()
file2.close()
nombre = str(input("Introduce el nombre del archivo origen: "))
nombre2 = str(input("Introduce el nombre del archivo destino: "))
uneArchivos(nombre, nombre2) | [
"lizetheperez@gmail.com"
] | lizetheperez@gmail.com |
784bb7180ef6d6cee3341ea995174a71a8b217e1 | a4e502e9487cf17c53f9f931ec0dbc12168fea52 | /packages/pyre/tracking/File.py | 4c9369af2e3cfd664a739d1c24e5d9541b5c50d4 | [
"BSD-3-Clause"
] | permissive | bryanvriel/pyre | bdc5dd59c46d53ff81f2ece532b9073ac3b65be1 | 179359634a7091979cced427b6133dd0ec4726ea | refs/heads/master | 2021-09-28T00:10:26.454282 | 2018-11-11T16:42:07 | 2018-11-11T16:42:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
# declaration
class File:
"""
A locator that records a position within a file
"""
# meta methods
def __init__(self, source, line=None, column=None):
# save my info
self.source = source
self.line = line
self.column = column
# all done
return
def __str__(self):
text = [
"file={!r}".format(str(self.source))
]
if self.line is not None:
text.append("line={.line!r}".format(self))
if self.column is not None:
text.append("column={.column!r}".format(self))
return ", ".join(text)
# implementation details
__slots__ = "source", "line", "column"
# end of file
| [
"michael.aivazis@orthologue.com"
] | michael.aivazis@orthologue.com |
f2e6792f3d5959656ede35df98bb81357ceeaa40 | 5d1fe71ab6ca5810680039552e2b7c884c212738 | /jackdaw/utils/bhimport.py | 8a34b8a08e3b86a8089b879efc2fe97e7686d6ac | [] | no_license | zimshk/jackdaw | 76977c516a1426302840ec63659bdf0224898cbd | 86d927a0a1a56d0d8685513df7c41afb21e7c521 | refs/heads/master | 2022-12-26T04:32:06.674672 | 2020-10-05T09:54:00 | 2020-10-05T09:54:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,196 | py | import zipfile
import json
import codecs
import pprint
from jackdaw.dbmodel import *
from jackdaw.dbmodel.adinfo import ADInfo
from jackdaw.dbmodel.adcomp import Machine
from jackdaw.dbmodel.aduser import ADUser
from jackdaw.dbmodel.adgroup import Group
from jackdaw.dbmodel.adou import ADOU
class BHImport:
def __init__(self, db_conn = None, db_session = None):
self.zipfile = None
self.files = None
self.db_conn = db_conn
self.db_session = db_session
self.is_zip = False
self.fd = {}
self.ads = {}
self.adn = {} #name -> ad_id
#self.setup_db()
def setup_db(self):
if self.db_session is None:
self.db_session = get_session(self.db_conn)
def import_machines(self):
print('Importing machines!')
for machine in self.get_file('computers')['computers']:
#pprint.pprint(machine)
#input()
m = Machine()
m.ad_id = self.ads[machine['Properties']['objectsid'].rsplit('-',1)[0]]
m.sAMAccountName = machine['Name'].split('.', 1)[0] + '$'
m.objectSid = machine['Properties']['objectsid']
m.description = machine['Properties']['description']
m.operatingSystemVersion = machine['Properties']['operatingsystem']
self.db_session.add(m)
self.db_session.commit()
def import_users(self):
print('Importing users!')
for user in self.get_file('users')['users']:
#pprint.pprint(user)
#input()
m = ADUser()
m.ad_id = self.ads[user['Properties']['objectsid'].rsplit('-',1)[0]]
m.name = user['Name'].split('@', 1)[0]
m.objectSid = user['Properties']['objectsid']
m.description = user['Properties']['description']
m.displayName = user['Properties']['displayname']
m.email = user['Properties']['email']
self.db_session.add(m)
self.db_session.commit()
def import_sessions(self):
print('Importing sessions!')
for session in self.get_file('sessions')['sessions']:
#pprint.pprint(session)
#input()
try:
if session['ComputerName'].startswith('['):
continue
ad_name = session['UserName'].rsplit('@', 1)[1]
cname = session['ComputerName'] + '$'
if session['ComputerName'].find('.') != -1:
cname = session['ComputerName'].split('.', 1)[0] + '$'
qry = self.db_session.query(
Machine.id
).filter_by(ad_id = self.adn[ad_name]
).filter(Machine.sAMAccountName == cname
)
machine_id = qry.first()
if machine_id is None:
raise Exception('Could not find machine!')
m = NetSession()
m.machine_id = machine_id[0]
m.username = session['UserName'].split('@', 1)[0]
self.db_session.add(m)
except Exception as e:
#print(e)
#pprint.pprint(session)
#input()
continue
self.db_session.commit()
def import_ous(self):
print('Importing ous!')
for ou in self.get_file('ous')['ous']:
#pprint.pprint(groups)
#input()
try:
ad_name = ou['Name'].rsplit('@', 1)[1]
m = ADOU()
m.ad_id = self.adn[ad_name]
m.name = ou['Name'].split('@', 1)[0]
m.objectSid = ou['Properties']['objectsid']
m.description = ou['Properties'].get('description', None)
self.db_session.add(m)
except Exception as e:
print(e)
pprint.pprint(ou)
input()
continue
self.db_session.commit()
def import_domains(self):
print('Importing domains!')
for domain in self.get_file('domains')['domains']:#['computers']:
#pprint.pprint(domain)
#input()
di = ADInfo()
di.name = domain['Name']
di.objectSid = domain['Properties']['objectsid']
self.db_session.add(di)
self.db_session.commit()
self.db_session.refresh(di)
self.ad_id = di.id
self.ads[di.objectSid] = di.id
self.adn[di.name] = di.id
def import_gpos(self):
print('Importing gpos!')
for gpo in self.get_file('gpos')['gpos']:
pprint.pprint(gpo)
input()
try:
ad_name = ou['Name'].rsplit('@', 1)[1]
m = ADOU()
m.ad_id = self.adn[ad_name]
m.name = ou['Name'].split('@', 1)[0]
m.objectSid = ou['Properties']['objectsid']
m.description = ou['Properties'].get('description', None)
self.db_session.add(m)
except Exception as e:
print(e)
pprint.pprint(ou)
input()
continue
self.db_session.commit()
def import_groups(self):
print('Importing groups!')
for groups in self.get_file('groups')['groups']:
#pprint.pprint(groups)
#input()
try:
ad_name = groups['Name'].rsplit('@', 1)[1]
m = Group()
m.ad_id = self.adn[ad_name]
m.name = groups['Name'].split('@', 1)[0]
m.objectSid = groups['Properties']['objectsid']
m.description = groups['Properties'].get('description', None)
self.db_session.add(m)
except Exception as e:
print(e)
pprint.pprint(groups)
input()
continue
self.db_session.commit()
def get_file(self, filetype):
if self.is_zip is True:
with zipfile.ZipFile(filepath, 'r') as zf:
with zf.open(self.fd[filetype]) as data:
return json.load(data)
@staticmethod
def from_zipfile(filepath):
bh = BHImport()
if not zipfile.is_zipfile(filepath):
raise Exception('The file on this path doesnt look like a valid zip file! %s' % filepath)
bh.is_zip = True
zip = zipfile.ZipFile(filepath, 'r')
for filename in zip.namelist():
if filename.find('_computers.json') != -1:
bh.fd['computers'] = filename
elif filename.find('_domains.json') != -1:
bh.fd['domains'] = filename
elif filename.find('_gpos.json') != -1:
bh.fd['gpos'] = filename
elif filename.find('_groups.json') != -1:
bh.fd['groups'] = filename
elif filename.find('_ous.json') != -1:
bh.fd['ous'] = filename
elif filename.find('_sessions.json') != -1:
bh.fd['sessions'] = filename
elif filename.find('_users.json') != -1:
bh.fd['users'] = filename
return bh
def from_folder(self, folderpath):
pass
def run(self):
#DO NOT CHANGE THIS ORDER!!!!
self.setup_db()
self.import_domains()
#self.import_groups()
#self.import_machines()
#self.import_users()
#self.import_sessions()
self.import_gpos()
#self.import_ous() #not working!
if __name__ == '__main__':
import sys
db_conn = 'sqlite:///bhtest.db'
filepath = sys.argv[1]
create_db(db_conn)
bh = BHImport.from_zipfile(filepath)
bh.db_conn = db_conn
bh.run()
| [
"info@skelsec.com"
] | info@skelsec.com |
c3ae3203d8281f0e944b529adfd94d0da0039498 | d08cf46d3e16ab8e6a958731168469ba38daf069 | /tests/test_la.py | b60f2699fb65d8f09970ee2b497d1629a323508c | [
"BSD-2-Clause"
] | permissive | spectralDNS/shenfun | ce808edc5258c896f2cccfbd88e67153e3f621c9 | bcda39d8d8e4741df1cafe719d81733cc1024def | refs/heads/master | 2023-07-27T20:29:57.075970 | 2023-07-11T12:33:04 | 2023-07-11T12:33:04 | 79,914,066 | 190 | 46 | BSD-2-Clause | 2022-05-11T19:10:33 | 2017-01-24T13:29:02 | Python | UTF-8 | Python | false | false | 1,261 | py | import numpy as np
import pytest
from shenfun import SparseMatrix, la
import warnings
warnings.filterwarnings('ignore')
N = 10
d = [
{0: np.arange(N)+1},
{0: -2, 2: 1},
{-1: 1, 0: -2, 1: 1},
{-2: 1, 0: -2, 2: 1},
{-2: 1, 0: -2, 2: 1, 4: 0.1},
{-4: 0.1, -2: 1, 0: -2, 2: 1, 4: 0.1}
]
@pytest.mark.parametrize('di', d)
def test_XDMA(di):
"""Testing
- DiagMA
- TwoDMA
- TDMA
- TDMA_O
- FDMA
- PDMA
"""
M = SparseMatrix(di, (N, N))
sol = la.Solver(M)
sol2 = la.Solve(M)
b = np.ones(N)
u_hat = np.zeros_like(b)
u_hat = sol(b, u_hat)
u_hat2 = np.zeros_like(b)
u_hat2 = sol2(b, u_hat2)
assert np.allclose(u_hat2, u_hat)
bh = np.ones((N, N))
uh = np.zeros_like(bh)
uh2 = np.zeros_like(bh)
uh = sol(bh, uh, axis=1)
uh2 = sol(bh, uh2, axis=1)
assert np.allclose(uh2, uh)
assert np.allclose(uh[0], u_hat)
uh = sol(bh, uh, axis=0)
uh2 = sol(bh, uh2, axis=0)
assert np.allclose(uh2, uh)
assert np.allclose(uh[:, 0], u_hat)
if __name__ == "__main__":
#test_solve('GC')
#test_TDMA()
#test_TDMA_O()
#test_DiagMA()
#test_PDMA('GC')
#test_FDMA()
#test_TwoDMA()
test_XDMA(d[1])
| [
"mikaem@math.uio.no"
] | mikaem@math.uio.no |
9e85a07333331e1c9399606d62b5558722bd154b | d2dcc2033847c7a5284b5c4d89a3660b0c21de12 | /applications/sptEditor/src/model/vd/axleCounter.py | 268141ed06aaf5f61f13e37976404529d7829852 | [] | no_license | shaxbee/eu07-spt | c0d27848041ed0511f9f3c32eddc7a6b28877cf9 | 78ae6e9cf4273aa1ca7e05db1326a1587f5eb3f1 | refs/heads/master | 2020-05-19T16:43:34.723930 | 2012-09-26T00:18:31 | 2012-09-26T00:18:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | '''
Created on 2009-09-29
@author: gfirlejczyk
'''
class AxleCounter(object):
'''
Class handling axle counter like point that defines borders of VD Group
'''
def __init__(self, id = None):
self.__id = id
self.__geometryPoint = (0,0,0)
self.__railTrackingID = 0
def __repr__(self):
return 'AxleCounter(id=%s, RailTrackingId=%s, GeometryPoint=%s)' %(
self.__id,
self.__geometryPoint,
self.__railTrackingID)
def setRailTracking(self,railTrackingId):
'''Setting up railTrackingId which is connected to axle counter'''
self.__railTrackingID = railTrackingId
def getRailTracking(self):
'''Returns RailTracking Id which is connected to axle counter'''
return self.__railTrackingID
def setGeometryPoint(self,geometryPoint):
'''Set geometry point in 3d where axle counter is putting down'''
self.__geometryPoint = geometryPoint
def getGeometryPoint(self):
'''Get geometry point in 3d where axle counter is putting down'''
return self.__geometryPoint
def getAxleCounterId(self):
'''Returns axle counter identification number'''
return self.__id | [
"devnull@localhost"
] | devnull@localhost |
bccb08932de8e91329d9be799b262b36c9254568 | 673517e68db4b2540ac3a908a6374aaaa72e0f27 | /synergine/synergy/event/Event.py | 82a31511b1891015a5bb6d189010374c641d9ab2 | [
"Apache-2.0"
] | permissive | buxx/synergine | 3a977b69bc35c1a5af1056f98028f8b7412795d2 | da05d762cdbc993362807d4851e1ca74784438ae | refs/heads/master | 2021-07-03T19:57:24.486164 | 2017-09-04T09:19:45 | 2017-09-04T09:19:45 | 23,734,878 | 6 | 2 | Apache-2.0 | 2021-06-10T14:15:26 | 2014-09-06T13:15:07 | Python | UTF-8 | Python | false | false | 2,423 | py | from synergine.core.exceptions import NotConcernedEvent
from synergine.core.simulation.mechanism.Mechanism import Mechanism
from synergine.cst import COL_ALL
class Event():
"""
Event are called by mechanisms and trig associated actions if conditions matches.
"""
_mechanism = Mechanism
"""Mechanism class who run this event with prepared parameters"""
_concern = COL_ALL
"""The COL id of concerned synergies objects"""
_each_cycle = 1
"""Event ca be executed each x cycle if needed"""
_first_cycle_force = False
"""Event will be executed at first cycle regardless of _each_cycle"""
def __init__(self, actions):
self._actions = actions
@classmethod
def get_mechanism(cls):
"""
:return: Mechanism class who will run this event
:rtype: Mechanism
"""
return cls._mechanism
@classmethod
def get_concern(cls):
"""
:return: COL name if concerned synergies objects
"""
return cls._concern
@classmethod
def get_each_cycle(cls):
"""
:return: The number of each cycle where execute this event
"""
return cls._each_cycle
@classmethod
def is_first_cycle_force(cls):
return cls._first_cycle_force
def observe(self, object_id, context, parameters={}):
"""
Return actions who have to be executed.
:param object_id: The id of observed synergy object
:param context: The Context
:param parameters: Mechanism prepared dict of parameters
:return: list of actions
:rtype: list (of Action)
"""
active_actions = []
try:
parameters = self._prepare(object_id, context, parameters)
for action in self._actions:
action_object = action(object_id, parameters)
active_actions.append(action_object)
except NotConcernedEvent:
pass # Object not concerned by this event
return active_actions
def _prepare(self, object_id, context, parameters={}):
"""
Return dict with parameters for actions
:param object_id: The id of observed synergy object
:param context: The Context
:param parameters: Mechanism prepared dict of parameters
:raise: NotConcernedEvent
:return:
"""
raise NotImplementedError() | [
"sevajol.bastien@gmail.com"
] | sevajol.bastien@gmail.com |
a877700a9eee5373e8e8075a715386a8c0cbcb9f | 7dd30cae84f19aca8125d5cb35b099cb32cbcb64 | /4-Object_Detection/YOLOV3/core/common.py | 75b29aafe3ed6ee1fab993824ad9f7a08b566e12 | [
"MIT"
] | permissive | Ray507/TensorFlow2.0-Examples | a5d7e38c10132a26203a4783cf539741953040a2 | 90037a846411aab5eb0fd6e74e699e8e58c78cc5 | refs/heads/master | 2020-06-21T18:49:46.249163 | 2019-07-18T06:00:06 | 2019-07-18T06:00:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,881 | py | #! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : common.py
# Author : YunYang1994
# Created date: 2019-07-11 23:12:53
# Description :
#
#================================================================
import tensorflow as tf
class BatchNormalization(tf.keras.layers.BatchNormalization):
"""
define BatchNormalization layers in 'tf' style !!!
"""
def call(self, x, training=False):
if not training:
training = tf.constant(False)
training = tf.logical_and(training, self.trainable)
return super().call(x, training)
def convolutional(input_layer, filters_shape, downsample=False, activate=True, bn=True):
if downsample:
input_layer = tf.keras.layers.ZeroPadding2D(((1, 0), (1, 0)))(input_layer)
padding = 'valid'
strides = 2
else:
strides = 1
padding = 'same'
conv = tf.keras.layers.Conv2D(filters=filters_shape[-1], kernel_size = filters_shape[0], strides=strides, padding=padding,
use_bias=not bn, kernel_regularizer=tf.keras.regularizers.l2(0.0005))(input_layer)
if bn: conv = BatchNormalization()(conv)
if activate == True: conv = tf.nn.leaky_relu(conv, alpha=0.1)
return conv
def residual_block(input_layer, input_channel, filter_num1, filter_num2):
short_cut = input_layer
conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1))
conv = convolutional(conv , filters_shape=(3, 3, filter_num1, filter_num2))
residual_output = short_cut + conv
return residual_output
def upsample(input_layer):
return tf.image.resize(input_layer, (input_layer.shape[1] * 2, input_layer.shape[2] * 2), method='nearest')
| [
"YunYang1994@github.com"
] | YunYang1994@github.com |
547f748a31131edfdd4d47aab9f0b1066b0077be | 4b87a0de0f43de2bde41f2590faac970c18fe482 | /api/android/v1/daily_salary/views.py | 63b2ac976436c9b036a2c2f97896f3a312229ea6 | [] | no_license | krishSona/testbackend | d0bc325776537d9814b9022b3538b5e8a840e6a4 | d87e050d02542c58876d4f81c2ea99815ab4160e | refs/heads/master | 2023-04-08T01:26:42.070058 | 2021-04-03T06:08:54 | 2021-04-03T06:08:54 | 354,214,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,594 | py | import json
from django.http import JsonResponse
from rest_framework import viewsets, permissions
from rest_framework.response import Response
from api.android.v1.daily_salary.serializers import WorkerSerializer
from workers.models import Worker, Advance
# def session(request):
# phone_number = request.GET.get('phone_number')
# if phone_number is not None:
# _worker = Worker.objects.filter(phone=phone_number).first()
# data = {"status": 200, "id": _worker.id}
# else:
# data = {"status": 400, "message": "आपका खाता पंजीकृत नहीं है। कृपया अपने ठेकेदार से संपर्क करें।"}
# return JsonResponse(data)
# def worker(request):
# worker_id = request.GET.get('worker_id')
# if worker_id is not None:
# _worker = Worker.objects.filter(id=worker_id).first()
# data = {"balance": "₹" + str(_worker.balance)}
# else:
# data = {"status": 400, "message": "आपका खाता पंजीकृत नहीं है। कृपया अपने ठेकेदार से संपर्क करें।"}
# return JsonResponse(data)
def transfer_to_bank(request):
worker_id = request.GET.get('worker_id')
if worker_id is not None:
_worker = Worker.objects.filter(id=worker_id).first()
_balance = _worker.balance
_worker.balance = 0
_worker.save()
_advance = Advance.objects.create(worker_id=_worker.id, account_id=_worker.account_id, amount=_balance,
utr="AXPS7170EHAG23G")
data = {"Bank Name:": _advance.account.ifscode.bank.name, "Bank A/C:": _advance.account.number,
"IFSC:": _advance.account.ifscode.code, "Deposited:": _advance.amount, "Txn. No.:": _advance.utr}
else:
data = {"status": 400, "message": "आपका खाता पंजीकृत नहीं है। कृपया अपने ठेकेदार से संपर्क करें।"}
return JsonResponse(data)
class WorkerViewSet(viewsets.ModelViewSet):
swagger_schema = None
"""
API endpoint that allows users to be viewed or edited.
"""
queryset = Worker.objects.all().order_by('-id')
serializer_class = WorkerSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
queryset = Worker.objects.all().order_by('-id')
phone = self.request.query_params.get('phoneNumber', None)
if phone is not None:
queryset = queryset.filter(phone=phone)
return queryset
class AdvanceViewSet(viewsets.ModelViewSet):
swagger_schema = None
queryset = Advance.objects.all().order_by('-id')
permission_classes = [permissions.IsAuthenticated]
def create(self, request):
worker_id = request.data.get('workerId')
_advance = Advance()
# if worker_id is not None:
_worker = Worker.objects.filter(id=worker_id).first()
if _worker is not None:
_balance = _worker.balance
_worker.balance = 0
_worker.save()
_advance = Advance.objects.create(worker_id=_worker.id, account_id=_worker.account_id, amount=_balance,
utr="AXPS7170EHAG23G")
data = {'Bank Name: ': _advance.bank_name, 'Bank A/C: ': _advance.account_number, 'Deposited: ': _advance.amount, 'IFSC: ': _advance.ifsc, 'Txn. No.: ': _advance.utr}
return Response(data)
| [
"kali@dailysalary.in"
] | kali@dailysalary.in |
b6a65345f6f8b4e0cf98221415f4497b86696cbe | db1e48d5f7b1b51c5a535b2a9477e350ad7d35c7 | /angr/engines/pcode/arch/ArchPcode_PIC_12_LE_16_PIC_12C5xx.py | 2674614f8e7b1d4e0fc0450e838d0ee8201dd2af | [
"BSD-2-Clause"
] | permissive | helloexp/angr | f4540c737c9e828e1bdd95bae0758558dd742143 | 724f2172bec21f51b2f798ab5613cf86bef62dae | refs/heads/master | 2022-01-31T03:15:09.922425 | 2022-01-15T06:34:54 | 2022-01-15T06:34:54 | 216,943,426 | 0 | 0 | BSD-2-Clause | 2019-10-23T01:39:41 | 2019-10-23T01:39:40 | null | UTF-8 | Python | false | false | 1,210 | py | ###
### This file was automatically generated
###
from archinfo.arch import register_arch, Endness, Register
from .common import ArchPcode
class ArchPcode_PIC_12_LE_16_PIC_12C5xx(ArchPcode):
name = 'PIC-12:LE:16:PIC-12C5xx'
pcode_arch = 'PIC-12:LE:16:PIC-12C5xx'
description = 'PIC-12C5xx'
bits = 16
ip_offset = 0x0
sp_offset = 0x2
bp_offset = sp_offset
instruction_endness = Endness.LE
register_list = [
Register('indf', 1, 0x0),
Register('tmr0', 1, 0x1),
Register('pcl.0', 1, 0x2),
Register('status.0', 1, 0x3),
Register('fsr.0', 1, 0x4),
Register('osccal', 1, 0x5),
Register('gpio', 1, 0x6),
Register('pc', 2, 0x0, alias_names=('ip',)),
Register('stkptr', 1, 0x2),
Register('w', 1, 0x3),
Register('pcl', 1, 0x4),
Register('fsr', 1, 0x5),
Register('status', 1, 0x6),
Register('pa', 1, 0x7),
Register('z', 1, 0x8),
Register('dc', 1, 0x9),
Register('c', 1, 0xa),
Register('option', 1, 0xb),
Register('tris', 1, 0x20)
]
register_arch(['pic-12:le:16:pic-12c5xx'], 16, Endness.LE, ArchPcode_PIC_12_LE_16_PIC_12C5xx)
| [
"noreply@github.com"
] | helloexp.noreply@github.com |
7e56d4685227decfee31b1786f9de6321591bb55 | 3637fe729395dac153f7abc3024dcc69e17f4e81 | /personal/pythonic/rules/rule2.py | aefdd616b730723104181fffd2d3a6dc46a081e5 | [] | no_license | madmonkyang/cda-record | daced6846c2456f20dddce7f9720602d1583a02a | c431e809e8d0f82e1bca7e3429dd0245560b5680 | refs/heads/master | 2023-06-15T08:16:46.230569 | 2021-07-15T16:27:36 | 2021-07-15T16:27:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # ---------------- 避免劣化代码 ----------------
"""
1)避免只用大小写来区分不同的对象
2)避免使用容易引起混淆的名称,如 element, list, dict
3)不要害怕过长的变量名(不追求过分缩写)
"""
# Bad:
# 不推荐使用 list, element 这种变量名
def funA(list, num):
for element in list:
if num == element:
return True
else:
pass
# Good:
def find_num(search_list, num):
for listValue in search_list:
if num == listValue:
return True
else:
pass
# ---------------- 深入认识Python ----------------
"""
不好的风格:
if foo == 'blah': do_blah_thing()
do_one(); do_two(); do_three()
Pythonic风格:
if foo == 'blah':
do_blah_thing()
do_one()
do_two()
do_three()
"""
| [
"silentbalanceyh@126.com"
] | silentbalanceyh@126.com |
b63277ee93f4fd83c0e024ffaf151d39ce83f2bf | 8c39ba92cc71ff78242477d3256f6ee3daa872c7 | /conan/tools/build/cross_building.py | b446d562e003d06e75c9beea85008625a8b25035 | [
"MIT"
] | permissive | conan-io/conan | eb4427e534a0edbb1fb06c753d5d9587faaef93c | bac455d1329b6744cdc41747354a727c9233179f | refs/heads/release/2.0 | 2023-09-03T18:51:54.345761 | 2023-09-03T17:30:43 | 2023-09-03T17:30:43 | 47,190,624 | 7,754 | 1,182 | MIT | 2023-09-14T15:16:09 | 2015-12-01T13:17:02 | Python | UTF-8 | Python | false | false | 2,043 | py |
def cross_building(conanfile=None, skip_x64_x86=False):
"""
Check if we are cross building comparing the *build* and *host* settings. Returns ``True``
in the case that we are cross-building.
:param conanfile: The current recipe object. Always use ``self``.
:param skip_x64_x86: Do not consider cross building when building to 32 bits from 64 bits:
x86_64 to x86, sparcv9 to sparc or ppc64 to ppc32
:return: ``True`` if we are cross building, ``False`` otherwise.
"""
build_os = conanfile.settings_build.get_safe('os')
build_arch = conanfile.settings_build.get_safe('arch')
host_os = conanfile.settings.get_safe("os")
host_arch = conanfile.settings.get_safe("arch")
if skip_x64_x86 and host_os is not None and (build_os == host_os) and \
host_arch is not None and ((build_arch == "x86_64") and (host_arch == "x86") or
(build_arch == "sparcv9") and (host_arch == "sparc") or
(build_arch == "ppc64") and (host_arch == "ppc32")):
return False
if host_os is not None and (build_os != host_os):
return True
if host_arch is not None and (build_arch != host_arch):
return True
return False
def can_run(conanfile):
"""
Validates whether is possible to run a non-native app on the same architecture.
It’s an useful feature for the case your architecture can run more than one target.
For instance, Mac M1 machines can run both `armv8` and `x86_64`.
:param conanfile: The current recipe object. Always use ``self``.
:return: ``bool`` value from ``tools.build.cross_building:can_run`` if exists, otherwise,
it returns ``False`` if we are cross-building, else, ``True``.
"""
# Issue related: https://github.com/conan-io/conan/issues/11035
allowed = conanfile.conf.get("tools.build.cross_building:can_run", check_type=bool)
if allowed is None:
return not cross_building(conanfile)
return allowed
| [
"noreply@github.com"
] | conan-io.noreply@github.com |
acb23822e7825dd59688e84c89494509cdefc861 | ce005d2e7c72cf74491e099c047873bf56c2f0cd | /pymedphys/_vendor/pylinac/core/utilities.py | dd09826632bee13b79df9871f984e67cce4815b1 | [
"Apache-2.0",
"MIT"
] | permissive | ikevin2810/pymedphys | c645c6baccefd7a26fff37775dc72ddf2a14e9f5 | ed408fc6d20e8640dfbd434b681b3b0828dd360d | refs/heads/master | 2022-11-24T06:10:56.193835 | 2020-07-09T07:20:23 | 2020-07-09T07:20:23 | 278,422,520 | 1 | 0 | Apache-2.0 | 2020-07-09T16:59:23 | 2020-07-09T16:59:22 | null | UTF-8 | Python | false | false | 7,175 | py | # Copyright (c) 2014-2019 James Kerns
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Adapted from https://github.com/jrkerns/pylinac/tree/698254258ff4cb87812840c42b34c93ae32a4693
# pylint: disable = redefined-argument-from-local, unidiomatic-typecheck
"""Utility functions for pylinac."""
import collections
import decimal
import os
import os.path as osp
import struct
import subprocess
from datetime import datetime
from pymedphys._imports import numpy as np
from pymedphys._imports import pydicom
def clear_data_files():
"""Delete all demo files, image classifiers, etc from the demo folder"""
demo_folder = osp.join(osp.dirname(osp.dirname(__file__)), "demo_files")
if osp.isdir(demo_folder):
for file in os.listdir(demo_folder):
full_file = osp.join(demo_folder, file)
if osp.isfile(full_file):
os.remove(full_file)
print("Pylinac data files cleared.")
def assign2machine(source_file: str, machine_file: str):
"""Assign a DICOM RT Plan file to a specific machine. The source file is overwritten to contain
the machine of the machine file.
Parameters
----------
source_file : str
Path to the DICOM RTPlan file that contains the fields/plan desired
(e.g. a Winston Lutz set of fields or Varian's default PF files).
machine_file : str
Path to a DICOM RTPlan file that has the desired machine. This is easily obtained from pushing a plan from the TPS
for that specific machine. The file must contain at least one valid field.
"""
dcm_source = pydicom.dcmread(source_file)
dcm_machine = pydicom.dcmread(machine_file)
for beam in dcm_source.BeamSequence:
beam.TreatmentMachineName = dcm_machine.BeamSequence[0].TreatmentMachineName
dcm_source.save_as(source_file)
def is_close(val, target, delta=1):
"""Return whether the value is near the target value(s).
Parameters
----------
val : number
The value being compared against.
target : number, iterable
If a number, the values are simply evaluated.
If a sequence, each target is compared to ``val``.
If any values of ``target`` are close, the comparison is considered True.
Returns
-------
bool
"""
try:
targets = (value for value in target)
except (AttributeError, TypeError):
targets = [target] # type: ignore
for target in targets:
if target - delta < val < target + delta:
return True
return False
def typed_property(name, expected_type_or_tuple_of_types):
"""Type-enforced property. Python Cookbook 9.21 (3rd ed)."""
storage_name = "_" + name
@property
def prop(self):
return getattr(self, storage_name, None)
@prop.setter
def prop(self, value):
if not isinstance(value, expected_type_or_tuple_of_types):
raise TypeError(
f"{name} must be a {expected_type_or_tuple_of_types}. Got: {type(value)}"
)
setattr(self, storage_name, value)
return prop
def simple_round(number, decimals: int = 0):
"""Round a number to the given number of decimals. Fixes small floating number errors."""
num = int(round(number * 10 ** decimals))
num /= 10 ** decimals
return num
def isnumeric(obj):
"""Check whether the passed object is numeric in any sense."""
return isinstance(obj, (int, float, decimal.Decimal, np.number))
def is_float_like(number):
return isinstance(number, (float, np.float, np.float16, np.float32, np.float64))
def is_int_like(number):
return isinstance(number, (int, np.int, np.int16, np.int32, np.int64, np.int8))
def is_iterable(obj):
"""Determine if an object is iterable."""
return isinstance(obj, collections.Iterable)
class Structure:
"""A simple structure that assigns the arguments to the object."""
def __init__(self, **kwargs):
self.__dict__.update(**kwargs)
def update(self, **kwargs):
self.__dict__.update(**kwargs)
def decode_binary(file, dtype, num_values=1, cursor_shift=0):
"""Read in a raw binary file and convert it to given data types.
Parameters
----------
file : file object
The open file object.
dtype : int, float, str
The expected data type to return. If int or float, will return numpy array.
num_values : int
The expected number of dtype to return
.. note:: This is not the same as the number of bytes.
cursor_shift : int
The number of bytes to move the cursor forward after decoding. This is used if there is a
reserved section after the read-in segment.
"""
f = file
if dtype == str: # if string
output = f.read(num_values)
if type(f) is not str: # in py3 fc will be bytes
output = output.decode()
# strip the padding ("\x00")
output = output.strip("\x00")
elif dtype == int:
ssize = struct.calcsize("i") * num_values
output = np.asarray(struct.unpack("i" * num_values, f.read(ssize)))
if len(output) == 1:
output = int(output)
elif dtype == float:
ssize = struct.calcsize("f") * num_values
output = np.asarray(struct.unpack("f" * num_values, f.read(ssize)))
if len(output) == 1:
output = float(output)
else:
raise TypeError(f"datatype '{dtype}' was not valid")
# shift cursor if need be (e.g. if a reserved section follows)
if cursor_shift:
f.seek(cursor_shift, 1)
return output
def open_path(path: str):
"""Open the specified path in the system default viewer."""
if os.name == "darwin":
launcher = "open"
elif os.name == "posix":
launcher = "xdg-open"
elif os.name == "nt":
launcher = "explorer"
subprocess.call([launcher, path])
def file_exists(filename: str):
"""Check if the file exists and if it does add a timestamp"""
if osp.exists(filename):
filename, ext = osp.splitext(filename)
mytime = datetime.now().strftime("%Y%m%d%H%M%S")
filename = filename + mytime + ext
return filename
| [
"me@simonbiggs.net"
] | me@simonbiggs.net |
d953e11c4a8d357d4c6c96235dddf34c3f007316 | b0ea541c0aef0fa8946aef3130490dc4fa068e9b | /ABC_PS1_Final/catkin_ws/build/learning_ros_noetic/Part_5/object_grabber/catkin_generated/pkg.develspace.context.pc.py | 09a9f2fa704bbcae1cffdeb519029b82389c0271 | [] | no_license | ABCaps35/ECSE473_ABC | b66c8288412a34c72c858e16fd2f93540291b8ff | f03b9ec90317dd730aa723cb7fa7254ea03e412f | refs/heads/master | 2023-03-09T09:46:47.963268 | 2021-02-11T03:44:19 | 2021-02-11T03:44:19 | 337,913,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 830 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/abcaps35/catkin_ws/devel/include;/home/abcaps35/catkin_ws/src/learning_ros_noetic/Part_5/object_grabber/include".split(';') if "/home/abcaps35/catkin_ws/devel/include;/home/abcaps35/catkin_ws/src/learning_ros_noetic/Part_5/object_grabber/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;roscpp;std_msgs;geometry_msgs;cartesian_planner;tf;xform_utils;object_manipulation_properties;generic_gripper_services".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lobject_grabber_lib;-lobject_grabber_lib2".split(';') if "-lobject_grabber_lib;-lobject_grabber_lib2" != "" else []
PROJECT_NAME = "object_grabber"
PROJECT_SPACE_DIR = "/home/abcaps35/catkin_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"acapelli345@gmail.com"
] | acapelli345@gmail.com |
0037dab49f23b6ce338fa052ef1a4f5907b18201 | 4b44a299bafbd4ca408ce1c89c9fe4a449632783 | /python3/13_OOP/47_Double_Linked_list.py | ee1958aedd7227c94122e736960e288a86341345 | [] | no_license | umunusb1/PythonMaterial | ecd33d32b2de664eaaae5192be7c3f6d6bef1d67 | 1e0785c55ccb8f5b9df1978e1773365a29479ce0 | refs/heads/master | 2023-01-23T23:39:35.797800 | 2020-12-02T19:29:00 | 2020-12-02T19:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #!/usr/bin/python
"""
Purpose: Double Linked List
h e l l o
3 4 6 8 9
n1 n2 n3
pre_n_add None 3 4
value h e l l o
next_n_add 4 6 8
"""
class DoubleLinkedList:
def __init__(self, data, prev_nd_addr=None, next_nd_addr=None):
self.data = data
self.prev_nd_addr = prev_nd_addr
self.next_nd_addr = next_nd_addr
def set_prev_node_address(self, prev_n_add):
self.prev_nd_addr = prev_n_add
def set_next_node_address(self, next_n_add):
self.next_nd_addr = next_n_add
def __repr__(self):
return f'{self.prev_nd_addr}|{self.data}|{self.next_nd_addr}'
d1 = DoubleLinkedList(10)
print(d1)
d2 = DoubleLinkedList(20)
print(d2)
d3 = DoubleLinkedList(30)
print(d3)
d1.set_next_node_address(id(d2))
d2.set_prev_node_address(id(d1))
d2.set_next_node_address(id(d3))
d3.set_prev_node_address(id(d2))
print()
print(d1)
print(d2)
print(d3)
# Assignment L create a double linked list for word 'hello'
'''
id()|h|id() e l l o
'''
| [
"uday3prakash@gmail.com"
] | uday3prakash@gmail.com |
8d9f5d7fe3cdbefae918b1115493c758f326a538 | 1c91439673c898c2219ee63750ea05ff847faee1 | /configs/_base_/models/swin_transformer_v2/base_256.py | f711a9c8dcebf644d0479a887e4383a630c67384 | [
"Apache-2.0"
] | permissive | ChenhongyiYang/GPViT | d7ba7f00d5139a989a999664ab0874c5c9d53d4d | 2b8882b2da41d4e175fe49a33fcefad1423216f4 | refs/heads/main | 2023-06-08T00:10:07.319078 | 2023-05-26T15:52:54 | 2023-05-26T15:52:54 | 577,075,781 | 78 | 2 | null | null | null | null | UTF-8 | Python | false | false | 857 | py | # model settings
model = dict(
type='ImageClassifier',
backbone=dict(
type='SwinTransformerV2',
arch='base',
img_size=256,
drop_path_rate=0.5),
neck=dict(type='GlobalAveragePooling'),
head=dict(
type='LinearClsHead',
num_classes=1000,
in_channels=1024,
init_cfg=None, # suppress the default init_cfg of LinearClsHead.
loss=dict(
type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'),
cal_acc=False),
init_cfg=[
dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.),
dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
],
train_cfg=dict(augments=[
dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
]))
| [
"chenhongyiyang@Chenhongyis-MacBook-Pro.local"
] | chenhongyiyang@Chenhongyis-MacBook-Pro.local |
ca4c6b2353645358b198d8ca20aaa41fea654678 | 2f0cb310e2ec8fb176ee240aa964a7eef5ed23b4 | /giico/quality_control_and_material_testing/doctype/organic_impurities/test_organic_impurities.py | 899ff309e9d723fb0ae2427dada4a1eea3848932 | [
"MIT"
] | permissive | thispl/giico | b96cf6b707f361275f8723d15f8ea1f95f908c9c | 14c5631639ab56a586a7962be9871d722c20e205 | refs/heads/master | 2021-06-18T03:56:02.928303 | 2021-04-27T06:42:59 | 2021-04-27T06:42:59 | 200,183,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2021, VHRS and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestOrganicImpurities(unittest.TestCase):
pass
| [
"hereabdulla@gmail.com"
] | hereabdulla@gmail.com |
f057dda260993448165227e609ab66b028018f15 | 1285703d35b5a37734e40121cd660e9c1a73b076 | /at_coder/abc/old/126/d.py | 0892272a0581a2cc5c7ac9868ed404a890a8fa87 | [] | no_license | takin6/algorithm-practice | 21826c711f57131108168775f08e4e13d07a3b38 | f4098bea2085a77d11c29e1593b3cc3f579c24aa | refs/heads/master | 2022-11-30T09:40:58.083766 | 2020-08-07T22:07:46 | 2020-08-07T22:07:46 | 283,609,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | N = int(input())
adj = [ [] for _ in range(N) ]
for _ in range(N):
u,v,w = map(int,input().split())
u -= 1
v -= 1
adj[u].append((v,w))
adj[v].append((u,w))
def dfs(node, w, color):
colors[node] = color
for n,nw in adj[node]:
if colors[n] != -1:
if (w+nw)%2 == 1 and colors[n] == 0: continue
if (w+nw)%2 == 0 and colors[n] == 1: continue
return False
else:
nc = 1 if w+nw%2==0 else 0
if not dfs(node, nw, nc):
return False
return True
colors = [-1] * N
for i in range(N):
if colors[i] == -1:
dfs(i, 1)
for c in colors: print(c) | [
"takayukiinoue116@gmail.com"
] | takayukiinoue116@gmail.com |
a462ab5e1744a75fa107be73da72ae72b7366260 | 5980a1a0ae2bed966dc9d06a1e3f3b4929e17f04 | /director/data_format/dot_dict.py | e8df212277ea5fadfd3e6ef8e8a398eb9ee4b6b9 | [] | no_license | coblan/helpers | 4aa4c6846cacf5f4a176e2de2fade0e98bd8126f | b2d3cb2583ce7469c88165968a8e74cda6e8cf11 | refs/heads/master | 2023-08-24T22:37:13.239226 | 2023-08-23T07:38:22 | 2023-08-23T07:38:22 | 83,221,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | class DotObj(object):
def __init__(self,dc):
for k,v in dc.items():
setattr(self,k,v)
def __getattr__(self,name):
try:
return object.__getattr__(self,name)
except AttributeError:
return '' | [
"he_yulin@163.com"
] | he_yulin@163.com |
f00fc45c49e835d6c5baeef7d26a870c3c2cd541 | cedab14839cfc276f028436ba79d103a8aff0d5b | /Philippines/Subject3_Math/E3_Math_StreetView/1_PassFailSplit.py | 4bc194e252404379e22294a537ebb7586a9e8911 | [] | no_license | wmgeolab/schoolCNN | aa686a4103695c1e10f5afa68ec2919761d33c15 | 1c73ec90732ec565ce552b27e4b2108a8ee916da | refs/heads/master | 2021-01-09T03:25:44.895023 | 2020-02-21T22:52:41 | 2020-02-21T22:52:41 | 242,230,029 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | import pandas as pd
import shutil
import os
# Read in the data
df = pd.read_csv("./Philippines/Subject3_Math/E3_Math_StreetView/data/y1314_Math.csv")
df.head()
# Set up the base directory
directory = "./Philippines/Subject3_Math/E3_Math_StreetView/data/imagery/"
for filename in os.listdir(directory):
# The firt 6 characters in the file's path name are the school's unique ID number
schoolID = filename[0:6]
# Use the school's ID to subset the dataframe to that school
subset = df[df['school_id'] == int(schoolID)]
# Construct the name of the file that will be copied into the pass or fail folder
fname = directory + filename
# If the school's intervention value is 1, move it into the fail folder (the school scored below average on the NAT)
if subset['intervention'].tolist()[0] == 1:
shutil.copy(fname, "./Philippines/Subject3_Math/E3_Math_StreetView/data/fail/")
# If the school's intervention value is 0, move it into the pass folder (the school scored above average on the NAT)
if subset['intervention'].tolist()[0] == 0:
shutil.copy(fname, "./Philippines/Subject3_Math/E3_Math_StreetView/data/pass/")
| [
"hmbaier@email.wm.edu"
] | hmbaier@email.wm.edu |
5cfdfcac2d6b71e28041fc9cbcbab1ca89063cc2 | 346efbc9dbbb1d656fd579400530c0269dfce56d | /codeforces/1409/d.py | ec0869bd1541bf903931095714622d10d2f4ed60 | [] | no_license | lmun/competitiveProgramingSolutions | 1c362e6433fc985e371afe88f08277268c46afde | 06d62240e2b3c58dd9ee72e41a78f7246d966652 | refs/heads/master | 2023-08-24T04:52:04.218922 | 2021-10-29T15:06:28 | 2021-10-29T15:06:28 | 167,073,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | def a(n,s):
if sum(map(int,str(n)))<=s:
return 0
return 10-n%10+10*a(1+n//10,s) if n%10 else 10*a(n//10,s)
for t in range(int(input())):
print(a(*map(int,input().split()))) | [
"lester@ug.uchile.cl"
] | lester@ug.uchile.cl |
1372f4114d3691052c65bfba4ddb42ea9662728d | 2e6c95871bd255873fb563347c0f070e6fcdde74 | /rf-grid-search.py | df196f6c517feac250666fcca3ebd1f5ff59ccc2 | [] | no_license | MSBradshaw/BioHackathon2020 | 3203c5232bebd70d2c2a88b7f49063a09da023c4 | 31826b698a408541200b6f75bfe9c03217bf2d1a | refs/heads/master | 2022-08-05T11:57:32.221444 | 2020-05-29T17:30:29 | 2020-05-29T17:30:29 | 258,961,184 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
import pandas as pd
from bs4 import BeautifulSoup
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestClassifier
print('RANDOM FOREST')
train = pd.read_csv('train.csv')
abstracts = [BeautifulSoup(x).get_text() for x in train['abstract']]
tfidf = TfidfVectorizer()
X = tfidf.fit_transform(abstracts)
y = train['type'].to_numpy()
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [
{'n_estimators': [100, 200, 300, 400, 500, 600, 700, 800, 900], 'max_features': ['auto', 'sqrt', 'log2', None],'criterion':['gini','entropy']}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
RandomForestClassifier(), tuned_parameters, scoring='%s_macro' % score
)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
| [
"michaelscottbradshaw@gmail.com"
] | michaelscottbradshaw@gmail.com |
8202e71838d04d0b2b4ccf5c3a73e1b423a6495a | 933ed73cdf117fc6c88c1ebba7a17b82807a16e8 | /docs/02.AI_ML/code-1805/Day06all/audio.py | a484f78da089d31453fcf1e9aa31a0039d15fc49 | [
"Apache-2.0"
] | permissive | wan230114/PythonNote | c4fff80f6f3849ed0b0346526d3c6197a4867d2c | f4989a44c03efdcea3f7aa3484e3470e7fd983eb | refs/heads/master | 2023-01-28T14:51:07.304066 | 2023-01-12T18:38:16 | 2023-01-12T18:38:16 | 242,367,069 | 5 | 6 | Apache-2.0 | 2021-01-05T23:35:10 | 2020-02-22T15:45:01 | JavaScript | UTF-8 | Python | false | false | 886 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import numpy.fft as nf
import scipy.io.wavfile as wf
import matplotlib.pyplot as mp
sample_rate, sigs = wf.read('../../data/freq.wav')
sigs = sigs / 2 ** 15
times = np.arange(len(sigs)) / sample_rate
freqs = nf.fftfreq(len(sigs), d=1 / sample_rate)
ffts = nf.fft(sigs)
pows = np.abs(ffts)
mp.figure('Audio', facecolor='lightgray')
mp.subplot(121)
mp.title('Time Domain', fontsize=16)
mp.xlabel('Time', fontsize=12)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times, sigs, c='dodgerblue')
mp.subplot(122)
mp.title('Frequency Domain', fontsize=16)
mp.xlabel('Frequency', fontsize=12)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(freqs[freqs >= 0], pows[freqs >= 0], c='orangered')
mp.tight_layout()
mp.show()
| [
"1170101471@qq.com"
] | 1170101471@qq.com |
7b893c27f6bb7e81de39e943a5200791f394b746 | 65a5f74ede8079d693a70bd9597f063acfbf33fd | /tasks/mc6800_hp53xx/hp5370.py | eeeb858175d2527d953467f6cbdd6f7c54c464ae | [] | no_license | pombredanne/PyRevEng | 5372a8322313c81ce065c689eb4b816596c90f8b | 08083744806258cfa31edd0132456d70377a9f71 | refs/heads/master | 2020-09-20T06:55:24.020304 | 2019-11-11T17:18:38 | 2019-11-11T17:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | #!/usr/local/bin/python
#
# Functions common to:
# HP 5370A
# HP 5370B
# PyRevEng classes
import tree
import math
import const
def chargen(p, adr):
for a in range(adr, adr + 16):
const.seven_segment(p, a)
p.setlabel(adr, "CHARGEN")
#######################################################################
#
def keyboard_dispatch(p, cpu, adr = 0x7962):
assert p.m.rd(adr) == 0xce
assert p.m.rd(adr + 3) == 0x7e
ptr = p.m.b16(adr + 1)
ii = cpu.disass(adr + 3, "ins")
const.ptr(p, ptr, 2)
tbl = p.m.b16(ptr)
aa = tbl
xx = dict()
for col in range(8,0,-1):
p.setlabel(aa, "Keyboard_Column_%d" % col)
for row in range(1,5):
x = const.ptr(p, aa, 2)
dd = p.m.b16(aa)
cpu.disass(dd)
if dd not in xx:
ii.flow("call", "XFUNC", dd)
xx[dd] = True
aa += 2
x = p.t.add(tbl, aa, "tbl")
x.blockcmt += "-\nDispatch table for Keyboard commands\n"
p.setlabel(p.m.b16(tbl + 4), "KEY_Ext_Arm")
p.setlabel(p.m.b16(tbl + 6), "KEY_UNDEF")
p.setlabel(p.m.b16(tbl + 10), "KEY_Ext_Hold_Off")
p.setlabel(p.m.b16(tbl + 14), "KEY_Reset")
#######################################################################
# List of two-letter HPIB commands
#
def hpib_cmd_table(p, adr, len = 26):
p.hpib_cmd = list()
x = p.t.add(adr, adr + 2 * len, "cmd-table")
x.blockcmt += "-\nTable of two-letter HPIB commands\n"
for i in range(adr, adr + 2 * len, 2):
const.txtlen(p,i,2)
p.hpib_cmd.append([p.m.ascii(i, 2),])
def hpib_arg_range(p, adr, len = 14):
x = p.t.add(adr, adr + len * 2, "arg-range")
x.blockcmt += "-\nTable of legal range of numeric argument for HPIB cmd"
for i in range(0, len):
aa = adr + i * 2
x = const.byte(p, aa, 2)
l = p.m.rd(aa)
h = p.m.rd(aa + 1)
x.lcmt(p.hpib_cmd[i][0] + "[%d-%d]" % (l,h))
p.hpib_cmd[i].append(l)
p.hpib_cmd[i].append(h)
def hpib_tbl_idx(p, adr):
aa = adr
for i in p.hpib_cmd:
if len(i) == 1:
break
x = const.byte(p, aa)
i.append(p.m.rd(aa))
x.lcmt(i[0])
aa += 1
x = p.t.add(adr, aa, "idx-table")
x.blockcmt += "-\nIndex into cmd table, add numeric arg"
def dispatch_table_arg(p, adr, cpu):
assert p.m.rd(adr) == 0xce
assert p.m.rd(adr + 3) == 0x7e
ptr = p.m.b16(adr + 1)
ii = cpu.disass(adr + 3, "ins")
const.ptr(p, ptr, 2)
tbl = p.m.b16(ptr)
aa = tbl
xx = dict()
for i in p.hpib_cmd:
if len(i) == 1:
break
for j in range(i[1], i[2] + 1):
x = const.ptr(p, aa, 2)
y = i[0] + "%d" % j
dd = p.m.b16(aa)
cpu.disass(dd)
if dd not in xx:
ii.flow("call", "XFUNC", dd)
xx[dd] = True
p.setlabel(dd, "CMD_" + y + "_" + gpib_expl[y])
aa += 2
x = p.t.add(tbl, aa, "idx-table")
x.blockcmt += "-\nDispatch table for HPIB cmds with arg"
def dispatch_table_noarg(p, adr, cpu):
assert p.m.rd(adr) == 0xce
assert p.m.rd(adr + 3) == 0x7e
ptr = p.m.b16(adr + 1)
ii = cpu.disass(adr + 3, "ins")
const.ptr(p, ptr, 2)
tbl = p.m.b16(ptr)
aa = tbl
xx = dict()
for i in p.hpib_cmd:
if len(i) > 1:
continue
x = const.ptr(p, aa, 2)
y = i[0]
dd = p.m.b16(aa)
cpu.disass(dd)
if dd not in xx:
ii.flow("call", "XFUNC", dd)
xx[dd] = True
p.setlabel(dd, "CMD_" + y + "_" + gpib_expl[y])
aa += 2
x = p.t.add(tbl, aa, "idx-table")
x.blockcmt += "-\nDispatch table for HPIB cmds without arg\n"
# Explanation of the HP5370[AB] HPIB Commands
gpib_expl = {
"FN1": "Time Interval",
"FN2": "Trigger Levels",
"FN3": "Frequency",
"FN4": "Period",
"FN5": "???",
"GT1": "Single Period",
"GT2": "0.01s",
"GT3": "0.1s",
"GT4": "1s",
"ST1": "Mean",
"ST2": "StdDev",
"ST3": "Min",
"ST4": "Max",
"ST5": "Disp Ref",
"ST6": "Clr Ref",
"ST7": "Disp Evts",
"ST8": "Set Ref",
"ST9": "Disp All",
"SS1": "Sample Size = 1",
"SS2": "Sample Size = 100",
"SS3": "Sample Size = 1k",
"SS4": "Sample Size = 10k",
"SS5": "Sample Size = 100k",
"MD1": "FP Rate",
"MD2": "Hold until MR",
"MD3": "Fast",
"MD4": "Fast + SRQ",
"IN1": "Input: Start+Stop",
"IN2": "Input: Stop+Stop",
"IN3": "Input: Start+Start",
"IN4": "Input: Stop+Start",
"SA1": "Start Pos",
"SA2": "Start Neg",
"SO1": "Stop Pos",
"SO2": "Stop Neg",
"SE1": "Arm Pos",
"SE2": "Arm Neg",
"AR1": "+T.I. Arming Only",
"AR2": "+T.I. Arming",
"EH0": "Ext Holdoff dis",
"EH1": "Ext Holdoff ena",
"EA0": "Ext Arm dis",
"EA1": "Ext Arm ena",
"IA1": "Internal Arm Auto",
"IA2": "Start Chan Arm",
"IA3": "Stop Chan Arm",
"MR": "Manual Rate",
"MI": "Manual Input",
"SL": "Slope Local",
"SR": "Slope Remote",
"TL": "Trigger Local",
"TR": "Trigger Remote",
"TE": "Teach",
"PC": "Period Complement",
"TB0": "Ascii",
"TB1": "Binary",
"SB": "Sample Size Binary",
"LN": "Learn",
"TA": "Trigger Start",
"TO": "Trigger Stop",
}
#######################################################################
# HP5370B uses its own (weird|smart) floating point format.
#
# As far as I can tell, it looks like this: S{1}M{47}E{8} where the
# exponent is 2's complement. But there are two scaling factors
# involved, so the value is: (S * M{31.16} * 2^e * 5e-9)
#
# XXX: Hmm, the mantissa may be a 32.16 2' complement number...
#
def float_render(p, a):
x = p.m.rd(a + 0)
if x & 0x80:
s = -1
x ^= 0x80
else:
s = 1
m = math.ldexp(x, 24)
m += math.ldexp(p.m.rd(a + 1), 16)
m += math.ldexp(p.m.rd(a + 2), 8)
m += math.ldexp(p.m.rd(a + 3), 0)
m += math.ldexp(p.m.rd(a + 4), -8)
m += math.ldexp(p.m.rd(a + 5), -16)
e = p.m.s8(a + 6)
v = math.ldexp(m * 5e-9, e)
x = "%.9e" % v
if x.find(".") == -1 and x.find("e") == -1:
x = x + "."
print("FLOAT", "%x" % a, x)
return x
class float(tree.tree):
def __init__(self, p, adr):
tree.tree.__init__(self, adr, adr + 7, "dot_float")
p.t.add(adr, adr + 7, "dot-float", True, self)
self.render = self.rfunc
self.nbr = float_render(p, adr)
self.a['const'] = "FP=" + self.nbr
def rfunc(self, p, t):
s = ".FLOAT\t%s" % self.nbr
return (s,)
###########################################################
def dsp_dispatch(p, cpu, adr = 0x683b):
assert p.m.rd(adr) == 0xce
assert p.m.rd(adr + 3) == 0xbd
tbl = p.m.b16(adr + 1)
ii = cpu.disass(adr + 3)
p.setlabel(tbl, "DSP_FUNC_TABLE")
x=p.t.add(tbl, tbl + 8 * 2, "tbl")
x.blockcmt += "-\nTable of display functions\n"
dspf= ("AVG", "STD", "MIN", "MAX", "REF", "EVT", "DS6", "ALL")
j=0
for i in range(tbl, tbl + 8 * 2, 2):
x = const.ptr(p, i, 2)
w = p.m.b16(i)
p.setlabel(w, "DSP_" + dspf[j])
ii.flow("call", "DSPFUNC", w)
cpu.disass(w)
j += 1
###########################################################
#x = p.t.add(0x6f00,0x7000, "tbl")
#x.blockcmt += "Table of I^2>>8\n"
def sqare_table_render(p, t):
return (
"FOR I (0..255):",
" .BYTE ((I * I) >> 8)",
""
)
def square_table(p, adr = 0x6f00):
x = p.t.add(0x6f00,0x7000, "tbl")
x.blockcmt += "-\nTable of I^2>>8\n"
x.render = sqare_table_render
x.fold = True
| [
"phk@FreeBSD.org"
] | phk@FreeBSD.org |
7eb892b540bec24047ab9b270b2878817367efbe | a0947c2778742aec26b1c0600ceca17df42326cd | /Python/PythonInADay2/CSV-Files-Drill/37of79-76.py | 5d3b516908848e56823b4441139499a3dc51e4cb | [] | no_license | JohnCDunn/Course-Work-TTA | 5758319d4607114914ba9723328658bed8fb2024 | 8c4f60d51007dac2ac4cceb84b0f9666e143c0d7 | refs/heads/master | 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import wx
class Frame(wx.Frame):
def __init__(self, title):
wx.Frame.__init__(self, None,\
title=title, size=(300,200))
panel = wx.Panel(self)
button = wx.Button(panel,label="Exit",size=(100,40),pos=(100,30))
# Bind button event to the function self.exit
button.Bind(wx.EVT_BUTTON, self.exit)
# Create menu bar
menuBar = wx.MenuBar()
# Create wx menus
fileMenu = wx.Menu()
editMenu = wx.Menu()
# Add items to fileMenu
fileMenu.Append(wx.NewId(), "New File")
fileMenu.Append(wx.NewId(), "Open")
fileMenu.Append(wx.NewId(), "Exit")
# Add fileMenu and editMenu to menuBar
menuBar.Append(fileMenu, "File")
menuBar.Append(editMenu, "Edit")
self.SetMenuBar(menuBar)
def exit(self, event):
self.Destroy()
app = wx.App()
frame = Frame("Python GUI")
frame.Show()
app.MainLoop()
| [
"JohnClydeDunn@Gmail.com"
] | JohnClydeDunn@Gmail.com |
158f9e632271af09efccef3413b918b3039ae34d | 5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba | /algorithm-study/baekjun/1655.py | 5e0d2afac622a64349c666e62c305e6b93e0a95a | [] | no_license | namujinju/study-note | 4271b4248b3c4ac1b96ef1da484d86569a030762 | 790b21e5318a326e434dc836f5f678a608037a8c | refs/heads/master | 2023-02-04T13:25:55.418896 | 2020-12-26T10:47:11 | 2020-12-26T10:47:11 | 275,279,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import heapq as hq
import sys
n = int(input())
arr = []
min_heap = []
for _ in range(n):
answer = []
i = int(sys.stdin.readline())
arr.append(i)
for j in arr:
hq.heappush(min_heap, j)
for j in range(len(min_heap)):
answer.append(hq.heappop(min_heap))
print(answer[(len(answer)-1)//2])
| [
"59328810+namujinju@users.noreply.github.com"
] | 59328810+namujinju@users.noreply.github.com |
daf2fd690c88a4240ea99c0bad37cc113a3714e3 | 1a0e2871897fec3d345653fc3502909d9d2c48c0 | /cellprofiler_core/analysis/request/_debug_complete.py | bfe423f13d1202e2543f6980897e577b1bff1039 | [] | no_license | citypalmtree/core | 91ec8d75a09240c57a64708985a2e13fdeaab23d | 51f7aa318f126d4ec7be59780c8f88d4208b0ab1 | refs/heads/master | 2022-11-15T03:13:01.843990 | 2020-07-08T01:56:39 | 2020-07-08T01:56:39 | 277,964,195 | 0 | 0 | null | 2020-07-08T01:54:03 | 2020-07-08T01:54:02 | null | UTF-8 | Python | false | false | 204 | py | import cellprofiler_core.utilities.zmq.communicable.request._analysis_request
class DebugComplete(
cellprofiler_core.utilities.zmq.communicable.request._analysis_request.AnalysisRequest
):
pass
| [
"allen.goodman@icloud.com"
] | allen.goodman@icloud.com |
ec2100ffe81fc6bc9ee3ca2204b9dd7491bff4ad | 7a20dac7b15879b9453150b1a1026e8760bcd817 | /Curso/Challenges/URI/1154Ages.py | 28441b2097ef52e9af04e5429015523bfb5896f7 | [
"MIT"
] | permissive | DavidBitner/Aprendizado-Python | 7afbe94c48c210ddf1ab6ae21109a8475e11bdbc | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | refs/heads/master | 2023-01-02T13:24:38.987257 | 2020-10-26T19:31:22 | 2020-10-26T19:31:22 | 283,448,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | soma = media = 0
while True:
n = int(input())
if n < 0:
break
media += n
soma += 1
media /= soma
print(f"{media:.2f}")
| [
"david-bitner@hotmail.com"
] | david-bitner@hotmail.com |
ca699a7935f6cc559fb7425359c4f7e78b6d3cb4 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoJets/JetProducers/python/TracksForJets_cff.py | 059b08f72f44f40aa58bc9d58c52963f54292b51 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 264 | py | import FWCore.ParameterSet.Config as cms
from SimGeneral.HepPDTESSource.pythiapdt_cfi import *
trackRefsForJets = cms.EDProducer("ChargedRefCandidateProducer",
src = cms.InputTag('trackWithVertexRefSelector'),
particleType = cms.string('pi+')
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
966dba2f8f26a6952139c9d1757edd97074c1c7a | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/2 kyu/Assembler interpreter part II 58e61f3d8ff24f774400002c.py | fdbc8ca03e20526570fb2185924e2f3f83a72eed | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | # https://www.codewars.com/kata/58e61f3d8ff24f774400002c
class Frame:
def __init__(self, ip=0):
self.ip = ip
self.cmp = 0
def mov(operands, frame):
registers[operands[0]] = registers[operands[1]] if operands[1].isalpha() else int(operands[1])
frame.ip += 1
def inc(operands, frame):
registers[operands[0]] += 1
frame.ip += 1
def dec(operands, frame):
registers[operands[0]] -= 1
frame.ip += 1
def add(operands, frame):
registers[operands[0]] += registers[operands[1]] if operands[1].isalpha() else int(operands[1])
frame.ip += 1
def sub(operands, frame):
registers[operands[0]] -= registers[operands[1]] if operands[1].isalpha() else int(operands[1])
frame.ip += 1
def mul(operands, frame):
registers[operands[0]] *= registers[operands[1]] if operands[1].isalpha() else int(operands[1])
frame.ip += 1
def div(operands, frame):
registers[operands[0]] //= registers[operands[1]] if operands[1].isalpha() else int(operands[1])
frame.ip += 1
def jmp(operands, frame):
frame.ip = labels_map[operands[0]]
def call(operands, frame):
frames.append(Frame(labels_map[operands[0]]))
frame.ip += 1
def ret(operands, frame):
frames.pop()
def end(operands, frame):
global success
success = True
frames.pop()
def msg(operands, frame):
global output
output = ''.join(operand[1:-1] if operand[0] == "'" else str(registers[operand]) for operand in operands)
frame.ip += 1
def cmp(operands, frame):
frame.cmp = (registers[operands[0]] if operands[0].isalpha() else int(operands[0])) \
- (registers[operands[1]] if operands[1].isalpha() else int(operands[1]))
frame.ip += 1
def jne(operands, frame):
if frame.cmp != 0:
frame.ip = labels_map[operands[0]]
else:
frame.ip += 1
def je(operands, frame):
if frame.cmp == 0:
frame.ip = labels_map[operands[0]]
else:
frame.ip += 1
def jge(operands, frame):
if frame.cmp >= 0:
frame.ip = labels_map[operands[0]]
else:
frame.ip += 1
def jg(operands, frame):
if frame.cmp > 0:
frame.ip = labels_map[operands[0]]
else:
frame.ip += 1
def jle(operands, frame):
if frame.cmp <= 0:
frame.ip = labels_map[operands[0]]
else:
frame.ip += 1
def jl(operands, frame):
if frame.cmp < 0:
frame.ip = labels_map[operands[0]]
else:
frame.ip += 1
def parse(program):
instructions = []
instruction = []
token = ''
i = 0
end = False
while i < len(program):
c = program[i]
if c == "'":
token += c
i += 1
while i < len(program) and program[i] != "'":
token += program[i]
i += 1
token += program[i]
elif c == ';':
if instruction:
instructions.append(instruction)
instruction = []
i += 1
while i < len(program) and program[i] != '\n':
i += 1
elif c == ':':
labels_map[token] = len(instructions)
token = ''
elif c == '\n':
if token:
instruction.append(token)
end |= token == 'end'
token = ''
instructions.append(instruction)
instruction = []
elif c == ' ' or c == ',' or c == '\t':
if token:
end |= token == 'end'
instruction.append(token)
token = ''
else:
token += c
i += 1
if token:
end |= token == 'end'
instruction.append(token)
instructions.append(instruction)
return instructions if end else None
def assembler_interpreter(program):
global registers, labels_map, frames, success, output
registers = {}
labels_map = {}
frames = [Frame()]
success = False
output = None
instructions_map = {
'mov': mov, 'inc': inc, 'dec': dec, 'add': add, 'sub': sub, 'mul': mul, 'div': div, 'jmp': jmp, 'cmp': cmp,
'jne': jne, 'je': je, 'jge': jge, 'jg': jg, 'jle': jle, 'jl': jl, 'call': call, 'ret': ret, 'msg': msg,
'end': end
}
instructions = parse(program)
if instructions is None:
return -1
while frames and frames[-1].ip < len(instructions):
instruction, *operands = instructions[frames[-1].ip]
instructions_map[instruction](operands, frame=frames[-1])
return output if success else -1 | [
"alichek95@mail.ru"
] | alichek95@mail.ru |
33bea5a81924a0c881a7e98ae59251af0b7efea9 | 4d07dfc5005ffe1d40337f99dea2ce20a5454a4e | /call-management/rule-management/create-company-greeting/code-samples/createCompanyGreeting.py | 69c0da247695ca7fa8e303df8fcf3c657f204ab1 | [] | no_license | ringcentral/ringcentral-api-code-samples | c6160c7cf305ec01709ddf87e830a513e3b2d17e | 92d75734e82809c56ae572b1a0347d5e8c222a0e | refs/heads/master | 2021-12-25T02:09:17.653787 | 2019-08-30T22:06:03 | 2019-08-30T22:06:03 | 204,058,290 | 2 | 4 | null | 2021-11-30T14:37:01 | 2019-08-23T19:28:26 | C# | UTF-8 | Python | false | false | 753 | py | # https://developers.ringcentral.com/my-account.html#/applications
# Find your credentials at the above url, set them as environment variables, or enter them below
# PATH PARAMETERS
accountId = '<ENTER VALUE>'
import os
from ringcentral import SDK
rcsdk = SDK(os.environ['clientId'], os.environ['clientSecret'], os.environ['serverURL'])
platform = rcsdk.platform()
platform.login(os.environ['username'], os.environ['extension'], os.environ['password'])
builder = rcsdk.create_multipart_builder()
builder.set_body({
'type': 'Company'
})
binary = ('mygreeting.wav', open('mygreeting.wav','r').read(), 'audio/wav')
builder.add(binary)
request = builder.request(f'/restapi/v1.0/account/{accountId}/greeting')
resp = platform.send_request(request)
| [
"drew.ligman@gmail.com"
] | drew.ligman@gmail.com |
b7c88331aa45890842ae86b76706f18dc7eec82d | e9757274ddb8484e27590ff0cc3f24550776c6cc | /Solved/0118/0118.py | 27c242f3f3dfd5e5fd672ace23028e7362ab786a | [] | no_license | Jinmin-Goh/LeetCode | 948a9b3e77eb03507aad6f3c78640aa7f00e6ad5 | d6e80b968032b08506c5b185f66d35c6ff1f8bb9 | refs/heads/master | 2020-09-22T10:22:18.443352 | 2020-09-06T06:34:12 | 2020-09-06T06:34:12 | 225,153,497 | 1 | 1 | null | 2020-01-29T15:16:53 | 2019-12-01T11:55:25 | Python | UTF-8 | Python | false | false | 666 | py | # Problem No.: 118
# Solver: Jinmin Goh
# Date: 20200115
# URL: https://leetcode.com/problems/pascals-triangle/
import sys
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if not numRows:
return []
if numRows == 1:
return [[1]]
if numRows == 2:
return [[1], [1,1]]
ans = [[1], [1,1]]
for i in range(2, numRows + 1):
print(ans, i)
ans.append([1])
for j in range(1, i - 1):
ans[i].append(ans[i - 1][j - 1] + ans[i - 1][j])
ans[i].append(1)
ans.pop(1)
return ans
| [
"eric970901@gmail.com"
] | eric970901@gmail.com |
2bfd1f9141a25054ee6e3c064715759677b1c827 | bed34365a9dab825fd9f4a4ff1b0863f441266ac | /neutron/tests/unit/services/l3_router/test_l3_router_plugin.py | f045e58567b74fdf465206b618b148d783818291 | [
"Apache-2.0"
] | permissive | openstack/neutron | 0913ee3cd69d5bdb9c10aa084d4e1803abee320c | dde31aae392b80341f6440eb38db1583563d7d1f | refs/heads/master | 2023-08-31T13:09:41.831598 | 2023-08-31T11:37:30 | 2023-08-31T11:37:30 | 2,400,289 | 1,174 | 1,325 | Apache-2.0 | 2022-06-29T08:00:05 | 2011-09-16T16:04:08 | Python | UTF-8 | Python | false | false | 1,192 | py | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.services.l3_router import l3_router_plugin as lrp
from neutron.tests import base
class TestL3PluginDvrConditional(base.BaseTestCase):
def _test_dvr_alias_exposed(self, enabled):
cfg.CONF.set_override('enable_dvr', enabled)
plugin = lrp.L3RouterPlugin()
exposed = 'dvr' in plugin.supported_extension_aliases
self.assertEqual(enabled, exposed)
def test_dvr_alias_exposed_enabled(self):
self._test_dvr_alias_exposed(enabled=True)
def test_dvr_alias_exposed_disabled(self):
self._test_dvr_alias_exposed(enabled=False)
| [
"ihrachys@redhat.com"
] | ihrachys@redhat.com |
73bf36c731ba344577a5b0017978458b51d26d58 | 6a95b330e1beec08b917ff45eccfd6be3fd4629f | /kubernetes/test/test_v1beta1_cluster_role_list.py | 5817779990b99ae2d28c041fd3d6cc6e8c697e19 | [
"Apache-2.0"
] | permissive | TokkoLabs/client-python | f4a83d6540e64861b59e322c951380a670578d7f | f1ad9c6889105d8510472606c98f8d3807f82020 | refs/heads/master | 2023-07-14T01:36:46.152341 | 2017-12-21T21:32:11 | 2017-12-21T21:32:11 | 115,042,671 | 0 | 0 | Apache-2.0 | 2021-08-06T03:29:17 | 2017-12-21T20:05:15 | Python | UTF-8 | Python | false | false | 1,011 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_cluster_role_list import V1beta1ClusterRoleList
class TestV1beta1ClusterRoleList(unittest.TestCase):
""" V1beta1ClusterRoleList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1ClusterRoleList(self):
"""
Test V1beta1ClusterRoleList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_cluster_role_list.V1beta1ClusterRoleList()
pass
if __name__ == '__main__':
unittest.main()
| [
"mehdy@google.com"
] | mehdy@google.com |
52f0f1e13a19477a42d2b64fe74e4c98a2fd2bb5 | 247b4e8e425b06a96a835426304629b39ed4021c | /foo/logs.py | 4e95137361ffe91ec3fc42b4c147f928dce627b3 | [
"MIT"
] | permissive | RobertoPrevato/PythonCLI | 63f19a8e07e44731684aef0e7394afd7b63400fb | 4d4af7cd66ab8f9a5bed2a5d01236ef29753401f | refs/heads/master | 2023-04-04T17:45:38.330844 | 2020-07-06T09:02:17 | 2020-07-06T09:02:17 | 271,034,925 | 2 | 0 | MIT | 2021-04-20T20:01:41 | 2020-06-09T15:06:48 | Python | UTF-8 | Python | false | false | 698 | py | import logging
import logging.handlers
from datetime import datetime
from essentials.folders import ensure_folder
logger = None
def get_app_logger():
global logger
if logger is not None:
return logger
logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
max_bytes = 24 * 1024 * 1024
file_handler = logging.handlers.RotatingFileHandler
now = datetime.now()
ts = now.strftime("%Y%m%d")
ensure_folder(f"logs/{ts}")
handler = file_handler(f"logs/{ts}/app.log", maxBytes=max_bytes, backupCount=5)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.addHandler(logging.StreamHandler())
return logger
| [
"roberto.prevato@gmail.com"
] | roberto.prevato@gmail.com |
279de8adb2b3699b3b64a4025c81b0b05274086a | 4eaa1b9b08914e0a2cc9276363e489ccef19d3a2 | /ch8/greet_users.py | 63aab04ab28079790f3930251a60e913741e89b7 | [] | no_license | melihcanyardi/Python-Crash-Course-2e-Part-I | 69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3 | 0c9b250f512985c04b2c0397f3afaa8bf3a57f17 | refs/heads/main | 2023-03-12T21:43:14.012537 | 2021-03-03T19:23:41 | 2021-03-03T19:23:41 | 344,236,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | def greet_users(names):
"""Print a simple greeting to each user."""
for name in names:
msg = f"Hello, {name.title()}!"
print(msg)
# usernames = ['hannah', 'ty', 'magot']
# greet_users(usernames)
| [
"melihcanyardi@hotmail.com"
] | melihcanyardi@hotmail.com |
9c33821a24871a6e6eabc82aca793dca44554b7d | 38fff7bdefd8d62a740d51329b50d0e1e49258bb | /infra/cifuzz/cifuzz_end_to_end_test.py | 30e28beda330c621ac3a0f5b9d1ee2dc11a5e463 | [
"Apache-2.0"
] | permissive | google/oss-fuzz | 026384c2ada61ef68b147548e830f60730c5e738 | f0275421f84b8f80ee767fb9230134ac97cb687b | refs/heads/master | 2023-08-31T23:30:28.157702 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 63,809,205 | 9,438 | 2,315 | Apache-2.0 | 2023-09-14T20:32:19 | 2016-07-20T19:39:50 | Shell | UTF-8 | Python | false | false | 1,841 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End-to-End tests for CIFuzz."""
import os
import unittest
import run_cifuzz
import test_helpers
CIFUZZ_DIR = os.path.dirname(os.path.abspath(__file__))
EXTERNAL_PROJECT_PATH = os.path.join(CIFUZZ_DIR, 'test_data',
'external-project')
# This test will fail if not run as root because the fuzzer build process
# creates binaries that only root can write to.
# Use a seperate env var to keep this seperate from integration tests which
# don't have this annoying property.
@unittest.skipIf(not os.getenv('END_TO_END_TESTS'),
'END_TO_END_TESTS=1 not set')
class EndToEndTest(unittest.TestCase):
"""End-to-End tests for CIFuzz."""
def setUp(self):
test_helpers.patch_environ(self, runner=True)
def test_simple(self):
"""Simple end-to-end test using run_cifuzz.main()."""
os.environ['REPOSITORY'] = 'external-project'
os.environ['PROJECT_SRC_PATH'] = EXTERNAL_PROJECT_PATH
os.environ['FILESTORE'] = 'no_filestore'
os.environ['NO_CLUSTERFUZZ_DEPLOYMENT'] = 'True'
with test_helpers.docker_temp_dir() as temp_dir:
os.environ['WORKSPACE'] = temp_dir
# TODO(metzman): Verify the crash, affected fuzzers, and other things.
self.assertEqual(run_cifuzz.main(), 1)
| [
"noreply@github.com"
] | google.noreply@github.com |
d00d266ba80663b9ebfeb3cc61a78e1314118b06 | 2491df3f643539e6055bb0b2a4b659474c57491f | /nonRepeating.py | 448672a1d5248162813e6d1da3a085a5576289a6 | [] | no_license | ghilbing/Ejemplos | 85efc91346028b8a3d26d7680d9286b26234c771 | 339a45ef48c9a61002a01f7c823cc42d34fab409 | refs/heads/master | 2021-05-13T13:58:33.010157 | 2018-02-26T20:44:44 | 2018-02-26T20:44:44 | 116,724,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | def nonRepeating(A):
dictionary = {}
for c in A:
if c in dictionary:
dictionary[c] += 1
else:
dictionary[c] = 1
for c in A:
if dictionary[c] == 1:
return c
return None
A = "aabbdbc"
print nonRepeating(A) | [
"ghilbing@gmail.com"
] | ghilbing@gmail.com |
ed6e2fd9aa9f97143e4c73ed7f12ea853cca8b45 | e2f0806ca1cdd887ea40d050a19fa2710427bd38 | /도전 문제/04주차_그리디/1339_단어 수학/banghyungjin_1339.py | bace9ef7dcea98724223d5ffa32bf5cf39bdf06e | [] | no_license | JY-Dev/AlgorithmStudy-1 | 001f94d80097c850c79eeb2bc86971a01aa5bd5d | 2ad1df0fd65c72a6f6d1feeba09f889000ff8c15 | refs/heads/main | 2023-08-21T18:38:18.235994 | 2021-09-28T07:07:11 | 2021-09-28T07:07:11 | 406,208,087 | 1 | 0 | null | 2021-09-14T03:14:32 | 2021-09-14T03:14:31 | null | UTF-8 | Python | false | false | 1,904 | py | import sys
num_of_letters = int(sys.stdin.readline().split()[0]) # 단어의 개수
letters = {} # 알파벳과 그 알파벳이 나온 숫자 딕셔너리
answer = 0 # 정답
for i in range(num_of_letters): # 단어의 개수 만큼
input_letter = sys.stdin.readline().split()[0] # 단어들을 읽어옴
for letter in range(len(input_letter)): # 각 읽어온 단어들을 알파벳 하나씩 나눔
if not(input_letter[letter] in letters): # letters에 해당 알파벳이 없으면
letters[input_letter[letter]] = 10 ** (len(input_letter) - letter - 1) # 새로 넣음 이때 key는 알파벳, value는 해당 알파벳이 가리키는 자리수
else: # letters에 해당 알파벳이 있으면
letters[input_letter[letter]] += 10 ** (len(input_letter) - letter - 1) # 해당 원소의 value에 해당 알파벳이 가리키는 자리수 더해줌
letters = sorted(letters.items(), reverse=True, key=lambda x: (x[1])) # letters를 각 원소의 value 값으로 정렬
for i in range(len(letters)): # letters를 처음부터 탐색
answer += letters[i][1] * (9 - i) # 순서대로 9부터 역순으로 대입 후 value에 곱해서 answer에 더함
print(answer) # 정답 출력
| [
"noreply@github.com"
] | JY-Dev.noreply@github.com |
f36e3d60041a50a234a84aba86475d750f8bf046 | 8b7778d3c65f3688105e43718152da2c734ffa26 | /2.Travel_Request/data/Get_TestData/Get_D1_AM_Data.py | f229dca9ebff929a4e5484ebfc330be6b5e81771 | [] | no_license | zzworkaccount/OpenSourceLibrary | ab49b3f431c0474723dfad966ca09e29b07527eb | 0f99f881eb8a1f4ddebbc5e7676289d01e6ffe19 | refs/heads/main | 2023-01-29T05:02:56.341004 | 2020-12-03T12:05:59 | 2020-12-03T12:05:59 | 315,920,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | # 从Save_TestData拿到测试数据
from tools.util import Utility
class Get_SM_TestData:
# 从Excel中获取登录的测试数据(查询演职人员)
@classmethod
def get_login_excel_data_query_actor(cls, row=0):
login_info = Utility.get_json\
(Utility.get_root_path() + '\\conf\\Excel_conf\\D_AM.conf')[0]
login_data = Utility.get_excel(login_info,row)
return login_data
# 从Excel中获取登录的测试数据(查询演职人员)
@classmethod
def get_login_excel_data_delete_actor(cls, row=0):
login_info = Utility.get_json\
(Utility.get_root_path() + '\\conf\\Excel_conf\\D_AM.conf')[1]
login_data = Utility.get_excel(login_info,row)
return login_data
# 从Excel中获取登录的测试数据(新增演职人员)
@classmethod
def get_login_excel_data_add_actor(cls, row=0):
login_info = Utility.get_json \
(Utility.get_root_path() + '\\conf\\Excel_conf\\D_AM.conf')[2]
login_data = Utility.get_excel(login_info, row)
return login_data
if __name__ == '__main__':
print(Get_SM_TestData.get_login_excel_data_add_actor(1))
| [
"1434895836@qq.com"
] | 1434895836@qq.com |
aa03a2a0ee9d005770a80b0f38d0e4aaf1b36800 | 450448e0ddb786fd13cfe9f6df5aa47573769fdc | /tripleaxisproject/gui/bspline3.py | 2013aaaf1ff207a20edbdcfe3f17b93f76a3155a | [] | no_license | williamratcliff/tripleaxisproject | 70bbd9ab5f7f1d2f30ced18b0887e51a1e3551e8 | 8649730ccc03e7d172ad41db776e2df9b463f3d6 | refs/heads/master | 2021-01-19T20:18:25.875294 | 2018-09-12T20:43:46 | 2018-09-12T20:43:46 | 32,125,247 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | import numpy as N
def max(a, b):
    """Return the elementwise maximum of a and b.

    NOTE: intentionally shadows the builtin max() inside this module;
    the name is kept because the spline code below calls it.

    Accepts numpy arrays (broadcast against each other as usual) and,
    unlike the old (a<b).choose(a,b) form, also plain Python scalars.
    Where the two values compare equal, a is returned (same tie
    behavior as the original comparison-based implementation).
    """
    return N.where(a < b, b, a)
def min(a, b):
    """Return the elementwise minimum of a and b.

    NOTE: intentionally shadows the builtin min() inside this module;
    the name is kept because the spline code below calls it.

    Accepts numpy arrays (broadcast against each other as usual) and,
    unlike the old (a>b).choose(a,b) form, also plain Python scalars.
    Where the two values compare equal, a is returned (same tie
    behavior as the original comparison-based implementation).
    """
    return N.where(a > b, b, a)
def lookup(a, b):
    """For each value in b, return the index at which it would be
    inserted into the sorted array a (leftmost position)."""
    return N.searchsorted(a, b)
def cat(*args):
    """Join any number of array-like pieces end to end (along axis 0)."""
    return N.concatenate(args)
# f, f', f'', f''' = bspline3(knot, control, t, nderiv=0)
# Evaluate the B-spline specified by the given knot sequence and
# control values at the parametric points t. The knot sequence
# should be four elements longer than the control sequence.
# Returns up to p(t), p'(t), p''(t), p'''(t) depending on nderiv.
# bspline3(knot, control, t, clamp=True)
# Clamps the spline to the first/last control values beyond the ends
# of the knot sequence. The default (clamp=False) instead forces the
# spline to zero outside the knot range.
def bspline3(knot, control, t, clamp=False, nderiv=0):
degree = len(knot) - len(control);
if degree != 4: raise ValueError, "must have two extra knots at each end"
if clamp:
# Alternative approach spline is clamped to initial/final control values
control = cat([control[0]]*(degree-1), control, [control[-1]])
else:
# Traditional approach: spline goes to zero at +/- infinity.
control = cat([0]*(degree-1), control, [0])
# Deal with values outside the range
valid = (t > knot[0]) & (t <= knot[-1])
tv = t[valid]
f = N.zeros(t.shape)
df = N.zeros(t.shape)
d2f = N.zeros(t.shape)
d3f = N.zeros(t.shape)
f[t<=knot[0]] = control[0]
f[t>=knot[-1]] = control[-1]
# Find B-Spline parameters for the individual segments
end = len(knot)-1
segment = lookup(knot,tv)-1
tm2 = knot[max(segment-2,0)]
tm1 = knot[max(segment-1,0)]
tm0 = knot[max(segment-0,0)]
tp1 = knot[min(segment+1,end)]
tp2 = knot[min(segment+2,end)]
tp3 = knot[min(segment+3,end)]
P4 = control[min(segment+3,end)]
P3 = control[min(segment+2,end)]
P2 = control[min(segment+1,end)]
P1 = control[min(segment+0,end)]
# Compute second and third derivatives
if nderiv > 1:
# First derivative is available almost for free
# Second or more derivative requires extra computation
Q4 = (P4 - P3) * 3 / (tp3-tm0)
Q3 = (P3 - P2) * 3 / (tp2-tm1)
Q2 = (P2 - P1) * 3 / (tp1-tm2)
R4 = (Q4 - Q3) * 2 / (tp2-tm0)
R3 = (Q3 - Q2) * 2 / (tp1-tm1)
S4 = (R4 - R3) * 1 / (tp1-tm0)
R4 = ( (tv-tm0)*R4 + (tp1-tv)*R3 ) / (tp1 - tm0)
d2f[valid] = R4
d3f[valid] = S4
# Compute function value and first derivative
P4 = ( (tv-tm0)*P4 + (tp3-tv)*P3 ) / (tp3 - tm0)
P3 = ( (tv-tm1)*P3 + (tp2-tv)*P2 ) / (tp2 - tm1)
P2 = ( (tv-tm2)*P2 + (tp1-tv)*P1 ) / (tp1 - tm2)
P4 = ( (tv-tm0)*P4 + (tp2-tv)*P3 ) / (tp2 - tm0)
P3 = ( (tv-tm1)*P3 + (tp1-tv)*P2 ) / (tp1 - tm1)
fastdf = (P4-P3) * 3 / (tp1-tm0)
P4 = ( (tv-tm0)*P4 + (tp1-tv)*P3 ) / (tp1 - tm0)
# Check that fast df calculation matches the direct Q4 calculation.
# if nderiv > 1: print "|fast df - df| = ",norm(df-Q4)
df[valid] = fastdf
f[valid] = P4
if nderiv == 0: return f
elif nderiv == 1: return f,df
elif nderiv == 2: return f,df,d2f
else: return f,df,d2f,d3f
# Assertions left over from original octave code --- I'm not ready
# to write a generic assert yet in Python
#!assert(bspline3([0 0 0 1 1 3 4 6 6 6],[0 0 0 0 0 0],2.2),0,10*eps);
#!assert(bspline3([0 0 0 1 1 3 4 6 6 6],[1 1 1 1 1 1],2.2),1,10*eps);
#!assert(bspline3([0 0 0 0 1 4 5 5 5 5],[1:6],2),761/240,10*eps);
#!assert(bspline3([0 0 0 0 1 4 5 5 5 5],[1:6],[2,2]),[761/240,761/240],10*eps);
#!assert(bspline3([0 0 0 1 1 3 4 6 6 6],[1:6],3.2),4.2976,10*eps);
import numpy as nx
class BSpline3:
    """Manage control points for a parametric cubic B-spline curve.

    Knots are generated automatically as 0..n-1 with doubled end knots,
    matching the clamped evaluation performed by bspline3().
    """
    # TODO: this class doesn't give much control over knots.
    def __init__(self, x, y, clamp=True):
        n = len(x)
        # Doubled end knots so the curve is pulled toward the end points.
        self.knot = nx.concatenate([[0.]*2, range(n), [n-1]*2])
        self.x = x
        self.y = y
        self.clamp = clamp
    def __len__(self):
        """Count the control points."""
        return len(self.x)
    def __getitem__(self, i):
        """Get the control point for knot i as an (x, y) pair."""
        return self.x[i], self.y[i]
    def __setitem__(self, i, pair):
        """Set the control point for knot i from an (x, y) pair."""
        self.x[i],self.y[i] = pair
    def __delitem__(self, i):
        """Delete control point i and the matching interior knot."""
        if i < 0 or i >= len(self.x): raise IndexError
        self.x = nx.delete(self.x,i)
        self.y = nx.delete(self.y,i)
        # The i-th interior knot sits at offset i+2 (after the doubled start).
        self.knot = nx.delete(self.knot,i+2)
        if i == 0:
            # Re-double the new first knot.
            self.knot[0:2] = self.knot[2]
        # NOTE(review): len(self.x) has already shrunk at this point, and the
        # slice [-2:-1] assigns only a single element; presumably this intends
        # to re-double the final knot when the last point is removed -- verify.
        elif i == len(self.x)-2:
            self.knot[-2:-1] = self.knot[-3]
    def __call__(self, t):
        """Evaluate the curve at parameter values t; returns (fx, fy)."""
        fx = bspline3(self.knot,self.x,t,clamp=self.clamp)
        fy = bspline3(self.knot,self.y,t,clamp=self.clamp)
        return fx,fy
    def append(self,x,y):
        """Add a control point at the end, extending the knot range by one."""
        self.x = nx.concatenate([self.x,[x]])
        self.y = nx.concatenate([self.y,[y]])
        k = self.knot[-1]+1
        self.knot = nx.concatenate([self.knot,[k]])
        # Keep the final knot doubled by moving the two knots before last.
        self.knot[-3:-1] = k
    def sample(self,n=400):
        """Sample the curve at n equidistant parameter values."""
        t = nx.linspace(self.knot[2],self.knot[-3],n)
        return self.__call__(t)
def demo():
    """Plot a clamped cubic B-spline together with its knots/control values."""
    import pylab
    t = N.linspace(-1, 7, 40)
    knot = N.array([0, 1, 1, 3, 4, 6], 'f')
    control = N.array([1, 2, 3, 2, 1, 2], 'f')
    # Pad two extra knots on each side to satisfy bspline3's length rule.
    knotseq = cat([knot[0] - 1, knot[0]], knot, [knot[-1], knot[-1] + 1])
    f = bspline3(knotseq, control, t, clamp=True)
    pylab.plot(t, f, '-', knot, control, 'x')
    pylab.show()

if __name__ == "__main__":
    demo()
| [
"william.ratcliff@e28a235e-f944-0410-a937-4d0c1e564b32"
] | william.ratcliff@e28a235e-f944-0410-a937-4d0c1e564b32 |
87c991b0181b8c4ceac044906c173fd51e8341be | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-CoreText/PyObjCTest/test_coretext.py | 1cbd69a8ce38bd6cf94b9f0fff3c13bac2b8271a | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from PyObjCTools.TestSupport import *
from CoreText import *
class TestCoreText(TestCase):
    """Checks for the CoreText version constants and version function."""

    def testConstants(self):
        # (constant, expected value) pairs for each macOS release.
        expected_versions = [
            (kCTVersionNumber10_5, 0x00020000),
            (kCTVersionNumber10_5_2, 0x00020001),
            (kCTVersionNumber10_5_3, 0x00020002),
            (kCTVersionNumber10_5_5, 0x00020003),
            (kCTVersionNumber10_6, 0x00030000),
            (kCTVersionNumber10_7, 0x00040000),
            (kCTVersionNumber10_8, 0x00050000),
            (kCTVersionNumber10_9, 0x00060000),
            (kCTVersionNumber10_10, 0x00070000),
            (kCTVersionNumber10_11, 0x00080000),
            (kCTVersionNumber10_12, 0x00090000),
            (kCTVersionNumber10_13, 0x000A0000),
            (kCTVersionNumber10_14, 0x000B0000),
            (kCTVersionNumber10_15, 0x000C0000),
        ]
        for actual, expected in expected_versions:
            self.assertEqual(actual, expected)

    def testFunctions(self):
        version = CTGetCoreTextVersion()
        self.assertIsInstance(version, (int, long))

if __name__ == "__main__":
    main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a2c1ecc98cc0627fd78f90919a8545ff4de92e84 | 8426ae07c324370d87a0f009f1aae872a3efade1 | /tools/viewing/view_histogram.py | 47d3851ffbfb80add5f8d6c95cb0fc2809ba6f54 | [] | no_license | TomChow01/docrec-tifs18 | 2d4e95592732d9b42c023c2be912ba52cb1054f5 | 08e8099a9f5c95f8d513083acc321bebabb41245 | refs/heads/master | 2022-02-12T05:58:13.276509 | 2019-06-24T11:48:33 | 2019-06-24T11:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import sys
sys.path.append('../')
import matplotlib.pyplot as plt
import cv2
import numpy as np
from scipy import stats
# Show the grey-level histogram (PDF and CDF) of an image given on the
# command line, and report simple first-order statistics about it.
img_filename = sys.argv[1]

# Load the image both as grayscale (for the histogram) and as RGB.
gray = cv2.imread(img_filename, cv2.IMREAD_GRAYSCALE)
rgb = cv2.imread(img_filename, cv2.IMREAD_COLOR)

# Normalised grey-level distribution.
counts = np.bincount(gray.flatten())
probs = counts / float(counts.sum())

# First-order statistics of the distribution / image.
entropy = stats.entropy(probs)
skew = stats.skew(probs)
kurtosis = stats.kurtosis(probs)
weber_contrast = (255 - gray).sum() / (255.0 * gray.size)
print('Shannon entropy % .5f' % entropy)
print('Skew % .5f' % skew)
print('Kurtosis % .5f' % kurtosis)
print('Weber contrast % .5f' % weber_contrast)

# Side-by-side plots: PDF on the left, CDF on the right.
fig = plt.figure(figsize=(12, 12), dpi=300)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.plot(probs)
ax2.plot(probs.cumsum())
plt.show() | [
"paixao@gmail.com"
] | paixao@gmail.com |
5991535deca4ef0b5cf2d84263c3b15c13c62e3c | 609d037275a6b7c6aeae194c6ac0fe0a5ffc72e5 | /train.py | 882a10809c965c4a008846a208676957130e4b58 | [] | no_license | RobertSamoilescu/Robotics | e23da73073a819a158c37701d7aede48e7b8946e | 58a7fb61fd2f21bf0af5f73c5ffb2efe42b22a31 | refs/heads/master | 2020-05-24T20:51:52.256121 | 2019-05-22T17:58:01 | 2019-05-30T20:56:23 | 187,463,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | import torch
import model
import sys
from logger import Logger
from model import *
# define constants
batch_size = 64
num_workers = 7
# define loaders
train_dataset = UPBDataset("train", augmentation=True)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validation_dataset = UPBDataset("validation", augmentation=False)
validation_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
# initialize logger
logger = Logger('./logs')
# initialize model
net = model.VGGBased(2).cuda()
net = net.train()
print(net)
# criterion & optimizer
criterion = torch.nn.MSELoss()
optimizer = torch.optim.RMSprop(net.parameters(), lr=1e-4)
best_loss = sys.maxsize
def train(epoch):
    """Run one epoch of training; periodically logs to tensorboard and
    evaluates/checkpoints the best model (by mean validation loss)."""
    global best_loss
    running_loss = None
    for i, data in enumerate(train_dataloader, 0):
        # get inputs
        X, Y_gt = data['img'], data['steer_coord']
        # send to gpu
        X = X.cuda()
        Y_gt = Y_gt.cuda()
        # zero the parameters gradient
        optimizer.zero_grad()
        # forward, backward & optimize
        Y = net(X)
        loss = criterion(Y, Y_gt)
        loss.backward()
        optimizer.step()
        # update running loss (exponential moving average, 0.9 decay)
        running_loss = loss.item() if running_loss is None else 0.9 * running_loss + 0.1 * loss.item()
        # tensor board plots & print (roughly 50 times per epoch)
        if i % max(1, (len(train_dataloader) // 50)) == 0:
            # display
            print(' * [%d, %5d] MSE loss training: %.6f, Euclidian distance training: %.6f' % (epoch, i, running_loss, np.sqrt(running_loss)))
            # tensorboard plots
            step = epoch * len(train_dataloader) + i
            logger.scalar_summary('MSE loss training', loss.item(), step)
            logger.scalar_summary('Euclidian distance training', torch.sqrt(loss).item(), step)
            for tag, value in net.named_parameters():
                tag = tag.replace('.', '/')
                logger.histo_summary(tag, value.data.cpu().numpy(), step)
                logger.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), step)
        # evaluate & checkpoint (roughly 20 times per epoch)
        # NOTE(review): `step` is only refreshed inside the logging block
        # above, so the value passed to evaluate() here may be stale when
        # the two cadences diverge -- verify the intended logging step.
        if i % max(1, (len(train_dataloader) // 20)) == 0:
            eval_loss = evaluate(step)
            if eval_loss < best_loss:
                best_loss = eval_loss
                torch.save(net, './checkpoints/best_model')
                print("Model saved")
def evaluate(epoch):
    """Compute and log the mean MSE loss over the validation set.

    NOTE(review): train() passes its global tensorboard step as `epoch`;
    the value is only used for display/logging -- verify the naming.
    Returns the mean per-sample validation loss.
    """
    total_loss = 0
    net.eval()
    for i, data in enumerate(validation_dataloader, 0):
        # get inputs
        X, Y_gt = data['img'], data['steer_coord']
        # send to gpu
        X = X.cuda()
        Y_gt = Y_gt.cuda()
        # forward only -- no gradients needed for validation
        with torch.no_grad():
            Y = net(X)
            loss = criterion(Y, Y_gt)
        # accumulate the batch *sum* so the mean weighs samples equally
        total_loss += loss.item() * X.shape[0]
    mean_loss = total_loss / len(validation_dataset)
    # logger (typo fix: 'MES' -> 'MSE' in the printed message)
    print("\t * [%d] MSE loss validation: %.6f, Euclidian distance validation: %.6f"
          % (epoch, mean_loss, np.sqrt(mean_loss)))
    logger.scalar_summary("MSE loss validation", mean_loss, epoch)
    logger.scalar_summary("Euclidian distance validation", np.sqrt(mean_loss), epoch)
    # restore training mode before returning to the training loop
    net.train()
    return mean_loss
def main():
    """Train for (at most) 99999 epochs; checkpointing happens inside train()."""
    epoch = 1
    while epoch < 100000:
        train(epoch)
        epoch += 1

if __name__ == "__main__":
    main()
| [
"robert.samoilescu@gmail.com"
] | robert.samoilescu@gmail.com |
52eb6c8fd544416acfe3ad485d74995d1f24f22e | 4040b4103295141d979e004e34426aefed01f5d6 | /student_list.py | 687cee506be61e27919b8792662e6e68166ea757 | [] | no_license | mehulchopradev/bryan-python | f2fe55d8a6043333127bef7af9f359aae008b74f | 7ef405b40855c6fe7efac981c6b9d7b6aadc3237 | refs/heads/master | 2020-08-23T07:23:21.109175 | 2019-10-30T13:04:05 | 2019-10-30T13:04:05 | 216,570,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | from com.abc.college.student import Student
# Alternative list-based version kept for reference (commented out as a
# string literal): linear scan with for/else.
'''slist = [
    Student('mehul', 'm', 10),
    Student('bryan', 'm', 5),
    Student('jane', 'f', 23)
]'''
# Dict keyed by roll number gives O(1) lookup instead of a linear scan.
smap = {
    10: Student('mehul', 'm', 10),
    5: Student('bryan', 'm', 5),
    23: Student('jane', 'f', 23)
}
# Roll number to look up, read interactively.
roll = int(input('Enter roll to search : '))
# Reference version of the list-based lookup loop (kept commented out).
'''for student in slist:
    if student.roll == roll:
        print(student.get_details())
        break
else:
    # will execute when the corresponding for block is completely exhausted
    # when in the corresponding for block there is no break encountered
    print('Student not found')'''
# Dict-based lookup: membership test, then direct access.
if roll in smap:
    student = smap[roll]
    print(student.get_details())
else:
    print('Student not found')
"Mehul.Chopra@avalara.com"
] | Mehul.Chopra@avalara.com |
e7f1d3c40f50cd034136f6d0db6199b787754ea5 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/868-tideGauge.py | ba649b09866db77f7861a146ed63d959be6d0543 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
    """
    Validate a Random Forest surge model for one tide gauge with 10-fold
    cross-validation: lagged MERRA predictors (plus squared/cubed wind
    terms) are standardized, reduced with PCA (95% variance), and the
    per-gauge skill metrics (correlation, RMSE) are saved as a CSV.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
    #cd to the lagged predictors directory
    os.chdir(dir_in)
    # gauge index range [x, y) -- this job handles a single gauge
    x = 868
    y = 869
    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])
    #looping through
    for tg in range(x,y):
        os.chdir(dir_in)
        #filter only .csv files
        tgNames = []
        for file in glob.glob("*.csv"):
            tgNames.append(file)
        tg_name = sorted(tgNames)[tg]
        print(tg_name)
        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("this tide gauge is already taken care of")
            # NOTE(review): this returns from the whole function rather than
            # skipping to the next gauge; harmless while [x, y) covers one
            # gauge, but would stop a multi-gauge run early -- verify.
            return "file already analyzed!"
        os.chdir(dir_in)
        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)
        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
        #standardize predictor data (zero mean, unit variance per column)
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)
        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)
        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
        #merge predictors and surge to find common time frame
        # (iloc[:,:2] keeps only the date and surge columns of surge_new)
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)
        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)
        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue
        # convert the date column back to datetime objects
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])
        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)
        #apply PCA, keeping enough components for 95% explained variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)
        #apply 10 fold cross validation
        # NOTE(review): random_state without shuffle=True has no effect and
        # raises in newer scikit-learn versions -- verify sklearn pinning.
        kf = KFold(n_splits=10, random_state=29)
        metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]
            #train regression model
            rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
                                      min_samples_leaf = 1)
            rf.fit(X_train, y_train)
            #predictions
            predictions = rf.predict(X_test)
            # pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
            #                       pd.DataFrame(np.array(y_test))], \
            #                      axis = 1)
            # pred_obs.columns = ['pred', 'obs']
            # combo = pd.concat([combo, pred_obs], axis = 0)
            #evaluation matrix - only keep folds with significant (p < 0.05)
            #correlation between predictions and observations
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                print()
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)
        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')
        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)
        #save df as cs - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)
#run script (executes on import -- no __main__ guard)
validateRF()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
da510a13d601fed9b2e8679f23a3ef9b8f484811 | 428853ca880fb863ef708022ccac1258c25fc850 | /vart/network/nn_base.py | 0902104c3683868705c0917fbf3164f205fc2d1f | [
"Apache-2.0"
] | permissive | NicoRenaud/vArt | 2ddb47cd3ee9dd725096c40e69b153db2f78eb03 | f6d50f9499c2fca7e07bf8a3b68d81f70649fb10 | refs/heads/master | 2020-07-24T08:04:18.602264 | 2019-09-12T16:27:32 | 2019-09-12T16:27:32 | 207,857,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | import autograd.numpy as np
from autograd import elementwise_grad as egrad
from autograd import hessian, jacobian
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import grad, Variable
from time import time
class WaveFunction(nn.Module):
def __init__(self,nelec, ndim, kinetic='auto'):
super(WaveFunction, self).__init__()
self.ndim = ndim
self.nelec = nelec
self.ndim_tot = self.nelec*self.ndim
self.kinetic = kinetic
def forward(self,x):
''' Compute the value of the wave function.
for a multiple conformation of the electrons
Args:
parameters : variational param of the wf
pos: position of the electrons
Returns: values of psi
'''
raise NotImplementedError()
def electronic_potential(self,pos):
'''Compute the potential of the wf points
Args:
pos: position of the electron
Returns: values of Vee * psi
'''
raise NotImplementedError()
def nuclear_potential(self,pos):
'''Compute the potential of the wf points
Args:
pos: position of the electron
Returns: values of Ven * psi
'''
raise NotImplementedError()
def nuclear_repulsion(self):
'''Compute the nuclear repulsion term
Returns: values of Vnn * psi
'''
raise NotImplementedError()
def kinetic_energy(self,pos,out=None):
'''Main switch for the kinetic energy.'''
if self.kinetic == 'auto':
return self.kinetic_energy_autograd(pos,out)
elif self.kinetic == 'fd':
return self.kinetic_energy_finite_difference(pos)
else:
raise ValueError('kinetif %s not recognized' %self.kinetic)
def kinetic_energy_autograd(self,pos,out=None):
'''Compute the second derivative of the network
output w.r.t the value of the input.
This is to compute the value of the kinetic operator.
Args:
pos: position of the electron
out : preomputed values of the wf at pos
Returns:
values of nabla^2 * Psi
'''
if out is None:
out = self.forward(pos)
# compute the jacobian
z = Variable(torch.ones(out.shape))
jacob = grad(out,pos,grad_outputs=z,create_graph=True,only_inputs=False)[0]
# compute the diagonal element of the Hessian
z = Variable(torch.ones(jacob.shape[0]))
hess = torch.zeros(jacob.shape[0])
for idim in range(jacob.shape[1]):
tmp = grad(jacob[:,idim],pos,
grad_outputs=z,
create_graph=True,
allow_unused=True,
only_inputs=False)[0]
hess += tmp[:,idim]
return -0.5 * hess.view(-1,1)
def kinetic_energy_finite_difference(self,pos,eps=1E-3):
'''Compute the second derivative of the network
output w.r.t the value of the input using finite difference.
This is to compute the value of the kinetic operator.
Args:
pos: position of the electron
out : preomputed values of the wf at pos
eps : psilon for numerical derivative
Returns:
values of nabla^2 * Psi
'''
nwalk = pos.shape[0]
ndim = pos.shape[1]
out = torch.zeros(nwalk,1)
for icol in range(ndim):
pos_tmp = pos.clone()
feps = -2*self.forward(pos_tmp)
pos_tmp = pos.clone()
pos_tmp[:,icol] += eps
feps += self.forward(pos_tmp)
pos_tmp = pos.clone()
pos_tmp[:,icol] -= eps
feps += self.forward(pos_tmp)
out += feps/(eps**2)
return -0.5*out.view(-1,1)
def local_energy_save(self,pos):
''' local energy of the sampling points.'''
return self.kinetic_energy(pos)/self.forward(pos) \
+ self.nuclear_potential(pos) \
+ self.electronic_potential(pos) \
+ self.nuclear_repulsion()
def local_energy(self,pos):
''' local energy of the sampling points.'''
#t0 = time()
wf = self.forward(pos)
ke = self.kinetic_energy(pos,out=wf)
#print('Kinetic done in %f' %(time()-t0))
return ke/wf \
+ self.nuclear_potential(pos) \
+ self.electronic_potential(pos) \
+ self.nuclear_repulsion()
def energy(self,pos):
'''Total energy for the sampling points.'''
return torch.mean(self.local_energy(pos))
def variance(self, pos):
'''Variance of the energy at the sampling points.'''
return torch.var(self.local_energy(pos))
def pdf(self,pos):
'''density of the wave function.'''
return (self.forward(pos)**2).reshape(-1)
| [
"nicolas.gm.renaud@gmail.com"
] | nicolas.gm.renaud@gmail.com |
7022095a59a50e7551cff88e0bfad6084ddddf93 | ff4fe07752b61aa6404f85a8b4752e21e8a5bac8 | /challenge-215/roger-bell-west/python/ch-2.py | 0dca0440e596590236e0101c7b6d153af66bb44f | [] | no_license | choroba/perlweeklychallenge-club | 7c7127b3380664ca829158f2b6161c2f0153dfd9 | 2b2c6ec6ece04737ba9a572109d5e7072fdaa14a | refs/heads/master | 2023-08-10T08:11:40.142292 | 2023-08-06T20:44:13 | 2023-08-06T20:44:13 | 189,776,839 | 0 | 1 | null | 2019-06-01T20:56:32 | 2019-06-01T20:56:32 | null | UTF-8 | Python | false | false | 640 | py | #! /usr/bin/python3
def numberplacement(a0, ct):
    """Return True if at least `ct` more 1s can be placed into the 0/1
    list `a0` with no two 1s adjacent (list ends count as open).

    Bug fix: the original credited (run length)//2 placements per
    zero-run bounded by 1s, which over-counts even-length interior runs
    (e.g. [1, 0, 0, 1] can take no placement, not one).  A greedy
    left-to-right placement over a 0-padded copy computes the true
    capacity and is optimal for this constraint.
    """
    # Pad with zeros so edge positions only need one empty neighbour.
    a = [0] + list(a0) + [0]
    capacity = 0
    for i in range(1, len(a) - 1):
        if a[i] == 0 and a[i - 1] == 0 and a[i + 1] == 0:
            a[i] = 1  # greedily place here
            capacity += 1
    return ct <= capacity
import unittest
class TestNumberplacement(unittest.TestCase):
    """Examples from Perl Weekly Challenge 215, task 2."""
    def test_ex1(self):
        self.assertEqual(numberplacement([1, 0, 0, 0, 1], 1), True, 'example 1')
    def test_ex2(self):
        self.assertEqual(numberplacement([1, 0, 0, 0, 1], 2), False, 'example 2')
    def test_ex3(self):
        self.assertEqual(numberplacement([1, 0, 0, 0, 0, 0, 0, 0, 1], 3), True, 'example 3')
# run the examples when the script is executed
unittest.main()
| [
"roger@firedrake.org"
] | roger@firedrake.org |
289e20471c5101745fd5c8ae3a1b183a640d13f0 | 6bf4e54f8ae95582b73bb969ba44069c64e87651 | /kdhi/main_site/migrations/0064_auto_20200604_1726.py | 274bb623cea8b66901484390548f0a57fc0e0532 | [] | no_license | speedycowenator/kdhi_migration | 4bc983c4656a2a87cb056461bfb4219e38da1a85 | 422b2e3f142a30c81f428fb8eaa813e4a71d56fc | refs/heads/master | 2022-11-14T13:27:51.520697 | 2020-07-02T19:31:12 | 2020-07-02T19:31:12 | 246,138,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.2.5 on 2020-06-04 21:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_site', '0063_auto_20200601_2331'),
]
operations = [
migrations.AlterField(
model_name='position',
name='position_status',
field=models.CharField(choices=[('Active', 'Active'), ('Unclear', 'Unclear'), ('Removed', 'Removed'), ('Likely', 'Likely')], default='Active', max_length=20),
),
]
| [
"54556114+speedycowenator@users.noreply.github.com"
] | 54556114+speedycowenator@users.noreply.github.com |
1d2be9dea9766f873a9dbdadadf1fdabc7f5fa98 | fecca37427d8f6d3b2e818c16d0cb4d4d26a0092 | /job_helper.py | 72ac9509ef3542d67c0968affd81648238ea45e7 | [
"MIT"
] | permissive | TheoPis/cutmix-semisup-seg | 88993d3582b59c588bc8470f3a679879330ddb88 | d5f7f58a202ec16d0276eec5a1160fd14c1d4b26 | refs/heads/master | 2022-11-13T10:16:19.764384 | 2020-07-10T08:46:07 | 2020-07-10T08:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,414 | py | import os
import inspect
import sys, re
LOG_PREFIX = re.compile('log_(\d+)')
JOB_DIR_PREFIX = re.compile('(\d+)')
class LogAlreadyExistsError (Exception):
    """Raised when a job's log file or output directory already exists."""
    pass
class Logger (object):
    """File-backed tee: everything written is appended to `path` and
    forwarded to the wrapped `stream` (e.g. sys.stdout)."""

    def __init__(self, path, stream):
        self.path = path
        self.stream = stream

    def write(self, x):
        # Reopen per call so the log survives crashes and stays flushed.
        with open(self.path, 'a+') as log_file:
            log_file.write(x)
        self.stream.write(x)

    def flush(self):
        self.stream.flush()
class SubmitConfig (object):
    """Per-job output configuration: resolves the log path and output
    directory under results/<job_name>/ and can tee stdout/stderr into
    the log.  job_desc == 'none' disables logging entirely."""
    def __init__(self, job_name, job_desc, enumerate_job_names):
        res_dir = os.path.join('results', job_name)
        if not os.path.exists(res_dir):
            os.makedirs(res_dir, exist_ok=True)
        if job_desc == 'none':
            log_path = None
            job_out_dir = None
        else:
            if enumerate_job_names:
                # Scan existing logs/dirs to pick the next free job index.
                job_index = 0
                res_dir_contents = os.listdir(res_dir)
                for name in res_dir_contents:
                    m = LOG_PREFIX.match(name)
                    if m is not None:
                        job_index = max(job_index, int(m.group(1)) + 1)
                    m = JOB_DIR_PREFIX.match(name)
                    if m is not None:
                        job_index = max(job_index, int(m.group(1)) + 1)
                log_path = os.path.join(res_dir, 'log_{:04d}_{}.txt'.format(job_index, job_desc))
                job_out_dir = os.path.join(res_dir, '{:04d}_{}'.format(job_index, job_desc))
            else:
                log_path = os.path.join(res_dir, 'log_{}.txt'.format(job_desc))
                job_out_dir = os.path.join(res_dir, job_desc)
            # Refuse to clobber a previous run with the same name/desc.
            if os.path.exists(log_path) or os.path.exists(job_out_dir):
                raise LogAlreadyExistsError
        self.log_path = log_path
        self.job_out_dir = job_out_dir
        # Run-dir created on the fly
        self.__run_dir = None
        if self.log_path is not None:
            self.__stdout = Logger(self.log_path, sys.stdout)
            self.__stderr = Logger(self.log_path, sys.stderr)
    @property
    def run_dir(self):
        """Output directory for this run; created lazily on first access."""
        if self.__run_dir is None:
            # Make the run dir to receive output
            self.__run_dir = self.job_out_dir
            os.makedirs(self.__run_dir, exist_ok=True)
        return self.__run_dir
    def connect_streams(self):
        """Redirect sys.stdout/sys.stderr through the tee loggers."""
        if self.log_path is not None:
            sys.stdout = self.__stdout
            sys.stderr = self.__stderr
    def disconnect_streams(self):
        """Restore the streams that were wrapped by connect_streams()."""
        if self.log_path is not None:
            sys.stdout = self.__stdout.stream
            sys.stderr = self.__stderr.stream
# dnnlib not available; we're not running at nVidia Helsinki; run locally
def job(job_name, enumerate_job_names=True):
    """
    Decorator to turn a function into a job submitter.

    The decorated function keeps its original behavior and gains a
    ``.submit(**kwargs)`` method.  ``submit`` consumes three optional
    keyword arguments (``job_name``, ``job_desc``, ``quota_group``) and
    forwards the rest to the job function after a SubmitConfig positional
    argument:

        >>> @job('wait_some_time')
        ... def wait_some_time(submit_config: SubmitConfig, iteration_count):
        ...     ...
        >>> wait_some_time.submit(job_desc='my_run', iteration_count=50)

    If the log/output for the resolved name+description already exists,
    the submission is skipped with a message instead of raising.

    :param job_name: default name given to the job
    :param enumerate_job_names: prefix log/output names with an
        incrementing 4-digit index
    """
    # (Removed an unused `valid_targets` set left over from the dnnlib
    # submission targets; target selection is not supported locally.)
    def decorate(job_fn):
        def run_job(**kwargs):
            # Per-submission overrides, popped before the job fn sees kwargs.
            specific_job_name = kwargs.pop('job_name', None)
            if specific_job_name == '':
                specific_job_name = None
            if specific_job_name is None:
                specific_job_name = job_name

            quota_group = kwargs.pop('quota_group', None)
            if quota_group is not None and quota_group != '':
                raise ValueError('quota_group not supported when dnnlib is not available')

            job_desc_arg = kwargs.pop('job_desc', None)
            if job_desc_arg is None or job_desc_arg == '':
                job_desc_arg = specific_job_name

            try:
                submit_config = SubmitConfig(specific_job_name, job_desc_arg, enumerate_job_names)
            except LogAlreadyExistsError:
                print('Job {}:{} already executed; skipping'.format(specific_job_name, job_desc_arg))
            else:
                print('[NO dnnlib] logging to {}'.format(submit_config.log_path))
                submit_config.connect_streams()
                try:
                    job_fn(submit_config, **kwargs)
                finally:
                    # Always restore stdout/stderr, even if the job raises.
                    submit_config.disconnect_streams()

        job_fn.submit = run_job
        return job_fn

    return decorate
| [
"brittix1023@gmail.com"
] | brittix1023@gmail.com |
32c5685349516c253ce1c6823ff31d31d95fb77e | c9f1cc3a6715917d658a6e525b7c2d35b0380f9f | /Non_canonical_introns/SIM2/random_print.py | 6c29297bbbdef8a3c5f1bced21b3825b26a980b3 | [] | no_license | geparada/my_src | 4f84887130b985e84aad3d0d35e85911087d9b4f | 8d64f7ef51e1f74303ca88beb0ee964f546d8301 | refs/heads/master | 2021-01-17T01:50:50.414690 | 2017-03-14T10:01:50 | 2017-03-14T10:01:50 | 20,638,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import sys
import csv
import random
def ran_printer(bigfile, P):
    """Print each tab-separated row of `bigfile` (space-joined) with
    probability P.

    P >= 1.0 prints every row (random.random() < 1); P < 0 prints none.
    Fixes: Python-2 `print` statement -> function, and the input file is
    now closed deterministically via a context manager.
    """
    with open(bigfile) as handle:
        for row in csv.reader(handle, dialect='excel-tab'):
            if random.random() <= P:
                print(" ".join(row))

if __name__ == '__main__':
    ran_printer(sys.argv[1], float(sys.argv[2]))
| [
"geparada@omics.(none)"
] | geparada@omics.(none) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.