| blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (string, 2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, 2-10.3M) | authors (list, 1-1) | author_id (string, 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6a42d49d7d83b0b0520c6e6d394d79b1e6c4fd48
|
b8302a17ad124b2432380c7274e4780ec5adfe55
|
/exercises/de/solution_04_03.py
|
e63f7c7a9d4320eaae8436a4c058573e32639ff4
|
[
"MIT",
"CC-BY-NC-4.0"
] |
permissive
|
FrankGrimm/spacy-course
|
10da4ebf976d93aec50aa1b200019b4217f4043e
|
5e09ef9d296dad2b0fd5ff1945f4cf9a55109906
|
refs/heads/master
| 2022-04-24T18:18:06.202131
| 2020-04-21T19:17:09
| 2020-04-21T19:17:09
| 257,692,388
| 1
| 0
|
MIT
| 2020-04-21T19:14:21
| 2020-04-21T19:14:20
| null |
UTF-8
|
Python
| false
| false
| 650
|
py
|
import json
from spacy.matcher import Matcher
from spacy.lang.de import German
with open("exercises/de/iphone.json") as f:
    TEXTS = json.loads(f.read())
nlp = German()
matcher = Matcher(nlp.vocab)
# Two tokens whose lowercase forms are "iphone" and "x"
pattern1 = [{"LOWER": "iphone"}, {"LOWER": "x"}]
# Token with the lowercase form "iphone", followed by a digit
pattern2 = [{"LOWER": "iphone"}, {"IS_DIGIT": True}]
# Add the patterns to the matcher and check the results
matcher.add("GADGET", None, pattern1, pattern2)
for doc in nlp.pipe(TEXTS):
    print([doc[start:end] for match_id, start, end in matcher(doc)])
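# Note: add() with a None callback and positional patterns is the spaCy v2
# signature; on spaCy v3 the patterns go in a single list instead (check
# your installed version before relying on this):
# matcher.add("GADGET", [pattern1, pattern2])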
|
[
"ines@ines.io"
] |
ines@ines.io
|
91ed919fe4f82d66d4c1e181233dc01892ee1182
|
420376c5a1fbf8a4572545a9c891a0f8f204ed5b
|
/scrapy_amazon/items.py
|
d2aeed20eb2ea2833ebfb79da6fce00b903d6891
|
[] |
no_license
|
kishoresurana/scrapy_amazon
|
946fb8fe198736ba4233a2f3727ca1a1873ae937
|
bbb72cdb5f468d5c8b605d273bb5c93b9a2b249a
|
refs/heads/master
| 2020-12-25T21:55:35.192394
| 2014-07-27T20:09:24
| 2014-07-27T20:09:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy

class ScrapyAmazonItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    price = scrapy.Field()
    condition = scrapy.Field()
    seller = scrapy.Field()
    delivery = scrapy.Field()
    title = scrapy.Field()
    date = scrapy.Field()
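# Items behave like dicts; a hypothetical spider callback might populate one
# like this (the CSS selectors below are illustrative, not from this repo):
# def parse(self, response):
#     item = ScrapyAmazonItem()
#     item['title'] = response.css('span#productTitle::text').get()
#     item['price'] = response.css('.a-price .a-offscreen::text').get()
#     yield item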
|
[
"aniversarioperu1@gmail.com"
] |
aniversarioperu1@gmail.com
|
1bd983fda08cc124332618ea8d57b23525828d1e
|
9e3335031701144d34780466febb13ab3e5f7ce6
|
/k/flask1.py
|
6fc6d0b81b9c9edea46d9d9dd5267dc1c5bce20d
|
[] |
no_license
|
Ananthan7/students-portal-flask
|
876a29bd48c94a2eba2f1fd3665ff6992f5e529f
|
067253be8f9740d46a1aaec8399345d810de08cd
|
refs/heads/main
| 2023-04-18T08:33:59.202499
| 2021-04-24T13:11:31
| 2021-04-24T13:11:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 492
|
py
|
from flask import Flask
from flask import redirect
from flask import url_for

app = Flask(__name__)

@app.route('/admin')
def hello_admin():
    return 'hello admin'

@app.route('/guest/<guest>')
def hello_guest(guest):
    return 'hello %s as guest' % guest

@app.route('/user/<name>')
def hello_user(name):
    if name == 'admin':
        return redirect(url_for('hello_admin'))
    else:
        return redirect(url_for('hello_guest', guest=name))

if __name__ == '__main__':
    app.run(debug=True)
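# A quick sketch of exercising these routes without a browser, via Flask's
# built-in test client (follow_redirects resolves the url_for redirects):
# with app.test_client() as client:
#     print(client.get('/user/admin', follow_redirects=True).data)  # b'hello admin'
#     print(client.get('/user/alice', follow_redirects=True).data)  # b'hello alice as guest'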
|
[
"ananthankrishnan77@gmail.com"
] |
ananthankrishnan77@gmail.com
|
d2669e23bfc199cd2b0f481099012e03524dc9b0
|
48c9fcc9d7856f92f9cda06aae0c5dcf350e48ea
|
/matchbook/endpoints/marketdata.py
|
670579a7d6b05033308759dfb454fc6635fd1266
|
[
"MIT"
] |
permissive
|
rozzac90/matchbook
|
1753d29918eadf5f26ba61611f27c3569ad6850e
|
d29f4704a0f69fa623422243d0b8372c8c172a2d
|
refs/heads/master
| 2021-12-25T18:25:05.002996
| 2018-11-24T17:34:35
| 2018-11-24T17:34:35
| 96,877,596
| 14
| 12
|
MIT
| 2021-12-15T18:07:03
| 2017-07-11T09:45:34
|
Python
|
UTF-8
|
Python
| false
| false
| 10,544
|
py
|
import datetime
from matchbook import resources
from matchbook.endpoints.baseendpoint import BaseEndpoint
from matchbook.enums import Boolean, Side, MarketNames, MarketType, MarketStates
from matchbook.utils import clean_locals
class MarketData(BaseEndpoint):
def get_events(self, event_id=None, before=None, after=None, sport_ids=None, category_ids=None,
states=MarketStates.All, tag_url_names=None, per_page=500, offset=0,
include_event_participants=Boolean.T, price_depth=3, side=Side.All,
minimum_liquidity=None, session=None):
"""
Get paginated events. Results can be filtered using various different parameters.
:param event_id: specific event id. Default None.
:type event_id: int
:param after: event start time lower cutoff. Default None.
:type after: UNIX timestamp
:param before: event start time upper cutoff. Default None.
:type before: UNIX timestamp
:param category_ids: filter results by category id. Default None.
:type category_ids: comma separated string
:param sport_ids: filter results by sports id(s). Default None.
:type sport_ids: comma separated string
:param states: filter results by event state or comma separated string of types. Default None.
:type states: matchbook.enums.MarketStates
        :param tag_url_names: Only events with tags having url-name in the provided list are included in the response.
:type tag_url_names: comma separated string
:param per_page: number of results to show in a single result. Max=500. Default 20.
:type per_page: int
:param offset: starting page of results to show. Default 0.
:type offset: int
:param include_event_participants: A boolean indicating whether to return the event participants information
:type include_event_participants: matchbook.enums.Boolean
:param price_depth: max depth to be returned for prices. Default 3.
:type price_depth: int
:param side: filter results by side (dependent on exchange-type). Default None.
:type side: matchbook.enums.Side
:param minimum_liquidity: Only prices with available-amount greater than or equal to this value are included.
:type minimum_liquidity: float
:param session: requests session to be used.
:type session: requests.Session
        :returns: Breakdown of each runner if they are included.
:rtype: json
:raises: matchbook.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
method = 'events'
params['odds-type'] = self.client.odds_type
params['exchange-type'] = self.client.exchange_type
params['currency'] = self.client.currency
if event_id:
method = 'events/%s' % event_id
del_keys = ['event-id', 'after', 'before', 'category-ids', 'sport-ids',
'states', 'per-page', 'offset', 'tag-url-names']
params = {k: v for k, v in params.items() if k not in del_keys}
response = self.request("GET", self.client.urn_edge, method, params=params, session=session)
response = response.json().get('event', response.json())
else:
response = self.request(
"GET", self.client.urn_edge, method, params=params, target='events', session=session
)
return self.process_response(response, resources.Event, date_time_sent, datetime.datetime.utcnow())
def get_markets(self, event_id, market_id=None, names=MarketNames.All, types=MarketType.All, offset=0, per_page=500,
states=MarketStates.All, price_depth=3, side=Side.Default, minimum_liquidity=None, session=None):
"""
Get paginated markets for an event specified by the event_id.
:param event_id: specific event id.
:type event_id: int
:param market_id: specific market id to pull data for.
:type market_id: int
:param states: filter results by market state or a comma separated string of states. Default 'open', 'suspended'
:type states: matchbook.enums.MarketStates
:param types: filter results by market type or a comma separated string of types. Default None.
:type types: matchbook.enums.MarketType
:param names: filter results by market name. Default None.
:type names: matchbook.enums.MarketNames
:param per_page: number of results to show in a single result. Max=500. Default 20.
:type per_page: int
:param offset: starting page of results to show. Default 0.
:type offset: int
:param price_depth: max depth to be returned for prices. Default 3.
:type price_depth: int
:param side: filter results by side (dependent on exchange-type). Default None.
:type side: matchbook.enums.Side
:param minimum_liquidity: Only prices with available-amount greater than or equal to this value are included.
:type minimum_liquidity: float
:param session: requests session to be used.
:type session: requests.Session
:returns: Breakdown of each runner if they are included.
:rtype: json
:raises: matchbook.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
params['odds-type'] = self.client.odds_type
params['exchange-type'] = self.client.exchange_type
params['currency'] = self.client.currency
method = 'events/%s/markets' % event_id
if market_id:
method = 'events/%s/markets/%s' % (event_id, market_id)
del_keys = ['names', 'types', 'per-page', 'offset', 'states']
params = {k: v for k, v in params.items() if k not in del_keys}
response = self.request('GET', self.client.urn_edge, method, params=params, session=session)
response = response.json().get('market', response.json())
else:
response = self.request(
"GET", self.client.urn_edge, method, params=params, target='markets', session=session
)
return self.process_response(response, resources.Market, date_time_sent, datetime.datetime.utcnow())
def get_runners(self, event_id, market_id, runner_id=None, states=MarketStates.All, include_withdrawn=Boolean.T,
include_prices=Boolean.T, price_depth=3, side=Side.All, minimum_liquidity=None, session=None):
"""
Get runner data for an event and market specified by their ids.
:param event_id: specific event id.
:type event_id: int
:param market_id: specific market id to pull data for.
:type market_id: int
:param runner_id: specific runner to pull data for.
:type runner_id: int
        :param states: filter results by runner state or a comma separated string of states. Default 'open', 'suspended'.
        :type states: matchbook.enums.MarketStates
        :param include_withdrawn: boolean indicating whether to return withdrawn runners in the response.
        :type include_withdrawn: matchbook.enums.Boolean
        :param include_prices: boolean indicating whether to return the prices for the runners.
        :type include_prices: matchbook.enums.Boolean
:param price_depth: max depth to be returned for prices. Default 3.
:type price_depth: int
:param side: filter results by side (dependent on exchange-type). Default None.
:type side: matchbook.enums.Side
:param minimum_liquidity: Only prices with available-amount greater than or equal to this value are included.
:type minimum_liquidity: float
:param session: requests session to be used.
:type session: requests.Session
:returns: Breakdown of each runner if they are included.
:rtype: json
:raises: matchbook.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
params['odds-type'] = self.client.odds_type
params['exchange-type'] = self.client.exchange_type
params['currency'] = self.client.currency
method = 'events/%s/markets/%s/runners' % (event_id, market_id)
if runner_id:
method = 'events/%s/markets/%s/runners/%s' % (event_id, market_id, runner_id)
del_keys = ['include-withdraw', 'states']
params = {k: v for k, v in params.items() if k not in del_keys}
response = self.request('GET', self.client.urn_edge, method, params=params, session=session)
response = response.json().get('runner', response.json())
else:
response = self.request(
'GET', self.client.urn_edge, method, params=params, target='runners', session=session
).json()
return self.process_response(response, resources.Runner, date_time_sent, datetime.datetime.utcnow())
def get_popular_markets(self, price_depth=3, side=Side.All, minimum_liquidity=None,
old_format=Boolean.F, session=None):
"""
Get popular markets as defined by matchbook.
:param price_depth: max depth to be returned for prices. Default 3.
:type price_depth: int
:param side: filter results by side (dependent on exchange-type). Default None.
:type side: matchbook.enums.Side
:param minimum_liquidity: Only prices with available-amount greater than or equal to this value are included.
:type minimum_liquidity: float
        :param old_format: whether to return the response in the old format. Default False.
        :type old_format: matchbook.enums.Boolean
:param session: requests session to be used.
:type session: requests.Session
:returns: Breakdown of each runner if they are included.
:rtype: json
:raises: matchbook.exceptions.ApiError
"""
params = clean_locals(locals())
date_time_sent = datetime.datetime.utcnow()
params['odds-type'] = self.client.odds_type
params['exchange-type'] = self.client.exchange_type
params['currency'] = self.client.currency
response = self.request('GET', self.client.urn_edge, 'popular-markets', params=params, session=session)
return self.process_response(
response.json().get('markets', response.json()), resources.Market,
date_time_sent, datetime.datetime.utcnow()
)
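# A hypothetical end-to-end sketch (the APIClient import path is assumed from
# this package's layout and should be verified before use):
# from matchbook.apiclient import APIClient
# client = APIClient('username', 'password')
# events = MarketData(client).get_events(sport_ids='15', per_page=20)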
|
[
"rory.cole1990@gmail.com"
] |
rory.cole1990@gmail.com
|
9eb155ab168b320e301794c6d06721d8159379c8
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/329/usersdata/297/91364/submittedfiles/dec2bin.py
|
f499b6f8e6c0b866d68629df150aa2c83d3d617b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
# -*- coding: utf-8 -*-
while True:
    p = int(input('enter a number p: '))
    q = int(input('enter a number q: '))
    if q >= p:
        break
if str(p) in str(q):
    print('S')
else:
    print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
e6080c62d3170229f32b8403d65ad8f216e5cae7
|
090d9e9dd0e8c1cf7a3338ea1464897c72ab82be
|
/KerasCNN.py
|
ddfb334d5b546ccf31c08de75d4af2c1c3e12db4
|
[] |
no_license
|
zhanming-wang/CNN_digit_recognizer_kaggle
|
f5451514a2990ae60342e96b715cbe9cd2708d31
|
e0dded47d0a44ef2c0a74bc6ff459fa83e78a246
|
refs/heads/master
| 2020-12-04T00:26:33.478317
| 2020-01-03T07:59:45
| 2020-01-03T07:59:45
| 231,537,504
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,465
|
py
|
import plaidml.keras
plaidml.keras.install_backend() #use AMD GPU
'''
import cv2
import os
import random
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import confusion_matrix
import itertools
'''
import numpy as np
import pandas as pd
import keras
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, Dense, Dropout, MaxPool2D, Flatten
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint
#Establishing the data directory
trainDataDir = '/Users/zhanmingwang/OneDrive/Programming/kaggle-digitRecognizer/data/train.csv'
testDataDir = '/Users/zhanmingwang/OneDrive/Programming/kaggle-digitRecognizer/data/test.csv'
#read data from CSV files
train = pd.read_csv(trainDataDir)
test = pd.read_csv(testDataDir)
#create the training data
y_train = train['label']
x_train = train.drop('label', axis=1)
#free up memory
del train
#Check for null and missing values
x_train.isnull().any().describe()
test.isnull().any().describe() #in this case, there is none
#normalization
x_train = x_train/ 255.0
test = test/ 255.0
#reshaping (reshape image in 3 dimensions)
x_train = x_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
#OneHot encode the labels --> [0, 0, 0, 0, 1, 0, 0 ...]
y_train = keras.utils.np_utils.to_categorical(y_train, num_classes= 10)
#split train and test
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1)
#build CNN
model = Sequential()
model.add(Conv2D(filters= 32, kernel_size=(5,1), padding= 'Same',
activation='relu', input_shape= (28,28,1)))
model.add(Conv2D(filters= 32, kernel_size=(1,5), padding= 'Same',
activation='relu', input_shape= (28,28,1)))
model.add(MaxPool2D(pool_size=(1,1)))
model.add(Conv2D(filters= 32, kernel_size=(5,1), padding='Same',
activation='relu'))
model.add(Conv2D(filters= 32, kernel_size=(1,5), padding='Same',
activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same',
activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same',
activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=(2,2), padding='Same',
activation='relu'))
model.add(Conv2D(filters=128, kernel_size=(2,2), padding='Same',
activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.1))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(10, activation='softmax'))
#Define the optimizer
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) #RMSprop decreases gradient descent oscillation
#Compile the model
model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics=['accuracy'])
#Set a learning rate annealer (basically decreasing the lr dynamically with steps)
learning_rate_reduction = ReduceLROnPlateau(monitor= 'val_acc', #this will be used in model.fit / model.fit_generator
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00000001) #ReduceLROnPlateau decreases the learning rate when the model stops improving
epochs = 100 #theoretically it should get to 0.9967 accuracy
batch_size = 85
##DATA AUGMENTATION (prevent overfitting)
datagen = ImageDataGenerator( # used below in model.fit_generator
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=10,       #randomly rotate images by up to 10 degrees
    zoom_range= 0.1,         #randomly zoom some images by up to 10%
    width_shift_range=0.1,   #randomly shift images horizontally by up to 10% of the width
    height_shift_range=0.1,  #randomly shift images vertically by up to 10% of the height
    horizontal_flip=False,   #Not flipping because of 6 and 9
    vertical_flip=False      #Same reason
)
datagen.fit(x_train) #horizontal or vertical flips might result in misclassifying near-symmetrical numbers such as 6 and 9
#TRAIN!
history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs, validation_data=(x_val, y_val),
verbose=2, steps_per_epoch=x_train.shape[0] // batch_size,
callbacks=[learning_rate_reduction]) #fit_generator runs the training and image augmentation in parallel
#Evaluation
#Training and validation curves
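#(a minimal sketch of the curves mentioned above, assuming matplotlib is
# available; 'loss' and 'val_loss' are standard Keras History keys)
#import matplotlib.pyplot as plt
#plt.plot(history.history['loss'], label='training loss')
#plt.plot(history.history['val_loss'], label='validation loss')
#plt.legend(); plt.show()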
#predict results
results = model.predict(test)
#Select the index with the maximum probability
results = np.argmax(results, axis = 1)
results = pd.Series(results, name='Label')
submission = pd.concat([pd.Series(range(1, 28001), name='ImageId'), results],
axis=1)
submission.to_csv('cnn_mnist_datagen.csv', index=False)
|
[
"noreply@github.com"
] |
zhanming-wang.noreply@github.com
|
c4e8389d93f36f8805d8c3cdf58cabc747343f84
|
91fe8f479fa921fa84111d19222a5c6aa6eff030
|
/basis/execute-unit/aiohttp-and-asyncio-test.py
|
25312be5c6ecba564f33a7ed14ddc40b68021a95
|
[] |
no_license
|
romanticair/python
|
2055c9cdaa46894c9788d5797643283786ed46dd
|
6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141
|
refs/heads/master
| 2022-11-03T17:17:17.608786
| 2019-07-05T07:07:29
| 2019-07-05T07:07:29
| 195,356,190
| 0
| 1
| null | 2022-10-14T20:51:14
| 2019-07-05T07:00:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,355
|
py
|
"""
asyncio 可以实现单线程并发IO操作。如果仅用在客户端,发挥的威力不大。
如果把asyncio用在服务器端,例如Web服务器,由于HTTP连接就是IO操作,
因此可以用单线程+coroutine实现多用户的高并发支持
asyncio实现了TCP、UDP、SSL等协议,aiohttp则是基于asyncio实现的HTTP框架
aiohttp的初始化函数init()也是一个coroutine,loop.create_server()则利用asyncio创建TCP服务
编写一个HTTP服务器,分别处理以下URL
1. / - 首页返回b'<h1>Index</h1>';
2. /hello/{name} - 根据URL参数返回文本hello, %s!
"""
import asyncio
from aiohttp import web

async def index(request):
    await asyncio.sleep(0.5)
    return web.Response(body=b'<h1>Index</h1>')

async def hello(request):
    await asyncio.sleep(0.5)
    text = '<h1>hello, %s!</h1>' % request.match_info['name']
    return web.Response(body=text.encode('utf-8'))

async def init(loop):
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    app.router.add_route('GET', '/hello/{name}', hello)
    srv = await loop.create_server(app.make_handler(), '127.0.0.1', 3000)
    print('Server started at http://127.0.0.1:3000...')
    return srv

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(init(loop))
    loop.run_forever()
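# Quick check once the server is running (output follows the hello handler above):
# $ curl http://127.0.0.1:3000/hello/world
# <h1>hello, world!</h1>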
|
[
"1024519570@qq.com"
] |
1024519570@qq.com
|
1f45dcf7f0f4b3b2d3317758fe808c0da08e5812
|
749831b8cea53b9f10926512b34d6a0e97a9df98
|
/traffic/examples/camera.py
|
5c9a6bbd5c2e78a814af07b7d5c1c69089270182
|
[
"MIT"
] |
permissive
|
tasigabi97/traffic
|
cab0422382c0e4cf78b468c2e9ab94da02240bae
|
f90c6679b77a6c55717ccb9e5d53daff56fa30d2
|
refs/heads/master
| 2023-02-12T16:27:19.004386
| 2020-12-23T20:22:27
| 2020-12-23T20:22:27
| 291,692,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 644
|
py
|
"""
Egy példa arra, hogyan lehet használni a cv2-es kamera ablakokat.
"""
def main():
from traffic.camera import choose_camera
from traffic.imports import imshow_cv2
from traffic.cv2_input import cv2_input
with choose_camera() as camera: # Feldobja az elérhető kamerákat, amikből egyet kell kiválasztani.
cv2_input.wait_keys = "q"
while True:
imshow_cv2("Chosen-> ({})".format(camera.name), camera.img)
if cv2_input.pressed_key is not None: # A q betű lenyomásval be lehet zárni a kiválasztott kamera képét.
break
if __name__ == "__main__":
main()
|
[
"tasi.gabi97@gmail.com"
] |
tasi.gabi97@gmail.com
|
61cb7f3485bd6f3f5d33ac83b9b52e22c3713c3e
|
9dbd3b20c33ec93c0ade5032287db728de6fa712
|
/src/intent_service.py
|
e030a7d34ab679697601e3e08b286405f1bdb703
|
[] |
no_license
|
xs2pranjal/intent_via_audio
|
bd1ec058b51935463e781c159fbed2be02d18bd5
|
f57bce6ff9d8bcb7801bac362f2f52e57cba9204
|
refs/heads/master
| 2022-12-28T17:36:20.609023
| 2018-08-15T19:34:27
| 2018-08-15T19:34:27
| 144,739,836
| 0
| 0
| null | 2022-12-08T02:22:14
| 2018-08-14T15:35:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
import traceback
import numpy as np
from heapq import nlargest
from sklearn.metrics.pairwise import cosine_similarity
from helpers.noun_chunking import NounPhraseExtraction
from docs.config import CATEGORIES
from helpers.model_loader import GloveService

class IntentService():
    '''Extracts the top n intents from the input text.'''
    def __init__(self, top_n):
        self.top_n = top_n
        self.__noun_phrase_tokens = []
        self.__default_categories = CATEGORIES
        self.__glove_model = GloveService()

    def __generate_text_vector(self):
        '''Generates the text vector for the present noun phrase vocab'''
        token_vector_dict = {}
        for token in self.__noun_phrase_tokens:
            # print(token)
            try:
                # print(type(self.__glove_model.get_vector(token)))
                token_vector_dict[token] = self.__glove_model.get_vector(token)
            except Exception:
                pass
        vector_mean = np.mean(list(token_vector_dict.values()), axis=0)
        return vector_mean

    def __get_text_category_affinity(self, text):
        '''Computes the affinity between the text and each category'''
        try:
            self.__noun_phrase_tokens = NounPhraseExtraction().get_noun_phrases(text)
        except Exception:
            traceback.print_exc()
        affinity_dict = {}
        affinity_dict["aspects"] = self.__noun_phrase_tokens
        text_vector = self.__generate_text_vector()
        # calculate text affinity to each category
        category_affinity_dict = {}
        for category in self.__default_categories:
            try:
                category_vector = self.__glove_model.get_vector(category)
                category_affinity_dict[category] = cosine_similarity(text_vector.reshape(1, -1), category_vector.reshape(1, -1)).item(0)
            except Exception:
                category_affinity_dict[category] = None
        affinity_dict["affinity"] = category_affinity_dict
        return affinity_dict

    def get_default_category(self, text):
        '''Returns the top-n intents closest to the text'''
        category_affinity_dict = self.__get_text_category_affinity(text)
        affinity_dict = category_affinity_dict.get('affinity')
        five_largest = nlargest(self.top_n, affinity_dict, key=affinity_dict.get)
        return five_largest
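# A hypothetical usage sketch (CATEGORIES and the GloVe model come from this
# repo's config and helpers, so the output here is illustrative only):
# service = IntentService(top_n=5)
# print(service.get_default_category('book me a cab to the airport'))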
|
[
"pranjal@bitbucket.org"
] |
pranjal@bitbucket.org
|
d3c659c5b2ff919672ec54e7eac8e65d13674bbe
|
9e510a65df0400925845a29e43ae5ddd010671f8
|
/appengine/subscription_handlers.py
|
53b1e2c41d7691ccc796637e5b3b77dcdb81ffe2
|
[] |
no_license
|
nstahl/scoping_nyc
|
bda4c8beee8c5c9e3c3aab1ddc8a38b63da11f2b
|
bfb8b00f0d3196534551ad89aa0b8fd335e61897
|
refs/heads/master
| 2020-06-05T07:02:40.072555
| 2013-04-06T16:26:19
| 2013-04-06T16:26:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,575
|
py
|
import app_constants
import json
import logging
import time
import urllib
import webapp2
from google.appengine.ext import db
from google.appengine.api import urlfetch
class LocationInfo(db.Model):
""" Models a location and everything we want to keep about it """
object_id = db.StringProperty(indexed = False) # Object Id of the subscription
id = db.StringProperty(indexed = False) # id of the subscription
loc = db.StringProperty(indexed = False)
date = db.DateTimeProperty(auto_now_add=True, indexed = False)
def to_dict(self):
return dict([(p, unicode(getattr(self, p))) for p in self.properties()])
class ListSubscriptions(webapp2.RequestHandler):
""" Lists the subscriptions that SnapCity app has on the Instagram Server.
The response is the response that we get from Instagram.
Example usage: http://localhost:8080/listsubscriptions
"""
def get(self):
form_fields = {
'client_id': app_constants.CLIENT_ID,
'client_secret' : app_constants.CLIENT_SECRET
}
form_data = urllib.urlencode(form_fields)
result = urlfetch.fetch(url='%s?%s'%(app_constants.SUBSCRIPTION_URL,form_data),
method=urlfetch.GET)
json_data = json.loads(result.content)
logging.info("List of subscribers:%s", json.dumps(json_data,indent=2))
self.response.write("<pre>")
self.response.write(json.dumps(json_data,indent=2))
self.response.write("</pre>")
class DeleteSubscriptions(webapp2.RequestHandler):
""" Deletes subscriptions both from Instagram and from the DB we keep.
You can delete single subscriptions by providing the id, or
you can delete all the subscriptions.
Parameters:
id : id of the subscription you want to delete.
if you omit id, or set id to all, it will delete all subscriptions.
Example usage: http://localhost:8080/deletesubscriptions?id=all
This class does the same thing both for get or post requests.
"""
def get(self):
return self.post()
def post(self):
id = self.request.get('id').lower()
if not id:
id = 'all'
self.response.headers['Content-Type'] = 'text/plain'
if self._delete_subscriber(id):
self.response.write('Deleted subscriptions for %s'%id)
else:
self.response.write('Failed to delete subscriptions for %s'%id)
def _delete_subscriber(self, id):
form_fields = {
'client_id': app_constants.CLIENT_ID,
'client_secret' : app_constants.CLIENT_SECRET,
}
if id == "all":
form_fields["object"] = "all"
else:
form_fields['id'] = id
form_data = urllib.urlencode(form_fields)
logging.info("Calling delete with: %s", form_data)
result = urlfetch.fetch(url='%s?%s'%(app_constants.SUBSCRIPTION_URL, form_data),
method=urlfetch.DELETE)
json_data = json.loads(result.content)
logging.info("Delete returned:%s", json.dumps(json_data,indent=2))
if json_data["meta"]["code"]!= 200 :
logging.info("Status code not 200: %s", result.content)
return False
return True
class MakeSubscription(webapp2.RequestHandler):
""" Makes a subscription for a location.
Only certain locations are supported.
Example usage:
    http://localhost/makesubscription?loc=newyork
"""
def get(self):
loc = self.request.get('loc').lower()
return_msg = ''
if loc not in app_constants.GEOCODES:
return_msg = 'Error: \'%s\' is not a recognized location\n'%loc
return_msg += 'This app only works with %s'%app_constants.MATCHERS.keys()
else:
response_arr = self._register_subscriber(loc)
if response_arr:
#store it
db_id = int(time.mktime(time.gmtime()))
new_key = db.Key.from_path('LocationInfo', db_id)
locsub = LocationInfo(key=new_key)
locsub.id = response_arr[0]
locsub.object_id = response_arr[1]
locsub.loc = loc
locsub.put()
return_msg = 'Made subscription with id:%s for loc:%s '%(str(response_arr[0]),loc)
else:
return_msg = 'Failed to make subscription'
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(return_msg)
def _register_subscriber(self, loc):
form_fields = {
'client_id': app_constants.CLIENT_ID,
'client_secret' : app_constants.CLIENT_SECRET,
'verify_token' : app_constants.VERIFY_TOKEN,
'object' : 'geography',
'aspect' : 'media',
'lat' : app_constants.GEOCODES[loc][0],
'lng' : app_constants.GEOCODES[loc][1],
'radius' : '5000',
'callback_url' : 'http://snapcity02.appspot.com/stream'
}
form_data = urllib.urlencode(form_fields)
#post to instagram
result = urlfetch.fetch(url=app_constants.SUBSCRIPTION_URL,
payload=form_data,
method=urlfetch.POST)
if result.status_code != 200:
logging.info("Status code not 200, it is %s: %s", result.status_code, result.content)
return
#success,some info about the subscription follows
data = json.loads(result.content)
object_id = data["data"]["object_id"]
id = data["data"]["id"]
logging.info("Registered subscriber for loc:%s, id:%s and object_id:%s", loc, id, object_id)
return [id, object_id]
class Helper(object):
@staticmethod
def getLocation():
locsubs = db.GqlQuery("SELECT * FROM LocationInfo")
if not locsubs.count():
logging.error('Cannot fetch. No current subscription')
return
return locsubs[0]
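# Hypothetical route wiring for the handlers above (the example URLs in the
# docstrings suggest these paths; the real app config lives elsewhere):
# app = webapp2.WSGIApplication([
#     ('/listsubscriptions', ListSubscriptions),
#     ('/deletesubscriptions', DeleteSubscriptions),
#     ('/makesubscription', MakeSubscription),
# ], debug=True)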
|
[
"r.niko.stahl@gmail.com"
] |
r.niko.stahl@gmail.com
|
b7d7318c9811ece1a6235ddd3b823c0e77caf9d3
|
c760566d38787fc212d95d1b18172ad8093e2758
|
/muse_redshifting.py
|
e58c15958c9b05998a506c259f6572abaf9f230c
|
[
"Unlicense"
] |
permissive
|
sdjohnson-astro/redshifting
|
86b35111607034d59621e78823fc862fc30992f6
|
2aa81524fec8717cd4469f64d729a609bec1a9b9
|
refs/heads/master
| 2022-12-26T14:25:49.720710
| 2022-12-13T13:59:41
| 2022-12-13T13:59:41
| 170,399,163
| 7
| 4
|
Unlicense
| 2021-02-10T03:27:14
| 2019-02-12T22:10:25
|
Python
|
UTF-8
|
Python
| false
| false
| 72,406
|
py
|
#!/usr/bin/env python
from PyQt5 import QtGui, QtCore # (the example applies equally well to PySide)
import pyqtgraph as pg
import sys
import os
from astropy.io import fits
from astropy.table import Table, Column, vstack, unique
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
from astropy.table import Table
import numpy as np
import glob
import argparse
import pyqtgraph.parametertree as pt
import redshift
import shutil
import mpdaf
def formatspectrum(wave, flux, error=0.0, mask=1, model=0.0, flat=0.0, arc=0.0,raw=0.0,rawerr=0.0):
spec = np.zeros(len(flux),
dtype={'names':('wave', 'flux', 'error',
'mask', 'model', 'flat', 'arc', 'raw', 'rawerr'),
'formats':(float, float, float, float, float,
float, float,float,float)})
# spec['wave'] = 0.0
# spec['flux'] = 0.0
# spec['error'] = 0.0
# spec['mask'] = 1
# spec['model'] = 0.0
# spec['flat'] = 0.0
# spec['arc'] = 0.0
spec['wave'] = wave
spec['flux'] = flux
spec['error'] = error
spec['model'] = model
spec['mask'] = mask
spec['flat'] = flat
spec['arc'] = arc
spec['raw'] = raw
spec['rawerr'] = rawerr
spec['error'][~np.isfinite(spec['flux'])] = 0.0
spec['flux'][~np.isfinite(spec['flux'])] = 0.0
spec['flux'][~np.isfinite(spec['error'])] = 0.0
spec['error'][~np.isfinite(spec['error'])] = 0.0
index = np.where((spec['error'] == 0.0) | ((spec['flux'] == 0.0) & (spec['error'] == 1.0)))
spec['mask'][index] = 0.0
return spec
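# Quick illustrative check of the record array built above (the wavelength
# grid here is arbitrary, not a real MUSE wavelength solution):
# wave = np.linspace(4750.0, 9350.0, 3681)
# spec = formatspectrum(wave, np.ones_like(wave), error=0.1*np.ones_like(wave))
# print(spec.dtype.names)  # ('wave', 'flux', 'error', 'mask', 'model', 'flat', 'arc', 'raw', 'rawerr')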
def getspec1Dname(mask, row, id, name):
path = '{}_spec1D'.format(mask)
return '{}/{}_{}_{}_spec1D.fits'.format(path, row, id, name)
def getspec2Dname(mask, row, id, name):
path = '{}_spec1D'.format(mask)
return '{}/{}_{}_{}_spec2D.fits'.format(path, row, id, name)
def getredshift1Dname(mask, row, id, name):
path = '{}_spec1D'.format(mask)
return '{}/{}_{}_{}_redshift.fits'.format(path, row, id, name)
#for CarPy output
def createmusefiles(mask):
#if version=='cosmos':
# print('Creating spec1D files')
# spec1Darray = fits.getdata(mask + '_1spec.fits')
# header1D = fits.getheader(mask + '_1spec.fits')
# # Create wavelength array
# wave = header1D['CRVAL1'] + np.arange(header1D['NAXIS1'])*header1D['CDELT1']
# nRows = spec1Darray.shape[1]
# path = '{}_spec1D'.format(mask)
# os.mkdir(path)
#
# rows = Column(np.arange(nRows, dtype='int') + 1, name='row')
# ids = Column(np.chararray(nRows, itemsize=20), name='id')
# classes = Column(np.chararray(nRows, itemsize=6), name='class')
# redshifts = Column(np.zeros(nRows), name='redshift')
# qualities = Column(np.zeros(nRows, dtype='int') - 1, name='quality')
# comments = Column(np.chararray(nRows, itemsize=100), name='comment')
#
# extpos = Column(np.zeros(nRows,dtype='float'), name='extpos')
# extaper = Column(np.zeros(nRows,dtype='float'), name='extaper')
# extflag = Column(np.zeros(nRows,dtype=bool), name='extflag')
# boxes = Column(np.zeros(nRows,dtype=bool), name='alignbox')
# # objects = Table([rows, ids, classes, redshifts, qualities, comments])
# objects = Table([rows, ids, classes, redshifts, qualities, comments, extpos, extaper,extflag])
# objects['comment'] = 'none'
#
# for i in range(nRows):
# flux1Draw = spec1Darray[0, i, :]
# error1Draw = spec1Darray[1, i, :]
# flat1D = spec1Darray[2, i, :]
# arc1D = spec1Darray[3, i, :]
# flux1D = spec1Darray[4, i, :]
# error1D = spec1Darray[5, i, :]
# spec = formatspectrum(wave, flux1D, error1D, 1.0, 0.0, flat1D, arc1D)
# apnum1D = header1D['APNUM{}'.format(i+1)]
# apnum1Darray = apnum1D.split(' ')
# id = apnum1Darray[1]
# savename = getspec1Dname(mask, i+1, id)
# fits.writeto(savename, spec)
# objects[i]['id'] = id
# print(i+1)
# objects['class'] = 'galaxy'
# objects['quality'] = -1
# objects.write('{}/{}_objects.fits'.format(path, mask), overwrite=True)
print('Creating spec1D and 2D files')
objects = Table.read('{}.dat'.format(mask), format='ascii.fixed_width')
objects['class'] = 'galaxy'
objects['quality'] = -1
objects['redshift'] = 0.0
objects['comment'] = 'none '
# Create spec1D files
path = '{}_spec1D'.format(mask)
os.mkdir(path)
    # Create object file
objects.write('{}_spec1D/{}_objects.fits'.format(mask, mask), overwrite=True)
cube = mpdaf.obj.Cube('{}.fits'.format(mask))
data = np.array(cube.data)
dimW, dimY, dimX = np.shape(data)
wave = cube[:, 0, 0].wave.coord(np.arange(0, cube.shape[0], 1.0))
wcs = cube.wcs
for object in objects:
# Get the 1D spectrum
print('{} {}'.format(object['row'], object['id']))
spec = cube.aperture((object['dec'], object['ra']), object['radius'])
flux = np.array(spec.data)
ivar = np.array(1/(spec.var*1.4))
error = np.sqrt(1/ivar)
spec = formatspectrum(wave, flux, error)
# Save the 1D spectrum
savename = getspec1Dname(mask, object['row'], object['id'], object['name'])
print(savename)
fits.writeto(savename, spec, overwrite=True)
# Get the 2D spectrum
yx = wcs.sky2pix((object['dec'], object['ra']))[0]
y = yx[0]
x = yx[1]
#print('{}'.format(object['id']))
#print('y, x={}, {}'.format(y, x))
#print(np.shape(data))
minx = int(x-0.6/0.2)
if minx < 0:
minx = 0
maxx = int(x+0.6/0.2)
if maxx >= dimX:
maxx = dimX - 1
miny = int(y-3.0/0.2)
if miny < 0:
miny = 0
maxy = int(y+3.0/0.2)
if maxy >= dimY:
maxy = dimY - 1
#print('x={} {} to {}'.format(x, minx, maxx))
#print('y={} {} to {}'.format(y, miny, maxy))
spec2D = data[:, miny:maxy, minx:maxx]
spec2D = np.nansum(spec2D, axis=2)
#print('spec2D = {}'.format(np.shape(spec2D)))
#print('')
savename = getspec2Dname(mask, object['row'], object['id'], object['name'])
fits.writeto(savename, spec2D, overwrite=True)
#print(objects)
class muse_redshiftgui:
"""Defining the GUI class for MUSE redshift assignment"""
def __init__(self, mask, xsize=1000, ysize=1000):
self.mask = mask
self.xsize = xsize
self.ysize = ysize
# Read in the data cube
self.cube = mpdaf.obj.Cube('{}.fits'.format(self.mask))
self.whitelight = np.rot90(fits.getdata('{}_WHITE.fits'.format(self.mask)), 3)
self.cube_wcs = self.cube.wcs
self.wave = self.cube[:, 0, 0].wave.coord(np.arange(0, self.cube.shape[0], 1.0))
path = '{}_spec1D'.format(mask)
self.objects = Table.read('{}/{}_objects.fits'.format(path, mask))
# Set the initial row number to zero
self.row = 1
self.nRows = len(self.objects)
self.smoothing = 1
self.z = 0.0
self.redshifted = 0
# masking flags
self.mask_flag = 0
self.mask_left = np.array([0.0, 0.0])
self.mask_right = np.array([0.0, 0.0])
self.object_labelled = False
# Get the GUI ready
self.app = QtGui.QApplication([]) # Always start by initializing Qt
self.widget = QtGui.QWidget() # Define a top-level widget
# Set the widget size
self.widget.resize(self.xsize, self.ysize)
# Create the white-light image gui
#self.widget_whitelight = QtGui.QWidget() # Define a top-level widget
#self.widget_whitelight.resize(np.shape(self.whitelight)[0]*5, np.shape(self.whitelight)[1]*5)
#self.plot_whitelight_win = pg.GraphicsLayoutWidget()
#self.plot_whitelight_plot = self.plot_whitelight_win.addPlot()
#self.plot_whitelight = pg.ImageItem(border='w')
#self.plot_whitelight_plot.addItem(self.plot_whitelight)
#self.plot_whitelight_plot.setMouseEnabled(x=False, y=False)
#self.plot_whitelight_hist = pg.HistogramLUTWidget()
#self.plot_whitelight_hist.setImageItem(self.plot_whitelight)
#self.plot_whitelight_win.setAspectLocked(True)
#
#cm = self.plot_whitelight_hist.gradient.colorMap()
#cm.pos=np.array([1.,0.]) #is this really the easiest way to make white->black into black->white?
#self.plot_whitelight_hist.gradient.setColorMap(cm)
#self.layout_whitelight = QtGui.QGridLayout()
#self.widget_whitelight.setLayout(self.layout_whitelight)
#self.layout_whitelight.addWidget(self.plot_whitelight_win, 0, 0)
#self.layout_whitelight.addWidget(self.plot_whitelight_hist, 0, 1)
self.whitelight_win = QtGui.QMainWindow()
self.whitelight_win.resize(800,800)
self.whitelight_view = pg.ImageView()
self.whitelight_win.setCentralWidget(self.whitelight_view)
self.whitelight_win.show()
self.whitelight_win.setWindowTitle('pyqtgraph example: ImageView')
cm = self.whitelight_view.getHistogramWidget().gradient.colorMap()
cm.pos=np.array([1.,0.])
self.whitelight_view.setColorMap(cm)
#self.draw_whitelight()
#self.widget_whitelight.show()
#self.app.exec_()
# Set the background plotting widget
self.plot_redshift = pg.PlotWidget()
self.plot_redshift.getAxis('bottom').setPen(pg.mkPen('w', width=2))
self.plot_redshift.getAxis('top').setPen(pg.mkPen('w', width=2))
self.plot_redshift.getAxis('left').setPen(pg.mkPen('w', width=2))
self.plot_redshift.getAxis('right').setPen(pg.mkPen('w', width=2))
self.plot_redshift.getAxis('bottom').setStyle(tickLength=-15)
self.plot_redshift.getAxis('top').setStyle(tickLength=-15)
self.plot_redshift.getAxis('left').setStyle(tickLength=-15)
self.plot_redshift.getAxis('right').setStyle(tickLength=-15)
self.plot_redshift.showAxis('right')
self.plot_redshift.showAxis('top')
self.plot_redshift.setLabel('bottom', 'redshift')
self.plot_redshift.setLabel('left', 'chi2')
# Set the 2D spectrum
#self.plot_spec2D = pg.ImageView()
#self.plot_spec2D.removeItem(self.plot_spec2D.getHistogramWidget())
self.plot_spec2D_win = pg.GraphicsLayoutWidget()
# self.plot_spec2D_view = self.plot_spec2D_win.addViewBox()
self.plot_spec2D_plot = self.plot_spec2D_win.addPlot()
self.plot_spec2D = pg.ImageItem(border='w')
self.plot_spec2D_plot.addItem(self.plot_spec2D)
self.plot_spec2D_plot.setMouseEnabled(x=False, y=False)
self.plot_spec2D_hist = pg.HistogramLUTWidget()
self.plot_spec2D_hist.setImageItem(self.plot_spec2D)
cm = self.plot_spec2D_hist.gradient.colorMap()
cm.pos=np.array([1.,0.]) #is this really the easiest way to make white->black into black->white?
self.plot_spec2D_hist.gradient.setColorMap(cm)
# self.plot_spec2D.scene().sigMouseMoved.connect(self.mouseMoved_spec2D)
# Set the 1D spectrum
self.plot_spec1D = pg.PlotWidget()
self.plot_spec1D.getAxis('bottom').setPen(pg.mkPen('w', width=2))
self.plot_spec1D.getAxis('top').setPen(pg.mkPen('w', width=2))
self.plot_spec1D.getAxis('left').setPen(pg.mkPen('w', width=2))
self.plot_spec1D.getAxis('right').setPen(pg.mkPen('w', width=2))
self.plot_spec1D.getAxis('bottom').setStyle(tickLength=-15)
self.plot_spec1D.getAxis('top').setStyle(tickLength=-15)
self.plot_spec1D.getAxis('left').setStyle(tickLength=-15)
self.plot_spec1D.getAxis('right').setStyle(tickLength=-15)
self.plot_spec1D.showAxis('right')
self.plot_spec1D.showAxis('top')
self.plot_spec1D.setLabel('bottom', 'Wavelength [Å]')
# self.plot_spec2D.getAxis('bottom').linkToView(self.plot_spec1D.getVieWBox())
#self.plot_spec1D.setLabel('left', 'Flux')
self.mouse_x_spec1D = 0.0
self.mouse_y_spec1D = 0.0
self.mouse_x_spec2D = 0.0
self.mouse_y_spec2D = 0.0
# Tie the image and spectrum
# self.plot_spec2D.translate(4000,0)
# self.plot_spec2D.scale(2,1)
# self.plot_spec2D_view.linkView(self.plot_spec2D_view.XAxis,self.plot_spec1D.getViewBox())
# Setup the layout
self.layout = QtGui.QGridLayout()
self.widget.setLayout(self.layout)
# Set up right click menu
self.featureListMenu = QtGui.QMenu("Galaxy lines")
self.OVI1033 = QtGui.QAction("OVI 1033.82", self.featureListMenu)
self.OVI1033.triggered.connect(self.setRedshiftOVI1033)
self.featureListMenu.addAction(self.OVI1033)
self.HI1215 = QtGui.QAction("HI Lya 1215.24", self.featureListMenu)
self.HI1215.triggered.connect(self.setRedshiftHI1215)
self.featureListMenu.addAction(self.HI1215)
self.NV1240 = QtGui.QAction("NV 1240.81", self.featureListMenu)
self.NV1240.triggered.connect(self.setRedshiftNV1240)
self.featureListMenu.addAction(self.NV1240)
self.CIV1549 = QtGui.QAction("CIV 1549.48", self.featureListMenu)
self.CIV1549.triggered.connect(self.setRedshiftCIV1549)
self.featureListMenu.addAction(self.CIV1549)
self.OIII1665 = QtGui.QAction("OIII 1665.85", self.featureListMenu)
self.OIII1665.triggered.connect(self.setRedshiftOIII1665)
self.featureListMenu.addAction(self.OIII1665)
self.CIII1908 = QtGui.QAction("CIII 1908.734", self.featureListMenu)
self.CIII1908.triggered.connect(self.setRedshiftCIII1908)
self.featureListMenu.addAction(self.CIII1908)
self.MgII2799 = QtGui.QAction("MgII 2799.117", self.featureListMenu)
self.MgII2799.triggered.connect(self.setRedshiftMgII2799)
self.featureListMenu.addAction(self.MgII2799)
self.OII3728 = QtGui.QAction("[OII] 3728.60", self.featureListMenu)
self.OII3728.triggered.connect(self.setRedshiftOII3728)
self.featureListMenu.addAction(self.OII3728)
self.CaIIK3934 = QtGui.QAction("CaII K 3934.777", self.featureListMenu)
self.CaIIK3934.triggered.connect(self.setRedshiftCaIIK3934)
self.featureListMenu.addAction(self.CaIIK3934)
self.CaIIH3969 = QtGui.QAction("CaII H 3969.588", self.featureListMenu)
self.CaIIH3969.triggered.connect(self.setRedshiftCaIIH3969)
self.featureListMenu.addAction(self.CaIIH3969)
self.Hd4102 = QtGui.QAction("Hd 4102.89", self.featureListMenu)
self.Hd4102.triggered.connect(self.setRedshiftHd4102)
self.featureListMenu.addAction(self.Hd4102)
self.Gband4305 = QtGui.QAction("G-band 4305.61", self.featureListMenu)
self.Gband4305.triggered.connect(self.setRedshiftGband4305)
self.featureListMenu.addAction(self.Gband4305)
self.Hg4341 = QtGui.QAction("Hg 4341.68", self.featureListMenu)
self.Hg4341.triggered.connect(self.setRedshiftHg4341)
self.featureListMenu.addAction(self.Hg4341)
self.OIII4364 = QtGui.QAction("[OIII] 4364.436", self.featureListMenu)
self.OIII4364.triggered.connect(self.setRedshiftOIII4364)
self.featureListMenu.addAction(self.OIII4364)
self.Hb4862 = QtGui.QAction("Hb 4862", self.featureListMenu)
self.Hb4862.triggered.connect(self.setRedshiftHb4862)
self.featureListMenu.addAction(self.Hb4862)
self.OIII4960 = QtGui.QAction("[OIII] 4960.295", self.featureListMenu)
self.OIII4960.triggered.connect(self.setRedshiftOIII4960)
self.featureListMenu.addAction(self.OIII4960)
self.OIII5008 = QtGui.QAction("[OIII] 5008.240", self.featureListMenu)
self.OIII5008.triggered.connect(self.setRedshiftOIII5008)
self.featureListMenu.addAction(self.OIII5008)
self.MgI5176 = QtGui.QAction("MgI 5176.7", self.featureListMenu)
self.MgI5176.triggered.connect(self.setRedshiftMgI5176)
self.featureListMenu.addAction(self.MgI5176)
self.NaI5895 = QtGui.QAction("NaI 5895.6", self.featureListMenu)
self.NaI5895.triggered.connect(self.setRedshiftNaI5895)
self.featureListMenu.addAction(self.NaI5895)
self.OI6302 = QtGui.QAction("[OI] 6302.046", self.featureListMenu)
self.OI6302.triggered.connect(self.setRedshiftOI6302)
self.featureListMenu.addAction(self.OI6302)
self.OI6365 = QtGui.QAction("[OI] 6365.536", self.featureListMenu)
self.OI6365.triggered.connect(self.setRedshiftOI6365)
self.featureListMenu.addAction(self.OI6365)
self.NII6549 = QtGui.QAction("[NII] 6549.86", self.featureListMenu)
self.NII6549.triggered.connect(self.setRedshiftNII6549)
self.featureListMenu.addAction(self.NII6549)
self.Ha6564 = QtGui.QAction("Ha 6564.61", self.featureListMenu)
self.Ha6564.triggered.connect(self.setRedshiftHa6564)
self.featureListMenu.addAction(self.Ha6564)
self.NII6585 = QtGui.QAction("[NII] 6585.27", self.featureListMenu)
self.NII6585.triggered.connect(self.setRedshiftNII6585)
self.featureListMenu.addAction(self.NII6585)
self.SII6718 = QtGui.QAction("[SII] 6718.29", self.featureListMenu)
self.SII6718.triggered.connect(self.setRedshiftSII6718)
self.featureListMenu.addAction(self.SII6718)
self.SII6732 = QtGui.QAction("[SII] 6732.67", self.featureListMenu)
self.SII6732.triggered.connect(self.setRedshiftSII6732)
self.featureListMenu.addAction(self.SII6732)
self.CaII8500 = QtGui.QAction("CaII 8500.36", self.featureListMenu)
self.CaII8500.triggered.connect(self.setRedshiftCaII8500)
self.featureListMenu.addAction(self.CaII8500)
self.CaII8544 = QtGui.QAction("CaII 8544.44", self.featureListMenu)
self.CaII8544.triggered.connect(self.setRedshiftCaII8544)
self.featureListMenu.addAction(self.CaII8544)
self.CaII8664 = QtGui.QAction("CaII 8664.52", self.featureListMenu)
self.CaII8664.triggered.connect(self.setRedshiftCaII8664)
self.featureListMenu.addAction(self.CaII8664)
self.plot_spec1D.getPlotItem().ctrlMenu = []
self.plot_spec1D.getPlotItem().ctrlMenu = [self.featureListMenu]
# Add listeners
self.plot_spec1D.scene().sigMouseMoved.connect(self.mouseMoved_spec1D)
self.plot_spec1D.keyPressEvent = self.keypress_spec1D
self.plot_redshift.scene().sigMouseMoved.connect(self.mouseMoved_redshift)
self.plot_redshift.keyPressEvent = self.keypress_redshift
self.paramSpec = [
dict(name='z=', type='str', value='{:0.5f}'.format(self.z), dec=False, readonly=True),
dict(name='quality:', type='str', value='', readonly=True),
dict(name='class:', type='str', value='', readonly=True),
dict(name='row:', type='str', value='', readonly=True),
dict(name='id:', type='str', value='', readonly=True),
dict(name='x, y:', type='str', value='(0.0, 0.0)', readonly=True),
dict(name='Show lines', type='bool', value=True),
dict(name='r=', type='float', value=0.8, step=0.2, limits=[0.0, None]),
dict(name='Re-extract', type='action')
# dict(name='extraction center:', type='str', value='', readonly=True)
# dict(name='extraction aper:', type='str', value='', readonly=True)
]
self.param = pt.Parameter.create(name='Options', type='group', children=self.paramSpec)
#Redraw when the boolean option buttons are pressed
self.param.children()[6].sigValueChanged.connect(self.draw)
self.param.children()[8].sigStateChanged.connect(self.reExtract)
# self.param.children()[6].sigValueChanged.connect(self.draw)
# self.param.children()[7].sigValueChanged.connect(self.draw)
# self.param.children()[8].sigValueChanged.connect(self.setExtFlag)
self.tree = pt.ParameterTree()
self.tree.setParameters(self.param)
self.features = Table.read(os.environ['REDSHIFTING'] + '/redshiftLines.dat', format='ascii')
self.objectsTable = pg.TableWidget(editable=False, sortable=False)
self.objectsTable.setFormat('%0.5f', 3)
self.setTable()
self.objectsTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.objectsTable.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.objectsTable.doubleClicked.connect(self.goToObject)
# Add comment bar
self.comment_text = QtGui.QLineEdit('comments here')
self.comment_text.focusOutEvent = self.updateComment
# Add plot_bg to the layout
self.layout.addWidget(self.plot_redshift, 0, 0)
self.layout.addWidget(self.plot_spec2D_win, 1, 0)
self.layout.addWidget(self.plot_spec2D_hist, 1, 1)
self.layout.addWidget(self.tree, 0, 1)
self.layout.addWidget(self.objectsTable, 2, 1)
self.layout.addWidget(self.plot_spec1D, 2, 0)
self.layout.addWidget(self.comment_text, 4, 0)
self.layout.setColumnStretch(0, 4)
self.layout.setColumnStretch(1, 1)
self.layout.setColumnStretch(1, 1)
#self.layout.setColumnStretch(0, 2)
#self.layout.setColumnStretch(0, 2)
#self.layout.setColumnStretch(3, 1)
#self.layout.setColumnStretch(3, 0)
self.layout.setRowStretch(0, 2)
self.layout.setRowStretch(1, 1)
self.layout.setRowStretch(2, 3)
self.setSpec()
#Set 2D X-axis values to be equal to the 1D--this works so long as all spectra are the same size
#since it is just based on the first one read in
self.plot_spec2D.translate(min(self.wave),0)
self.plot_spec2D.scale((max(self.wave)-min(self.wave))/len(self.wave),1)
self.plot_spec2D_plot.setXLink(self.plot_spec1D)
self.draw()
self.widget.show()
self.app.exec_()
def reExtract(self):
print('Re-extracting!')
self.objects[self.row-1]['radius'] = self.param['r=']
# Get the 1D spectrum
object = self.objects[self.row-1]
spec = self.cube.aperture((object['dec'], object['ra']), object['radius'])
flux = np.array(spec.data)
ivar = np.array(1/(spec.var*1.4))
error = np.sqrt(1/ivar)
spec = formatspectrum(self.wave, flux, error)
# Save the 1D spectrum
savename = getspec1Dname(self.mask, object['row'], object['id'], object['name'])
print(savename)
fits.writeto(savename, spec, overwrite=True)
self.setTable()
self.setSpec()
self.draw()
def updateComment(self, event):
self.objects[self.row-1]['comment'] = self.comment_text.text()
def setTable(self):
self.objectsTable.setData(np.array(self.objects['id', 'name', 'class', 'redshift', 'quality', 'comment']))
def goToObject(self):
#print('Going to object...')
self.row = self.objectsTable.selectedItems()[0].row()+1
self.setSpec()
# self.draw()
def setRedshiftOVI1033(self):
wave0 = 1033.82
self.z = self.mouse_x_spec1D/wave0 - 1
#self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftHI1215(self):
wave0 = 1215.24
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftNV1240(self):
wave0 = 1240.81
self.z = self.mouse_x_spec1D/wave0 - 1
#self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCIV1549(self):
wave0 = 1549.48
self.z = self.mouse_x_spec1D/wave0 - 1
#self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOIII1665(self):
wave0 = 1665.85
self.z = self.mouse_x_spec1D/wave0 - 1
#self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCIII1908(self):
wave0 = 1908.734
self.z = self.mouse_x_spec1D/wave0 - 1
#self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftMgII2799(self):
wave0 = 2799.117
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOII3728(self):
wave0 = 3728.60
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCaIIK3934(self):
wave0 = 3934.777
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCaIIH3969(self):
wave0 = 3969.588
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftHd4102(self):
wave0 = 4102.89
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftGband4305(self):
wave0 = 4305.61
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftHg4341(self):
wave0 = 4341.68
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOIII4364(self):
wave0 = 4364.436
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftHb4862(self):
wave0 = 4862.68
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOIII4960(self):
wave0 = 4960.295
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOIII5008(self):
wave0 = 5008.240
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftMgI5176(self):
wave0 = 5176.7
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftNaI5895(self):
wave0 = 5895.6
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOI6302(self):
wave0 = 6302.046
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftOI6365(self):
wave0 = 6365.536
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftNII6549(self):
wave0 = 6549.86
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
    def setRedshiftHa6564(self):
        wave0 = 6564.61
        self.z = self.mouse_x_spec1D/wave0 - 1
        self.fitObjectAtRedshift()
        self.objects[self.row-1]['redshift'] = self.z
        self.param['z='] = '{:0.5f}'.format(self.z)
        self.draw()
def setRedshiftNII6585(self):
wave0 = 6585.27
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftSII6718(self):
wave0 = 6718.29
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftSII6732(self):
wave0 = 6732.67
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCaII8500(self):
wave0 = 8500.36
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCaII8544(self):
wave0 = 8544.44
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setRedshiftCaII8664(self):
wave0 = 8664.52
self.z = self.mouse_x_spec1D/wave0 - 1
self.fitObjectAtRedshift()
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def setClass(self, classification):
self.objects[self.row-1]['class'] = classification
self.param['class:'] = classification
self.draw()
def setQuality(self, quality):
self.objects[self.row-1]['quality'] = quality
self.param['quality:'] = quality
self.draw()
def setExtFlag(self):
self.objects[self.row-1]['extflag']=self.param['Bad Extraction:']
self.save()
def keypress_redshift(self, event):
if self.plot_redshift_current:
#print('')
#print(event.text())
#print(event.key())
#print('{:0.4f}, {:0.2f}'.format(self.mouse_x_redshift, self.mouse_y_redshift))
if (event.text() == 'n'):
self.advance(1)
if (event.text() == 'N'):
self.advance(10)
if (event.text() == 'b'):
self.advance(-1)
if (event.text() == 'B'):
self.advance(-10)
if event.text() == '[':
self.panx_redshift(-1.0/3.0)
if event.text() == ']':
self.panx_redshift(1.0/3.0)
if event.text() == '{':
self.panx_redshift(-1.0)
if event.text() == '}':
self.panx_redshift(1.0)
if event.text() == 'x':
self.zoomxy_redshift(1.0/1.5, 1.0)
if event.text() == 'X':
self.zoomxy_redshift(1.5, 1.0)
if event.text() == 'y':
self.zoomxy_redshift(1.0, 1.0/1.5)
if event.text() == 'Y':
self.zoomxy_redshift(1.0, 1.5)
if (event.text() == 'w') | (event.text() == 'W'):
self.plot_redshift.autoRange()
# Set redshift
if event.text() == 'z':
self.z = self.mouse_x_redshift
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.fitObjectAtRedshift()
self.draw()
if (event.text() == 'h') | (event.text() == 'H'):
self.setClass('star')
if (event.text() == 'g') | (event.text() == 'G'):
self.setClass('galaxy')
if (event.text() == 'j') | (event.text() == 'J'):
self.setClass('quasar')
if (event.text() == 'k') | (event.text() == 'K'):
self.setClass('hizgal')
# if event.text() == 'R':
# self.redshiftAll()
if event.text() == 'r':
self.redshiftObject()
if event.text() == 'l':
self.redshiftObjectLocal()
if (event.text() == 'a') | (event.text() == 'A'):
self.setQuality(2)
if (event.text() == 's') | (event.text() == 'S'):
self.setQuality(1)
if (event.text() == 'd') | (event.text() == 'D'):
self.setQuality(0)
if (event.text() == 'f') | (event.text() == 'F'):
self.setQuality(-1)
if event.text() == ';':
self.incrementRedshift(-0.0001)
if event.text() == "'":
self.incrementRedshift(+0.0001)
if event.text() == ':':
self.incrementRedshift(-0.001)
if event.text() == '"':
self.incrementRedshift(+0.001)
if event.text() == ',':
self.incrementRedshift(-0.01)
if event.text() == '.':
self.incrementRedshift(+0.01)
if event.text() == '<':
self.incrementRedshift(-0.1)
if event.text() == '>':
self.incrementRedshift(+0.1)
# left arrow
if event.key() == 16777234:
self.incrementRedshift(-1)
# Right arrow
if event.key() == 16777236:
self.incrementRedshift(+1)
def zoomxy_redshift(self, scalex, scaley):
"""Zoom in or out in wavelength (x) and/or flux (y)"""
xRange = self.plot_redshift.getViewBox().state['viewRange'][0]
x0 = xRange[0]
x1 = xRange[1]
xRange = (x1 - x0)*scalex
x0_new = self.mouse_x_redshift - xRange/2.0
x1_new = self.mouse_x_redshift + xRange/2.0
self.plot_redshift.setXRange(x0_new, x1_new, padding=0)
yRange = self.plot_redshift.getViewBox().state['viewRange'][1]
y0 = yRange[0]
y1 = yRange[1]
yRange = (y1 - y0)*scaley
y0_new = self.mouse_y_redshift - yRange/2.0
y1_new = self.mouse_y_redshift + yRange/2.0
self.plot_redshift.setYRange(y0_new, y1_new, padding=0)
def zoom_default(self):
        q1, q2 = np.percentile(self.flux1D[self.spec['mask'].astype('bool')], [0.1, 99.9])
        q1 = np.min([0.0, q1, -0.1*q2])
        self.plot_spec1D.setYRange(q1, 2*q2, padding=0)
        self.plot_spec1D.setXRange(np.min(self.wave), np.max(self.wave), padding=0)
def panx_redshift(self, scalex):
"""Pan in the wavelength direction"""
xRange = self.plot_redshift.getViewBox().state['viewRange'][0]
x0 = xRange[0]
x1 = xRange[1]
shift = scalex*(x1 - x0)
x0_new = x0 + shift
x1_new = x1 + shift
self.plot_redshift.setXRange(x0_new, x1_new, padding=0)
def keypress_spec1D(self, event):
if self.plot_spec1D_current:
#print(event.text())
#print(event.key())
#print('')
if (event.text() == 'n'):
self.advance(1)
if (event.text() == 'N'):
self.advance(10)
if (event.text() == 'b'):
self.advance(-1)
if (event.text() == 'B'):
self.advance(-10)
if event.text() == '[':
self.panx_spec(-1.0/3.0)
if event.text() == ']':
self.panx_spec(1.0/3.0)
if event.text() == '{':
self.panx_spec(-1.0)
if event.text() == '}':
self.panx_spec(1.0)
if event.text() == 'x':
self.zoomxy_spec(1.0/1.5, 1.0)
if event.text() == 'X':
self.zoomxy_spec(1.5, 1.0)
if event.text() == 'y':
self.zoomxy_spec(1.0, 1.0/1.5)
if event.text() == 'Y':
self.zoomxy_spec(1.0, 1.5)
if event.text() in ['t','T','e','E']:
                # explicit list membership, so an empty event.text() (a bare Shift press) never matches
self.trimxy_spec(event.text())
if (event.text() == 'w') | (event.text() == 'W'):
self.zoom_default()
# self.plot_spec1D.autoRange()
# self.updateXrange_1D()
if (event.text() == '=') | (event.text() == '+'):
self.smoothing = self.smoothing + 2
if self.smoothing == 3:
self.smoothing = 5
self.smoothSpec()
if (event.text() == '-') | (event.text() == '_'):
self.smoothing = self.smoothing - 2
if self.smoothing < 5:
self.smoothing = 1
self.smoothSpec()
if event.text() == 'm':
self.changeMask(0)
if event.text() == 'M':
# self.changeMask(0)
self.autoMask()
if event.text() == 'u':
self.changeMask(1)
if event.text() == 'U':
self.changeMask(1)
if (event.text() == 'h') | (event.text() == 'H'):
self.setClass('star')
if (event.text() == 'g') | (event.text() == 'G'):
self.setClass('galaxy')
if (event.text() == 'j') | (event.text() == 'J'):
self.setClass('quasar')
if (event.text() == 'k') | (event.text() == 'K'):
self.setClass('hizgal')
if event.text() == '/':
self.redshiftAll()
if event.text() == 'r':
self.redshiftObject()
if event.text() == 'l':
self.redshiftObjectLocal()
if (event.text() == 'a') | (event.text() == 'A'):
self.setQuality(2)
if (event.text() == 's') | (event.text() == 'S'):
self.setQuality(1)
if (event.text() == 'd') | (event.text() == 'D'):
self.setQuality(0)
if (event.text() == 'f') | (event.text() == 'F'):
self.setQuality(-1)
# if (event.text() == 'c') & (self.version=='carpy'):
# self.setExtFlag()
if event.text() == ';':
self.incrementRedshift(-0.0001)
if event.text() == "'":
self.incrementRedshift(+0.0001)
if event.text() == ':':
self.incrementRedshift(-0.001)
if event.text() == '"':
self.incrementRedshift(+0.001)
if event.text() == ',':
self.incrementRedshift(-0.01)
if event.text() == '.':
self.incrementRedshift(+0.01)
if event.text() == '<':
self.incrementRedshift(-0.1)
if event.text() == '>':
self.incrementRedshift(+0.1)
# left arrow
if event.key() == 16777234:
self.incrementRedshift(-1)
# Right arrow
if event.key() == 16777236:
self.incrementRedshift(+1)
if event.text() == '1':
self.setRedshiftHa6564()
if event.text() == '2':
self.setRedshiftOIII5008()
if event.text() == '3':
self.setRedshiftOIII4960()
if event.text() == '4':
self.setRedshiftHb4862()
if event.text() == '5':
self.setRedshiftHg4341()
if event.text() == '6':
self.setRedshiftHd4102()
if event.text() == '7':
self.setRedshiftOII3728()
if event.text() == '8':
self.setRedshiftCIII1908()
if event.text() == '9':
self.setRedshiftCIV1549()
if event.text() == '0':
self.setRedshiftHI1215()
if event.text() == '!':
self.setRedshiftNaI5895()
if event.text() == '@':
self.setRedshiftMgI5176()
if event.text() == '#':
self.setRedshiftGband4305()
if event.text() == '$':
self.setRedshiftCaIIH3969()
if event.text() == '%':
self.setRedshiftCaIIK3934()
if event.text() == '^':
self.setRedshiftMgII2799()
if event.text() == 'z':
self.fitObjectAtRedshift()
# if return or enter is pressed then save.
if (event.key() == 16777220) | (event.key() == 16777221) | (event.key() == 96) | (event.key() == 126):
self.save()
def incrementRedshift(self, dz):
self.z = self.z + dz
self.objects[self.row-1]['redshift'] = self.z
self.param['z='] = '{:0.5f}'.format(self.z)
self.draw()
def redshiftAll(self):
# Start at row 1
for object in self.objects:
# if it already has a redshift:
# continue
self.row = object['row']
self.setSpec()
if self.redshifted:
print('{}/{} {} already redshifted. Skipping'.format(self.row, self.nRows, object['class']))
else:
self.autoMask()
self.redshiftObject()
print('{}/{} {} z_best={:0.4f}'.format(self.row, self.nRows, object['class'], object['redshift']))
def fitObjectAtRedshift(self):
spec = self.spec
if self.objects[self.row-1]['class'] == 'galaxy':
z = self.z
eigenvalues, model, chi2pdf = redshift.fitatz_galaxy(spec, z)
spec['model'] = model
if self.objects[self.row-1]['class'] == 'star':
z = self.z
eigenvalues, model, chi2pdf = redshift.fitatz_star(spec, z)
spec['model'] = model
if self.objects[self.row-1]['class'] == 'quasar':
z = self.z
eigenvalues, model, chi2pdf = redshift.fitatz_qso(spec, z)
spec['model'] = model
if self.objects[self.row-1]['class'] == 'hizgal':
z = self.z
if z > 2.5:
eigenvalues, model, chi2pdf = redshift.fitatz_latis(spec, z)
spec['model'] = model
print('Redshift assigned by hand {} {} z={:0.4f} and saved'.format(self.objects[self.row-1]['row'],self.objects[self.row-1]['id'], z))
self.objects[self.row-1]['redshift'] = z
#self.redshifted = 1
#self.redshifts = Table(redshifts)
self.save()
self.draw()
def redshiftObjectLocal(self):
spec = self.spec
z = self.z
if self.objects[self.row-1]['class'] == 'galaxy':
redshifts = redshift.findz_galaxy(spec, zmin=z-0.01, zmax=z+0.01, dz=0.0001)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
eigenvalues, model, chi2pdf = redshift.fitatz_galaxy(spec, z)
spec['model'] = model
if self.objects[self.row-1]['class'] == 'star':
redshifts = redshift.findz_star(spec, zmin=self.z-0.001, zmax=self.z+0.001, dz=0.0001)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
eigenvalues, model, chi2pdf = redshift.fitatz_star(spec, z)
spec['model'] = model
if self.objects[self.row-1]['class'] == 'quasar':
redshifts = redshift.findz_qso(spec, zmin=self.z-0.01, zmax=self.z+0.01, dz=0.0001)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
eigenvalues, model, chi2pdf = redshift.fitatz_qso(spec, z)
spec['model'] = model
if self.objects[self.row-1]['class'] == 'hizgal':
redshifts = redshift.findz_latis(spec, zmin=z-0.01, zmax=z+0.01, dz=0.0001)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
eigenvalues, model, chi2pdf = redshift.fitatz_latis(spec, z)
spec['model'] = model
self.objects[self.row-1]['redshift'] = z
if self.redshifted == 0:
self.redshifted = 1
self.redshifts = Table(redshifts)
else:
redshifts = vstack((Table(self.redshifts), Table(redshifts)))
redshifts = unique(redshifts, keys='z')
redshifts.sort('z')
self.redshifts = redshifts
self.model1D = spec['model']
self.spec = spec
print('Redshifting Locally {} {} z={:0.4f} and saved'.format(self.objects[self.row-1]['row'],self.objects[self.row-1]['id'], z))
self.z = z
self.param['z='] = '{:0.5f}'.format(self.z)
self.save()
self.draw()
def redshiftObject(self):
#spec = self.spec
nGoodPix = np.sum(self.spec['mask'])
if nGoodPix > 5:
if self.objects[self.row-1]['class'] == 'galaxy':
redshifts = redshift.findz_galaxy(self.spec, zmin=-0.01, zmax=1.5, dz=0.0003)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
redshifts_fine = redshift.findz_galaxy(self.spec, zmin=z-0.01, zmax=z+0.01, dz=0.0001)
redshifts = vstack((Table(redshifts), Table(redshifts_fine)))
redshifts = unique(redshifts, keys='z')
redshifts.sort('z')
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
redshifts = np.array(redshifts)
eigenvalues, model, chi2pdf = redshift.fitatz_galaxy(self.spec, z)
self.spec['model'] = model
if self.objects[self.row-1]['class'] == 'star':
redshifts = redshift.findz_star(self.spec, zmin=-0.01, zmax=0.01, dz=0.0001)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
eigenvalues, model, chi2pdf = redshift.fitatz_star(self.spec, z)
self.spec['model'] = model
if self.objects[self.row-1]['class'] == 'quasar':
redshifts = redshift.findz_qso(self.spec, zmin=-0.01, zmax=4.0, dz=0.001)
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
redshifts_fine = redshift.findz_qso(self.spec, zmin=z-0.01, zmax=z+0.01, dz=0.0001)
redshifts = vstack((Table(redshifts), Table(redshifts_fine)))
redshifts = unique(redshifts, keys='z')
redshifts.sort('z')
minIndex = np.argmin(redshifts['chi2_pdf'])
z = redshifts[minIndex]['z']
redshifts = np.array(redshifts)
eigenvalues, model, chi2pdf = redshift.fitatz_qso(self.spec, z)
self.spec['model'] = model
if self.objects[self.row-1]['class'] == 'hizgal':
redshifts = redshift.findz_latis(self.spec)
minIndex = np.argmin(redshifts['chi2_pdf'])
print(Table(redshifts))
z = redshifts[minIndex]['z']
eigenvalues, model, chi2pdf = redshift.fitatz_latis(self.spec, z)
self.spec['model'] = model
print('Redshifting {} {} z={:0.4f} and saved'.format(self.objects[self.row-1]['row'],
self.objects[self.row-1]['id'], z))
print()
self.objects[self.row-1]['redshift'] = z
self.z = z
self.redshifted = 1
self.redshifts = Table(redshifts)
self.model1D = self.spec['model']
self.param['z='] = '{:0.5f}'.format(self.z)
self.save()
self.draw()
        else:
            self.z = 0.0
            self.redshifted = 0
def save(self):
self.setTable()
#
path = '{}_spec1D'.format(self.mask)
self.objects.write('{}/{}_objects.fits'.format(path, self.mask), overwrite=True)
savename = getspec1Dname(self.mask, self.objects[self.row-1]['row'],
self.objects[self.row-1]['id'],
self.objects[self.row-1]['name'])
if not os.path.isfile(savename):
return
fits.writeto(savename, self.spec, overwrite=True)
# If we have a redshift array, store it
if self.redshifted == 1:
savename = getredshift1Dname(self.mask, self.objects[self.row-1]['row'],
self.objects[self.row-1]['id'],
self.objects[self.row-1]['name'])
self.redshifts.write(savename, overwrite=True)
print(Table(self.spec))
print('Saved')
def autoMask(self):
"""Automatically mask things below and above some range, and the A band"""
sky5580 = [5578.5 - 7.0, 5578.5 + 7.0]
index = np.where(((self.wave > sky5580[0]) & (self.wave < sky5580[1])) | (self.error1D == 0))
self.spec['mask'][index] = 0
self.draw()
def changeMask(self, newvalue):
"""Change the mask"""
if self.mask_flag == 0:
self.mask_left = np.array([self.mouse_x_spec1D, self.mouse_y_spec1D])
self.mask_flag = 1
else:
self.mask_right = np.array([self.mouse_x_spec1D, self.mouse_y_spec1D])
wave0 = np.min([self.mask_left[0], self.mask_right[0]])
wave1 = np.max([self.mask_left[0], self.mask_right[0]])
index = np.where((self.wave > wave0) & (self.wave < wave1))
self.spec['mask'][index] = newvalue
self.mask_flag = 0
self.draw()
def smoothSpec(self):
"""Smooth the spectrum using Savitzky-Golay filter."""
if self.smoothing > 1:
self.flux1D = savgol_filter(self.spec['flux'], self.smoothing, 2)
self.error1D = savgol_filter(self.spec['error'], self.smoothing, 2)/np.sqrt(self.smoothing)
self.model1D = savgol_filter(self.spec['model'], self.smoothing, 2)
if self.smoothing == 1:
self.flux1D = self.spec['flux']
self.error1D = self.spec['error']
self.model1D = self.spec['model']
self.draw()
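    # Note on the error handling above: dividing the Savitzky-Golay-smoothed
    # error by sqrt(window) is a rough independent-pixel approximation (the
    # filter is a local polynomial fit, not a boxcar mean), so the smoothed
    # error array should be read as indicative rather than exact.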
def trimxy_spec(self, key):
"""Trim plotting region in wavelength (x) and/or flux (y)"""
if key in 'eE':
xRange = self.plot_spec1D.getViewBox().state['viewRange'][0]
x0 = xRange[0]
x1 = xRange[1]
xnew = self.mouse_x_spec1D
if key=='E': x1=xnew
else: x0=xnew
self.plot_spec1D.setXRange(x0, x1, padding=0)
elif key in 'tT':
yRange = self.plot_spec1D.getViewBox().state['viewRange'][1]
y0 = yRange[0]
y1 = yRange[1]
ynew = self.mouse_y_spec1D
if key=='t': y1=ynew
else: y0=ynew
self.plot_spec1D.setYRange(y0, y1, padding=0)
self.updateXrange_1D()
def zoomxy_spec(self, scalex, scaley):
"""Zoom in or out in wavelength (x) and/or flux (y)"""
xRange = self.plot_spec1D.getViewBox().state['viewRange'][0]
x0 = xRange[0]
x1 = xRange[1]
xRange = (x1 - x0)*scalex
x0_new = self.mouse_x_spec1D - xRange/2.0
x1_new = self.mouse_x_spec1D + xRange/2.0
self.plot_spec1D.setXRange(x0_new, x1_new, padding=0)
yRange = self.plot_spec1D.getViewBox().state['viewRange'][1]
y0 = yRange[0]
y1 = yRange[1]
yRange = (y1 - y0)*scaley
y0_new = self.mouse_y_spec1D - yRange/2.0
y1_new = self.mouse_y_spec1D + yRange/2.0
self.plot_spec1D.setYRange(y0_new, y1_new, padding=0)
self.updateXrange_1D()
# self.specCursor.setPos((x1_new-x0_new)/2,(y1_new-y0_new)/2)
def panx_spec(self, scalex):
"""Pan in the wavelength direction"""
xRange = self.plot_spec1D.getViewBox().state['viewRange'][0]
x0 = xRange[0]
x1 = xRange[1]
shift = scalex*(x1 - x0)
x0_new = x0 + shift
x1_new = x1 + shift
self.plot_spec1D.setXRange(x0_new, x1_new, padding=0)
self.updateXrange_1D()
def mouseMoved_spec1D(self, pos):
"""Keep track of where the mouse and update the xrange on the 2D plot to match the 1D"""
self.plot_spec1D_current = True
self.plot_spec2D_current = False
self.plot_redshift_current = False
self.mouse_x_spec1D = self.plot_spec1D.mapToView(pos).x()
self.mouse_y_spec1D = self.plot_spec1D.mapToView(pos).y()
self.updateXrange_1D()
self.setTitle_1D()
#This is now obsolete since the axes are linked
def updateXrange_1D(self):
pass
# xRange = self.plot_spec1D.getViewBox().state['viewRange'][0]
# x0 = xRange[0]
# x1 = xRange[1]
# # Interpolate wavelength to index very inefficient way to do this but I am lazy
# indexes = np.arange(len(self.wave))
# indexes_interp = interp1d(self.wave, indexes, bounds_error=False, fill_value='extrapolate')
# index0 = indexes_interp(x0)
# index1 = indexes_interp(x1)
# #index = np.where((self.wave > x0) & (self.wave < x1))[0]
# #if len(index) > 0:
# #index0 = np.min(index)
# #index1 = np.max(index)
# self.plot_spec2D_view.setXRange(index0, index1, padding=0.035)
def advance(self, delt):
self.save()
self.row = self.row + delt
if self.row < 1:
self.row = self.nRows
if self.row > self.nRows:
self.row = 1
self.setSpec()
#print('{}/{}'.format(self.row, self.nRows))
#self.draw()
    def setSpec(self, autoRange=True):
        """Set the spectrum to current row"""
        print('setSpec')
self.z = self.objects[self.row-1]['redshift']
self.id = self.objects[self.row-1]['id']
self.name = self.objects[self.row-1]['name']
self.comment_text.setText(self.objects[self.row-1]['comment'])
#if self.version=='carpy':
# self.id=self.objects[self.row-1]['id']
# if not os.path.isfile(getspec1Dname(self.mask, self.row, self.id)):
# print('Files for {} do not exist. Moving on.'.format(self.id))
# self.redshifted=0
# self.advance(1)
# return
# self.flux2D=fits.getdata(getspec2Dname(self.mask,self.id)).transpose()
#
#elif self.version=='cosmos':
# # Get the apnum header parameter
# self.apnum1D = self.header1D['APNUM{}'.format(self.row)]
# self.apnum2D = self.header2D['APNUM{}'.format(self.row)]
# self.apnum1Darray = self.apnum1D.split(' ')
# self.apnum2Darray = self.apnum2D.split(' ')
# self.id = self.apnum1Darray[1]
# self.y0=int(self.header2D['CSECT{}A'.format(self.row)])
# self.y1=int(self.header2D['CSECT{}B'.format(self.row)])
# self.y0 = int(float(self.apnum2Darray[2]))
# self.y1 = int(float(self.apnum2Darray[3]))
# self.flux2D = self.spec2Darray[self.y0:self.y1, :].transpose()
self.spec = fits.getdata(getspec1Dname(self.mask, self.row, self.id, self.name))
print(Table(self.spec))
self.flux2D = fits.getdata(getspec2Dname(self.mask, self.row, self.id, self.name))
self.wave = self.spec['wave']
self.flux1D = self.spec['flux']
self.flux1Draw = self.spec['raw']
self.error1D = self.spec['error']
self.error1Draw=self.spec['rawerr']
self.model1D = self.spec['model']
self.flat1D = self.spec['flat']
self.arc1D = self.spec['arc']
print('Done reading')
self.smoothSpec()
        print('Done smoothing')
# Check for redshift filename and read in if present.
redshiftFilename = getredshift1Dname(self.mask,
self.objects[self.row-1]['row'],
self.objects[self.row-1]['id'],
self.objects[self.row-1]['name'])
if os.path.isfile(redshiftFilename):
self.redshifted = 1
self.redshifts = Table.read(redshiftFilename)
#print('Already redshifted')
else:
self.redshifted = 0
self.redshifts = None
print('drawing')
self.draw()
print('done drawing')
self.plot_redshift.autoRange()
if autoRange:
self.plot_spec1D.autoRange()
self.zoom_default()
#self.plot_spec2D_hist.setHistogramRange(*np.percentile(self.flux2D,[0.1,99.9]))
self.plot_spec2D_hist.setLevels(*np.percentile(self.flux2D,[0.5,99.5]))
self.param['row:'] = '{}/{}'.format(self.row, self.nRows)
self.param['id:'] = '{}'.format(self.objects[self.row-1]['id'])
self.param['class:'] = '{}'.format(self.objects[self.row-1]['class'])
self.param['z='] = '{:0.5f}'.format(self.objects[self.row-1]['redshift'])
self.param['r='] = '{:0.2f}'.format(self.objects[self.row-1]['radius'])
self.param['quality:'] = '{}'.format(self.objects[self.row-1]['quality'])
ra = self.objects[self.row-1]['ra']
dec = self.objects[self.row-1]['dec']
yx = self.cube_wcs.sky2pix((dec, ra))[0]
self.param['x, y:'] = '{:0.2f}, {:0.2f}'.format(float(yx[1]), float(yx[0]))
print('Set spec done')
print('')
def mouseMoved_redshift(self, pos):
"""Keep track of where the mouse and update the xrange on the 2D plot to match the 1D"""
self.plot_spec1D_current = False
self.plot_spec2D_current = False
self.plot_redshift_current = True
self.mouse_x_redshift = self.plot_redshift.mapToView(pos).x()
self.mouse_y_redshift = self.plot_redshift.mapToView(pos).y()
def mouseMoved_spec2D(self, pos):
"""Keep track of where the mouse and update the xrange on the 2D plot to match the 1D"""
self.plot_spec1D_current = False
self.plot_spec2D_current = True
self.plot_redshift_current = False
self.mouse_x_spec2D = self.plot_spec2D.mapToView(pos).x()
self.mouse_y_spec2D = self.plot_spec2D.mapToView(pos).y()
def setTitle_1D(self):
self.plot_spec1D.setTitle('{} {}/{} {:0.2f}, {:.2E}'.format(self.id, self.row, self.nRows, self.mouse_x_spec1D, self.mouse_y_spec1D))
def draw(self):
# Clear plots
self.plot_redshift.clear()
self.plot_spec1D.clear()
if self.param['Show lines']:
features = self.features
observedWaves = features['wave']*(1 + self.z)
features = features[((observedWaves > np.min(self.wave)) & (observedWaves < np.max(self.wave))) | (features['list'] == 'sky')]
            # one pen color per line list; sky lines are drawn at their
            # observed wavelength, everything else is shifted by (1 + z)
            pen_colors = {'sky': 'g', 'quasar': 'b', 'absorption': 'r',
                          'qsoals': 'r', 'emission': 'y'}
            for feature in features:
                color = pen_colors.get(feature['list'])
                if color is None:
                    continue
                wave = feature['wave'] if feature['list'] == 'sky' else feature['wave']*(1 + self.z)
                self.plot_spec1D.addItem(pg.InfiniteLine(wave,
                                         pen=pg.mkPen(color, width=2, style=QtCore.Qt.DotLine),
                                         label='{} {:0.1f}'.format(feature['name'], feature['wave']),
                                         labelOpts={'position':0.8, 'rotateAxis':[1, 0]}))
if self.redshifted == 1:
self.plot_redshift.plot(self.redshifts['z'], self.redshifts['chi2_pdf'],
pen=pg.mkPen('w', width=1))
self.plot_redshift.addItem(pg.InfiniteLine(self.z,
pen=pg.mkPen('r', width=2, style=QtCore.Qt.DotLine)))
self.plot_redshift.autoRange()
print(np.shape(self.flux1D))
self.plot_spec1D.plot(self.wave, self.flux1D*self.spec['mask'],
pen=pg.mkPen('w', width=1))
self.plot_spec1D.plot(self.wave, self.error1D*self.spec['mask'],
pen=pg.mkPen('c', width=1))
self.plot_spec1D.plot(self.wave, self.model1D,pen=pg.mkPen('r', width=2))
self.setTitle_1D()
# 2D spectrum
self.plot_spec2D.setImage(self.flux2D, xvals=self.wave,
levels=self.plot_spec2D_hist.getLevels(),
border=pg.mkPen('w', width=2))
# whitelight image
self.whitelight_view.setImage(self.whitelight, levels=[-2, 50])
ra = self.objects[self.row-1]['ra']
dec = self.objects[self.row-1]['dec']
yx = self.cube_wcs.sky2pix((dec, ra))[0]
x = yx[1]
y = yx[0]
diameter = self.objects[self.row-1]['radius']*2/0.2
        if self.object_labelled:
            self.whitelight_view.removeItem(self.objectMarker)
            self.whitelight_view.removeItem(self.objectMarker_outer)
        self.objectMarker = pg.CircleROI(np.array([x - diameter/2, np.shape(self.whitelight)[1] - y - diameter/2]), [diameter, diameter],
                                         pen=pg.mkPen('r', width=2),
                                         movable=False, removable=False)
        diameter = 10.0/0.2
        self.objectMarker_outer = pg.CircleROI(np.array([x - diameter/2, np.shape(self.whitelight)[1] - y - diameter/2]), [diameter, diameter],
                                               pen=pg.mkPen('r', width=1),
                                               movable=False, removable=False)
        self.whitelight_view.addItem(self.objectMarker)
        self.whitelight_view.addItem(self.objectMarker_outer)
        self.object_labelled = True
#self.whitelight_view.autoLevels()
self.objectsTable.selectRow(self.row-1)
self.objectsTable.scrollToItem(self.objectsTable.item(self.row-1,0),QtGui.QAbstractItemView.PositionAtCenter)
# Set up the command line argument parser
parser = argparse.ArgumentParser(description='Assign redshifts for sources in MUSE. Requires a MUSE datacube and input catalog file with object coordinates.')
parser.add_argument('-m', metavar='muse cube filename', type=str, help='muse cube filename', required=True)
parser.add_argument('-xsize', metavar='xsize', type=int, help='xsize in pixels', default=2500)
parser.add_argument('-ysize', metavar='ysize', type=int, help='ysize in pixels', default=1500)
args = parser.parse_args()
# Check for the 1d spectrum files.
if not os.path.isdir('{}_spec1D'.format(args.m)):
print('Creating 1D spectrum files')
createmusefiles(args.m)
else:
print('1D spectrum files already present. Copying objects file to backup')
shutil.copy('{}_spec1D/{}_objects.fits'.format(args.m,args.m),'{}_spec1D/{}_objects_bkp.fits'.format(args.m,args.m))
redshiftgui = muse_redshiftgui(args.m, args.xsize, args.ysize)
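
# Typical invocation (the script and cube filenames below are placeholders):
#   python redshiftgui.py -m cube.fits -xsize 2500 -ysize 1500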
|
[
"seanjoh@umich.edu"
] |
seanjoh@umich.edu
|
3107387d5fc4b538b140c287cf51aa5e4414d44b
|
940c664cd3c64d21a4d6197cd0aff79d0c6b0532
|
/src/common/functions/external_memory_func.py
|
088f20ce34b264d52d0b8abbe72febeb40191fed
|
[
"MIT"
] |
permissive
|
0shimax/chainer-learning-to-remember-rare-events
|
2093385adee2ad3ec2c189cc9ffb3738f8cee5d6
|
8cf27a6afbaa058f8ad03df4d2ee91a62cccdf7d
|
refs/heads/master
| 2021-01-22T23:37:05.765437
| 2017-04-23T10:32:36
| 2017-04-23T10:32:36
| 85,662,113
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,692
|
py
|
import numpy as np
import chainer
from chainer import cuda, function
class ExternalMemoryFunction(function.Function):
def forward(self, inputs):
self.xp = cuda.get_array_module(*inputs)
if len(inputs)==5:
embedding_vecs, t, self.memory, self.update_weight, train = inputs
else:
embedding_vecs, self.memory, self.update_weight, train = inputs
self.n_class, self.n_unit = self.memory.shape
weight = self._compute_attention(embedding_vecs)
if train:
self._calculate_center(embedding_vecs, t)
return weight, self.memory,
def backward(self, inputs, grad_outputs):
"""never backward
"""
if len(inputs)==5:
return None,None,None,None,None,
else:
return None,None,None,None
def _normalize(self, vec, vec_sum):
normalized_vec = \
self.xp.where( \
vec==0, \
self.xp.zeros_like(vec), \
vec/vec_sum \
).transpose(1,0)
return normalized_vec
def _calculate_channel_idx(self, embedding_vecs, t):
represented_vec = self.xp.zeros( \
(self.n_class, embedding_vecs.shape[1]), dtype=embedding_vecs.dtype)
for vec, klass_idx in zip(embedding_vecs, t):
represented_vec[klass_idx] += vec
return represented_vec
def _calculate_center(self, embedding_vecs, t):
n_batch, n_unit, _, _ = embedding_vecs.shape
vecs = embedding_vecs.reshape((n_batch, n_unit))
represented_vec = self._calculate_channel_idx(vecs, t) # (n_class, n_unit)
represented_vec_sum = represented_vec.sum(axis=1)
represented_vec = represented_vec.transpose(1,0)
# normalize
represented_vec = self._normalize(represented_vec, represented_vec_sum)
self.memory = \
(1-self.update_weight)*self.memory \
+ self.update_weight*represented_vec
external_memory_sum = self.memory.sum(axis=1)
t_external_memory = self.memory.transpose(1,0)
# normalize
self.memory = \
self._normalize(t_external_memory, external_memory_sum)
def _compute_attention(self, embedding_vecs):
'''
context_vec: (batch_size, n_unit). default (20, 4096).
'''
n_batch, n_unit, _, _ = embedding_vecs.shape
vecs = embedding_vecs.reshape((n_batch, n_unit))
vecs = self._normalize(vecs.transpose(1,0), vecs.sum(axis=1))
weights = vecs.dot(self.memory.T) # (batch_size, n_class)
weights = self._normalize(weights.transpose(1,0), weights.sum(axis=1))
return weights
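
# A minimal sanity-check sketch (assumption: run standalone with numpy;
# forward() normally sets self.xp via cuda.get_array_module from its inputs,
# so we emulate that here).
if __name__ == "__main__":
    f = ExternalMemoryFunction()
    f.xp = np  # what forward() would set from the input arrays
    vecs = np.array([[1.0, 2.0],
                     [3.0, 0.0]])  # shape (n_unit, n_batch)
    sums = vecs.sum(axis=0)        # per-column sums -> [4.0, 2.0]
    # _normalize divides each column by its sum (zeros stay zero) and then
    # transposes, so each returned row sums to 1:
    print(f._normalize(vecs, sums))  # [[0.25, 0.75], [1.0, 0.0]]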
|
[
"shin.keeper@gmail.com"
] |
shin.keeper@gmail.com
|
4fa5e4bfbcb5d4dec360160ee5738b8e68aa6d1d
|
60b0e49d153f7581ce89372c2b2d237359872b8d
|
/billboard/wsgi.py
|
48984655e483643c48ff8a25e6963156a348ba03
|
[] |
no_license
|
talkl/billboard
|
b8bd306b3b74ef193351e181077d3149cf534edb
|
040c344c73fb7f6099b7e1d461e2951dea2cbb16
|
refs/heads/master
| 2020-04-16T10:17:00.388726
| 2019-01-13T11:46:18
| 2019-01-13T11:46:18
| 164,896,745
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 402
|
py
|
"""
WSGI config for billboard project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "billboard.settings")
application = get_wsgi_application()
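
# Typical deployment entry point for a WSGI server (any WSGI-compliant
# server works), e.g.:
#   gunicorn billboard.wsgi:application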
|
[
"talkl120@gmail.com"
] |
talkl120@gmail.com
|
7886885f54375d2f98bbbf17d45a2cb79cd2a9ec
|
46040c370239ac6e91372e61d9729a22ba9af33c
|
/lw/translations/models/__init__.py
|
cbb5793539bbcc7a879c4befabaaaa02af0411ee
|
[] |
no_license
|
lesswrong-ru/django-site
|
85e066ab42857fa0d81c98a1efaf2a0dc403d4d2
|
ac4d6f0a5a11751ea9d3eb16f5b8bab53cb4f731
|
refs/heads/master
| 2021-06-17T01:22:58.764120
| 2021-03-26T14:20:11
| 2021-03-26T14:20:11
| 196,754,119
| 0
| 1
| null | 2021-03-26T14:20:12
| 2019-07-13T18:32:09
|
Python
|
UTF-8
|
Python
| false
| false
| 201
|
py
|
from lw.translations.models.translation_index_page import TranslationIndexPage
from lw.translations.models.translation_page import TranslationPage
from lw.translations.models.book_page import BookPage
|
[
"me@berekuk.ru"
] |
me@berekuk.ru
|
e3f9b9ccd9704d797def23c50f582b8c877f8f37
|
9059d9cbad4188ed2980f551151b9678ffb68b44
|
/Chapter12_logging/12-3.logging_config_example.py
|
0262db2fa4267b523bc6fa234849422e7c5042d2
|
[] |
no_license
|
mhee4321/python_basic
|
ad0e64fa21ecfab231a6627ba6abeea82d725690
|
86031975a9121efe5785e83f663255a7b4e4ba77
|
refs/heads/master
| 2023-02-11T20:31:54.353219
| 2021-01-07T05:44:31
| 2021-01-07T05:44:31
| 326,850,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 750
|
py
|
import logging  # load the logging module
import logging.config  # load the logging configuration module
# Read the configuration file
logging.config.fileConfig('12-2.logging.conf')
# Create a logger
logger = logging.getLogger(__name__)
# Emit log messages
logger.debug('Only a developer will understand this message.')  # DEBUG log
logger.info('Everything is working as expected.')  # INFO log
logger.warning('Something is likely to go wrong soon.')  # WARNING log
logger.error('A problem occurred. The feature is not working.')  # ERROR log
logger.critical('The system is going down!!!!')  # CRITICAL log
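
# For reference, a minimal '12-2.logging.conf' could look like the sketch
# below (an assumption -- the actual file is not part of this snippet):
#
# [loggers]
# keys=root
#
# [handlers]
# keys=consoleHandler
#
# [formatters]
# keys=simpleFormatter
#
# [logger_root]
# level=DEBUG
# handlers=consoleHandler
#
# [handler_consoleHandler]
# class=StreamHandler
# level=DEBUG
# formatter=simpleFormatter
# args=(sys.stdout,)
#
# [formatter_simpleFormatter]
# format=%(asctime)s %(name)s %(levelname)s %(message)s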
|
[
"nannanru@gmail.com"
] |
nannanru@gmail.com
|
99dc52c0d9b077fb1c98d68ddd2a713856f59b3b
|
8cacc6f6ba0f18f0ea051869d631d2b3fa0e64f1
|
/whitechapel_blog/migrations/0007_auto_20170407_1124.py
|
05d461b9cfa2f565b5fa44b091946f6a67c5319b
|
[] |
no_license
|
ddunc23/survey-of-london-whitechapel
|
9099d644a8b3616a6be6b32f9b412e933d65a75d
|
0a37ade163a260d670b020f243e1fa4322f8c655
|
refs/heads/master
| 2022-06-28T14:16:23.167916
| 2018-07-20T15:19:34
| 2018-07-20T15:19:34
| 51,997,516
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 637
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('whitechapel_blog', '0006_post_past_event'),
]
operations = [
migrations.AddField(
model_name='post',
name='post_preview',
field=models.CharField(max_length=140, blank=True),
),
migrations.AddField(
model_name='post',
name='post_thumbnail',
field=models.ImageField(upload_to=b'', null=True, verbose_name=b'Thumbnail Image', blank=True),
),
]
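
# Applied with the standard Django workflow, e.g.:
#   python manage.py migrate whitechapel_blog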
|
[
"hay.duncan@gmail.com"
] |
hay.duncan@gmail.com
|
882ecd305b42a60c92502be43bfd2f9327957b9c
|
443178cedeba3ab80c8d20db7a5b8e6938928e29
|
/serverless/apps/tests/qctokyo/test_notificator.py
|
fc301ec7b634dfbfdbce7c5443a3b29b4c00110c
|
[
"Apache-2.0"
] |
permissive
|
silky/qctokyo
|
ba49aff4dc611d7245fa6bcdb5143a4b1b474553
|
ae81f50b6ddc70f2aa78300c98e4d0d1d33f2f39
|
refs/heads/master
| 2022-10-27T08:11:55.460674
| 2020-06-15T16:09:52
| 2020-06-15T16:09:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,082
|
py
|
import json
from logging import INFO, ERROR
import os
import requests
import requests_mock
from qctokyo import notificator
def setup_module(module):
os.environ["STAGE"] = "dev"
os.environ["SLACK_WEBHOOK_URL"] = "hooks.slack.com/services/dummy"
def teardown_module(module):
del os.environ["STAGE"]
del os.environ["SLACK_WEBHOOK_URL"]
def test_notify_horoscope_success_to_slack(caplog):
event = {
"job_id": "dummy_job_id",
"backend_name": "dummy_backend_name",
"creation_date": "2019-07-01T00:01:02.123456Z",
}
context = None
with requests_mock.Mocker() as m:
m.post("https://hooks.slack.com/services/dummy")
# execution
actual = notificator.notify_horoscope_success_to_slack(event, context)
# validate return value
assert actual == event
assert m.call_count == 1
attachments = json.loads(m.request_history[0].text)["attachments"]
assert len(attachments) == 1
attachment0 = attachments[0]
assert attachment0["color"] == "good"
assert attachment0["pretext"] == "[dev] storing horoscope is success:smile:"
assert (
attachment0["text"]
== "job_id: dummy_job_id\nbackend_name: dummy_backend_name\ncreation_date: 2019-07-01T00:01:02.123456Z UTC"
)
# validate logger
assert caplog.record_tuples[1] == (
"root",
INFO,
"slack_webhook_response status_code=200",
)
def test_notify_horoscope_failed_to_slack(caplog):
event = {
"job_id": "dummy_job_id",
"backend_name": "dummy_backend_name",
"creation_date": "2019-07-01T00:01:02.123456Z",
"detail": {"status": "FAILED"},
}
context = None
with requests_mock.Mocker() as m:
m.post("https://hooks.slack.com/services/dummy")
# execution
actual = notificator.notify_horoscope_failed_to_slack(event, context)
# validate return value
assert actual == event
assert m.call_count == 1
attachments = json.loads(m.request_history[0].text)["attachments"]
assert len(attachments) == 1
attachment0 = attachments[0]
assert attachment0["color"] == "danger"
assert attachment0["pretext"] == "[dev] storing horoscope is failure:rage:"
assert attachment0["text"] == "Check detail!\nstatus: FAILED"
# validate logger
assert caplog.record_tuples[1] == (
"root",
INFO,
"slack_webhook_response status_code=200",
)
def test_notify_horoscope_update_to_slack(caplog):
event = {
"rank1": "<td>Aquarius</td><td>Jan 20 - Feb 18</td>",
"rank2": "<td>Pisces</td><td>Feb 19 - Mar 20</td>",
"rank3": "<td>Aries</td><td>Mar 21 - Apr 19</td>",
"rank4": "<td>Taurus</td><td>Apr 20 - May 20</td>",
"rank5": "<td>Gemini</td><td>May 21 - Jun 20</td>",
"rank6": "<td>Cancer</td><td>Jun 21 - Jul 22</td>",
"rank7": "<td>Leo</td><td>Jul 23 - Aug 22</td>",
"rank8": "<td>Virgo</td><td>Aug 23 - Sep 22</td>",
"rank9": "<td>Libra</td><td>Sep 23 - Oct 22</td>",
"rank10": "<td>Scorpio</td><td>Oct 23 - Nov 21</td>",
"rank11": "<td>Sagittarius</td><td>Nov 22 - Dec 21</td>",
"rank12": "<td>Capricorn</td><td>Dec 22 -Jan 19</td>",
"backend_name": "dummy_backend_name",
"creation_date": "2019-07-01 00:01",
}
context = None
with requests_mock.Mocker() as m:
m.post("https://hooks.slack.com/services/dummy")
# execution
actual = notificator.notify_horoscope_update_to_slack(event, context)
# validate return value
assert actual == event
assert m.call_count == 1
attachments = json.loads(m.request_history[0].text)["attachments"]
assert len(attachments) == 1
attachment0 = attachments[0]
assert attachment0["color"] == "good"
assert attachment0["pretext"] == "[dev] updating horoscope is success:smile:"
expected_text = "\n".join(
[
"received new oracle at 2019-07-01 00:01 UTC",
"1: Aquarius, Jan20-Feb18",
"2: Pisces, Feb19-Mar20",
"3: Aries, Mar21-Apr19",
"4: Taurus, Apr20-May20",
"5: Gemini, May21-Jun20",
"6: Cancer, Jun21-Jul22",
"7: Leo, Jul23-Aug22",
"8: Virgo, Aug23-Sep22",
"9: Libra, Sep23-Oct22",
"10: Scorpio, Oct23-Nov21",
"11: Sagittarius, Nov22-Dec21",
"12: Capricorn, Dec22-Jan19",
"https://www.quantumcomputer.tokyo/horoscope.html",
]
)
assert attachment0["text"] == expected_text
# validate logger
assert caplog.record_tuples[1] == (
"root",
INFO,
"slack_webhook_response status_code=200",
)
def test_notify_horoscope_update_failed_to_slack(caplog):
event = {
"job_id": "dummy_job_id",
"backend_name": "dummy_backend_name",
"creation_date": "2019-07-01T00:01:02.123456Z",
"detail": {"status": "FAILED"},
}
context = None
with requests_mock.Mocker() as m:
m.post("https://hooks.slack.com/services/dummy")
# execution
actual = notificator.notify_horoscope_update_failed_to_slack(event, context)
# validate return value
assert actual == event
assert m.call_count == 1
attachments = json.loads(m.request_history[0].text)["attachments"]
assert len(attachments) == 1
attachment0 = attachments[0]
assert attachment0["color"] == "danger"
assert attachment0["pretext"] == "[dev] updating horoscope is failure:rage:"
assert attachment0["text"] == "Check detail!\nstatus: FAILED"
# validate logger
assert caplog.record_tuples[1] == (
"root",
INFO,
"slack_webhook_response status_code=200",
)
def test_post_slack(caplog):
with requests_mock.Mocker() as m:
m.post("https://hooks.slack.com/services/dummy")
title = "updating horoscope is success:smile:"
color = "good"
detail = "test message"
# execution
notificator._post_slack(title, color, detail)
# validate return value
assert m.call_count == 1
attachments = json.loads(m.request_history[0].text)["attachments"]
assert len(attachments) == 1
attachment0 = attachments[0]
assert attachment0["color"] == "good"
assert attachment0["pretext"] == "[dev] updating horoscope is success:smile:"
assert attachment0["text"] == "test message"
# validate logger
assert caplog.record_tuples == [
("root", INFO, "slack_webhook_response status_code=200"),
]
def test_post_slack_exception(mocker, caplog):
mocker.patch.object(
requests,
"post",
side_effect=requests.exceptions.RequestException("test exception"),
)
title = "updating horoscope is success:smile:"
color = "good"
detail = "test message"
# execution
notificator._post_slack(title, color, detail)
# validate logger
assert caplog.record_tuples == [
("root", ERROR, "failed to call slack_webhook"),
]
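
# These tests target pytest: requests-mock provides requests_mock.Mocker and
# pytest-mock supplies the `mocker` fixture, so a typical run (path assumed) is:
#   pytest test_notificator.py -q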
|
[
"tsukano@acroquest.co.jp"
] |
tsukano@acroquest.co.jp
|
2b1c5f19d53ea7c2b14a5fb15ce6cec51689d316
|
732585af552e79c8f1bff48a75b6929ff28e106a
|
/pvapp/models/ExperimentData.py
|
5976bf674745532c9d82e8f45341c8638d38b74b
|
[] |
no_license
|
urbanophile/ui
|
7c3b5cef12d273700d0e7971757f83eef696f593
|
a594ace92c3d032b98719afb38777f409342efaa
|
refs/heads/master
| 2020-12-28T20:31:24.283673
| 2015-11-16T04:06:25
| 2015-11-16T04:06:25
| 38,616,477
| 0
| 1
| null | 2015-12-19T02:36:17
| 2015-07-06T12:08:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
import numpy as np
import scipy
import scipy.fftpack  # scipy.fftpack is not pulled in by a bare `import scipy`

from util.Constants import (
    CHANNEL_INDEX,
)
from test.utils import make_sin_data
from test import utils  # assumed home of bin_data used in binHandler (not shown here)
class ExperimentData(object):
"""docstring for ExperimentData"""
def __init__(self, data=None, metadata=None):
super(ExperimentData, self).__init__()
        # fall back to synthetic data when none is supplied
        if data is None:
            data = make_sin_data(duration=100)
self.Data = data
self.RawData = data
self.metadata = metadata
def isDataEmpty(self):
return self.Data is None
def updateRawData(self, data):
self.Data = data
self.RawData = np.copy(data)
def revertData(self):
self.Data = np.copy(self.RawData)
def invertHandler(self, channel):
self.Data[:, CHANNEL_INDEX[channel]] *= -1
self.metadata.inverted_channels[channel] = not self.metadata.inverted_channels[channel]
def offsetHandler(self, offset_type=None, offset=None, channel=None):
if offset_type == 'y':
index = CHANNEL_INDEX[channel]
self.Data[:, index] = self.Data[:, index] + offset
elif offset_type == 'start_x':
self.Data = self.Data[self.Data[:, 0] > offset, :]
elif offset_type == 'end_x':
# so this isn't cumulative
offset = self.RawData[-1, 0] - offset
self.Data = self.Data[self.Data[:, 0] < offset, :]
def fftOperator(self, channel_name, total_duration):
# get FFT of data
# pass FFT of data to plotting function
# frequency spectrum data
t = scipy.linspace(0, total_duration, self.metadata.sample_rate)
signal = self.Data[:, CHANNEL_INDEX[channel_name]]
FFT = abs(scipy.fft(signal))
# returns DFT sample frequencies
freqs = scipy.fftpack.fftfreq(
signal.size, # window length
t[1] - t[0] # sample spacing
)
pos_freqs = freqs[freqs >= 0]
FFT_transf = FFT[len(pos_freqs) - 1:]
# print(pos_freqs.size, FFT_transf.size)
return np.vstack((pos_freqs, FFT_transf)).T
def binHandler(self, bin_size):
self.Data = utils.bin_data(self.Data, bin_size)
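
# A minimal construction sketch (assumption: run standalone; _Meta is a
# hypothetical stand-in for the metadata object the app normally supplies).
if __name__ == "__main__":
    class _Meta(object):
        sample_rate = 1000
        inverted_channels = {'A': False}
    exp = ExperimentData(metadata=_Meta())
    print(exp.isDataEmpty())  # False: synthetic fallback data was generated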
|
[
"contact.matt.gibson@gmail.com"
] |
contact.matt.gibson@gmail.com
|
e9056dcc8a8628a344e0ddf4e9add6e257ddabae
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_310/ch25_2019_03_01_00_00_25_791523.py
|
70bb03eaebe4809ffcc0bcea7e9b4073d6f8312b
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 160
|
py
|
km = int(input("distancia: "))
if km <= 200:
    preco = km*0.5
    print("{:.2f}".format(preco))
else:
    preco = km*0.45
    print("{:.2f}".format(preco))
|
[
"you@example.com"
] |
you@example.com
|
bf69580ef7eca1b5b2b9c34696e053a163f42e56
|
dd0122daefdc6010dc7409278880d6e9371d67cb
|
/OLD/DAWdjango/mysite/myapp/urls.py
|
0b53d47e81ceea86b1f3af453ef607410702e0be
|
[] |
no_license
|
mblancca/Videoclub-DAW-2018
|
03957db284a86c170dd4395dfa58b116d098fff2
|
cf61d98e1ed9f73884bdf5a2b4bf3802cc8b839c
|
refs/heads/master
| 2021-09-17T14:45:19.576400
| 2018-07-02T19:38:12
| 2018-07-02T19:38:12
| 126,895,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
from django.urls import path
from . import views
from django.conf.urls import include, url
from myapp.views import SignUpView, BienvenidaView, SignInView, SignOutView
urlpatterns = [
#path('', views.acceso, name='acceso'),
path('index/', views.index, name='index'),
url(r'^$', BienvenidaView.as_view(), name='bienvenida'),
url(r'^registrate/$', SignUpView.as_view(), name='sign_up'),
    url(r'^inicia-sesion/$', SignInView.as_view(), name='sign_in'),
url(r'^cerrar-sesion/$', SignOutView.as_view(), name='sign_out'),
]
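
# The named routes can then be reversed elsewhere in the project (standard
# Django), e.g.:
#   from django.urls import reverse
#   reverse('sign_in')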
|
[
"mariano.blanco.cantero@alumnos.upm.es"
] |
mariano.blanco.cantero@alumnos.upm.es
|
cfd0ccbddf683ab936c9b2d0b3b2a16fbce0b47d
|
8ac9111348c1a6239aa44491c8204b4567bf8a30
|
/mtsp_planner/scripts/solvers/tsp_trajectory.py
|
80ab1922fbbcb65c4bd08ba1418ab61879cb65df
|
[] |
no_license
|
semberecki/mrsss
|
d4f878ade8b1620a3872eee9bf06c6a11939285c
|
e150b07430b4a90e5fd317dc27fcccfb5025db08
|
refs/heads/master
| 2020-06-27T20:54:23.483221
| 2019-08-02T10:41:13
| 2019-08-02T10:41:13
| 200,046,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,162
|
py
|
"""
Custom TSP Loader
@author: R.Penicka
"""
import rospy
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import math
import time
from mrs_msgs.msg import TrackerPoint
from mrs_msgs.msg import TrackerTrajectory
import dubins
# #{ dist_euclidean_squared()
def dist_euclidean_squared(coord1, coord2):
""" euclidean distance between coord1 and coord2"""
(x1, y1) = coord1
(x2, y2) = coord2
return (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)
# #} end of dist_euclidean_squared()
# #{ dist_euclidean()
def dist_euclidean(coord1, coord2):
""" euclidean distance between coord1 and coord2"""
return math.sqrt(dist_euclidean_squared(coord1, coord2))
# #} end of dist_euclidean()
# #{ pos_in_distance()
def pos_in_distance(start, stop, dist):
dist_tot = dist_euclidean(start, stop)
(x1, y1) = start
(x2, y2) = stop
x = x1 + (x2 - x1) * dist / dist_tot
y = y1 + (y2 - y1) * dist / dist_tot
return [x, y]
# #} end of pos_in_distance()
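# Quick sanity check of the helper above (assumed values):
# pos_in_distance((0.0, 0.0), (10.0, 0.0), 4.0) -> [4.0, 0.0],
# i.e. the point 4 m along the 10 m straight segment.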
class TSPTrajectory():
# #{ __init__()
def __init__(self, max_velocity, max_acceleration):
self.time_sample = 0.2
self.max_velocity = max_velocity
self.max_acceleration = max_acceleration
# #} end of __init__()
    # #{ sample_euclidean_with_stops()
def sample_euclidean_with_stops(self, start, stop, init_velocity=0, final_velocity=0, sample_start_time=0):
""" euclidean trajectory with stops between start and stop"""
#print("sample_euclidean_with_stops from", start, "to", stop)
samples = []
trajectory_part_time = 0
# acc = 0 # no jeck jet
dist_total = dist_euclidean(start, stop)
#print("dist_total", dist_total)
time_from_init_to_max_vel = (self.max_velocity - init_velocity) / self.max_acceleration
time_from_max_to_final_vel = (self.max_velocity - final_velocity) / self.max_acceleration
dist_from_init_to_max_vel = 0.5 * (self.max_velocity + init_velocity) * time_from_init_to_max_vel # average speed * time
dist_from_max_vel_to_final = 0.5 * (self.max_velocity + final_velocity) * time_from_max_to_final_vel # average speed * time
"""
print("time_from_init_to_max_vel", time_from_init_to_max_vel, "s")
print("time_from_max_to_final_vel", time_from_max_to_final_vel, "s")
print("dist_from_init_to_max_vel", dist_from_init_to_max_vel, "m")
print("dist_from_max_vel_to_final", dist_from_max_vel_to_final, "m")
"""
if dist_total < dist_from_init_to_max_vel + dist_from_max_vel_to_final: # can not reach maximal speed in straigh line
#print("can not reach max vel in trajectory")
t = 0
sample = 0
if init_velocity == 0 and final_velocity == 0:
time_to_possible_max_vel = math.sqrt(dist_total / self.max_acceleration)
velocity_in_middle = time_to_possible_max_vel * self.max_acceleration
trajectory_part_time = 2 * time_to_possible_max_vel
else:
if init_velocity > final_velocity: # initial velocity is larger than final, in the end is additinal decelerating
time_final_decel = (init_velocity - final_velocity) / self.max_acceleration
dist_final_decel = time_final_decel * (init_velocity + final_velocity) * 0.5
dist_acc_decc = dist_total - dist_final_decel
time_to_possible_max_vel = (-init_velocity + math.sqrt(init_velocity ** 2 + self.max_acceleration * dist_acc_decc)) / self.max_acceleration
velocity_in_middle = init_velocity + time_to_possible_max_vel * self.max_acceleration
trajectory_part_time = time_to_possible_max_vel + time_final_decel
else:
time_init_accel = (final_velocity - init_velocity) / self.max_acceleration
dist_init_accel = time_init_accel * (init_velocity + final_velocity) * 0.5
dist_acc_decc = dist_total - dist_init_accel
time_to_possible_max_vel = time_init_accel + (-final_velocity + math.sqrt(final_velocity ** 2 + self.max_acceleration * dist_acc_decc)) / self.max_acceleration
velocity_in_middle = init_velocity + time_to_possible_max_vel * self.max_acceleration
"""
print("time_init_accel", time_init_accel)
print("dist_init_accel", dist_init_accel)
print("dist_total", dist_total)
print("dist_acc_decc", dist_acc_decc)
print("such dist is", 0.5 * (velocity_in_middle + init_velocity) * time_to_possible_max_vel * 2)
"""
trajectory_part_time = 2 * time_to_possible_max_vel - time_init_accel
"""
print("time_to_possible_max_vel", time_to_possible_max_vel)
print("velocity_in_middle", velocity_in_middle)
print("sample_start_time", sample_start_time)
"""
while (sample + 1) * self.time_sample <= time_to_possible_max_vel - sample_start_time:
t = (sample + 1) * self.time_sample + sample_start_time
sample += 1
v = init_velocity + self.max_acceleration * t
s = init_velocity * t + 0.5 * self.max_acceleration * (t ** 2)
#print("t", t, "v", v, "s", s, "sample", sample)
pos_in_dist = pos_in_distance(start, stop, s)
samples.append(pos_in_dist)
#print("end acc")
while (sample + 1) * self.time_sample <= trajectory_part_time - sample_start_time:
t = (sample + 1) * self.time_sample + sample_start_time
sample += 1
t_part = t - time_to_possible_max_vel
v = velocity_in_middle - self.max_acceleration * t_part
s = time_to_possible_max_vel * 0.5 * (velocity_in_middle + init_velocity) + velocity_in_middle * t_part - 0.5 * self.max_acceleration * (t_part ** 2)
#print("t", t, "v", v, "s", s, "sample", sample)
pos_in_dist = pos_in_distance(start, stop, s)
samples.append(pos_in_dist)
#print("end decc")
else: # can reach maximal speed in straigh line
#print("can reach max vel")
dist_constant_speed = dist_total - dist_from_init_to_max_vel - dist_from_max_vel_to_final
time_constant_speed = dist_constant_speed / self.max_velocity
trajectory_part_time = time_from_init_to_max_vel + time_constant_speed + time_from_max_to_final_vel
"""
print("time_constant_speed", time_constant_speed, "s")
print("dist_constant_speed", dist_constant_speed, "m")
print("trajectory_part_time", trajectory_part_time)
"""
t = 0
sample = 0
while (sample + 1) * self.time_sample <= time_from_init_to_max_vel - sample_start_time:
t = (sample + 1) * self.time_sample + sample_start_time
sample += 1
v = init_velocity + self.max_acceleration * t
s = init_velocity * t + 0.5 * self.max_acceleration * (t ** 2)
pos_in_dist = pos_in_distance(start, stop, s)
samples.append(pos_in_dist)
#print("t", t, "v", v, "s", s, "sample", sample)
#print("end acc")
while (sample + 1) * self.time_sample <= time_from_init_to_max_vel + time_constant_speed - sample_start_time:
t = (sample + 1) * self.time_sample + sample_start_time
sample += 1
t_part = t - time_from_init_to_max_vel
v = self.max_velocity
s = dist_from_init_to_max_vel + v * t_part
pos_in_dist = pos_in_distance(start, stop, s)
samples.append(pos_in_dist)
#print("t", t, "v", v, "s", s, "sample", sample)
#print("end const")
while (sample + 1) * self.time_sample <= time_from_init_to_max_vel + time_constant_speed + time_from_max_to_final_vel - sample_start_time:
t = (sample + 1) * self.time_sample + sample_start_time
sample += 1
t_part = t - (time_from_init_to_max_vel + time_constant_speed)
v = self.max_velocity - self.max_acceleration * t_part
s = (dist_total - dist_from_max_vel_to_final) + self.max_velocity * t_part - 0.5 * self.max_acceleration * (t_part ** 2)
#print("t", t, "v", v, "s", s, "sample", sample)
pos_in_dist = pos_in_distance(start, stop, s)
samples.append(pos_in_dist)
if final_velocity == 0 and samples[-1] != stop:
#print("t last", "v", 0, "s", dist_total)
samples.append(stop)
return samples, trajectory_part_time
    # #} end of sample_euclidean_with_stops()
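    # Sketch of the timing math above for a segment that starts and ends at
    # rest: with max velocity v and acceleration a, each ramp lasts t = v/a
    # and covers d = v^2/(2a); if the segment is shorter than 2d the profile
    # is triangular with peak velocity sqrt(a*dist), otherwise the middle
    # part is flown at the constant velocity v.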
# #{ sample_trajectory_euclidean()
def sample_trajectory_euclidean(self, sequence):
""" sample euclidean tarjectory over sequence """
print("sample_trajectory_euclidean in sequence", sequence)
samples = []
samples.append(sequence[0]) # add first point of trajectory
trajectory_time = 0
for target_id in range(1, len(sequence)):
from_target = sequence[target_id - 1]
to_target = sequence[target_id]
part_samples, part_time = self.sample_euclidean_with_stops(from_target, to_target)
trajectory_time += part_time
#print("part_time", part_time)
samples.extend(part_samples)
# return samples, trajectory_time
return samples, trajectory_time
# #} end of sample_trajectory_euclidean()
    # #{ sample_trajectory_dubins()
def sample_trajectory_dubins(self, sequence, turning_velocity=None):
""" sample dubins tarjectory over sequence """
print("sample_trajectory_dubins in sequence", sequence)
if turning_velocity is None:
turning_velocity = self.max_velocity
print("using turning_velocity", turning_velocity ," and acceleration ",self.max_acceleration)
turning_radius = (turning_velocity * turning_velocity) / self.max_acceleration
print("which means turning_radius", turning_radius)
sequence_start = 0
init_velocity = 0
time_to_turning_velocity = (turning_velocity - init_velocity) / self.max_acceleration
dist_to_turning_velocity = 0.5 * (turning_velocity + init_velocity) * time_to_turning_velocity # average speed * time
samples = []
sample = 0
t = 0
last_segment_end_time = 0
next_sample_start_time = 0
samples.append(sequence[sequence_start])
#print("t", t, "v", 0, "s", 0, "sample", sample, "start")
for target_id in range(sequence_start + 1, len(sequence)):
start = sequence[target_id - 1]
end = sequence[target_id]
dubins_path = dubins.shortest_path(start, end, turning_radius)
"""
print("segment 0", dubins_path.segment_length(0))
print("segment 1", dubins_path.segment_length(1))
print("segment 2", dubins_path.segment_length(2))
print("dubins lenght", dubins_path.path_length())
"""
# first segment of dubins
if (sample + 1) * self.time_sample < time_to_turning_velocity:
init_velocity = last_segment_end_time * self.max_acceleration
time_accel = (turning_velocity - init_velocity) / self.max_acceleration
dist_accel = init_velocity * time_accel + 0.5 * self.max_acceleration * time_accel * time_accel
if dubins_path.segment_length(0) < dist_accel : # accel whole time
segment_1_time = (-init_velocity + math.sqrt(init_velocity ** 2 + 2 * self.max_acceleration * dubins_path.segment_length(0))) / self.max_acceleration # turning segment 0
else: # accel only part time
segment_1_time = time_accel + (dubins_path.segment_length(0) - dist_accel) / turning_velocity
else:
segment_1_time = dubins_path.segment_length(0) / turning_velocity # turning segment 0
init_velocity = turning_velocity
#print("last_segment_end_time", last_segment_end_time)
#print("segment_1_time",segment_1_time)
acc_time = turning_velocity/self.max_acceleration
segment_1_time_dist = 0.5*self.max_acceleration*acc_time*acc_time + (segment_1_time-acc_time)*turning_velocity
while (sample + 1) * self.time_sample <= last_segment_end_time + segment_1_time:
t = (sample + 1) * self.time_sample - last_segment_end_time
if init_velocity != turning_velocity:
if (sample + 1) * self.time_sample <= time_to_turning_velocity: # still accelerating from init_velocity
s = init_velocity * t + 0.5 * self.max_acceleration * (t ** 2)
else:
dist_init_acc = 0.5 * (turning_velocity + init_velocity) * time_to_turning_velocity # alreaddy accelerated from init_velocity to turning_velocity
time_after_init_acc = t - time_to_turning_velocity
s = dist_init_acc + turning_velocity * time_after_init_acc
else: # already at turning velocity from the beginning
s = turning_velocity * t
sample += 1
samples.append(dubins_path.sample(s))
#print("t", t, "s", s, "sample", sample,"sample time", sample*self.time_sample, "dubins length", dubins_path.path_length(), "dubins part len", dubins_path.segment_length(0), "rot1")
last_segment_end_time += segment_1_time
next_sample_start_time = sample * self.time_sample - last_segment_end_time
if last_segment_end_time < time_to_turning_velocity:
init_velocity = last_segment_end_time * self.max_acceleration
else:
init_velocity = turning_velocity
#print("---------- end fist segment --------------- at time", last_segment_end_time)
# second segment of Dubins
if dubins_path.path_type() != dubins.LRL and dubins_path.path_type() != dubins.RLR: # straight line segment
start_straight_line = dubins_path.sample(dubins_path.segment_length(0))
stop_straight_line = dubins_path.sample(dubins_path.segment_length(0) + dubins_path.segment_length(1))
"""
print("start_straight_line", start_straight_line)
print("stop_straight_line", stop_straight_line)
print("init_velocity", init_velocity)
print("final_velocity", turning_velocity)
print("next_sample_start_time", next_sample_start_time)
"""
straight_line_samples, segment_2_time = self.sample_euclidean_with_stops(start_straight_line[0:2], stop_straight_line[0:2], init_velocity=init_velocity, final_velocity=turning_velocity, sample_start_time=next_sample_start_time)
phi = start_straight_line[2]
straight_line_samples_w_head = [(x, y, phi) for x, y in straight_line_samples]
samples += straight_line_samples_w_head
sample += len(straight_line_samples_w_head)
else: # also circular segment
segment_2_time = dubins_path.segment_length(1) / turning_velocity # turning segment 1
while (sample + 1) * self.time_sample <= last_segment_end_time + segment_2_time:
t = (sample + 1) * self.time_sample - last_segment_end_time
sample += 1
# t_part = t - last_segment_end_time
s = dubins_path.segment_length(0) + turning_velocity * t
samples.append(dubins_path.sample(s))
#print("t", t, "s", s, "sample", sample, "dubins length", dubins_path.path_length(), "rot middle")
last_segment_end_time += segment_2_time
next_sample_start_time = sample * self.time_sample - last_segment_end_time
if (sample + 1) * self.time_sample < time_to_turning_velocity:
init_velocity = last_segment_end_time * self.max_acceleration
else:
init_velocity = turning_velocity
#print("---------- end second segment --------------- at time", last_segment_end_time)
segment_3_time = dubins_path.segment_length(2) / turning_velocity # turning segment 2
while (sample + 1) * self.time_sample <= last_segment_end_time + segment_3_time:
t = (sample + 1) * self.time_sample - last_segment_end_time
sample += 1
if init_velocity != turning_velocity:
s = dubins_path.segment_length(0) + dubins_path.segment_length(1) + init_velocity * t + 0.5 * self.max_acceleration * (t ** 2)
else:
s = dubins_path.segment_length(0) + dubins_path.segment_length(1) + turning_velocity * t
samples.append(dubins_path.sample(s))
#print("t", t, "v", turning_velocity, "s", s, "sample", sample, "rot2")
last_segment_end_time += segment_3_time
next_sample_start_time = sample * self.time_sample - last_segment_end_time
#print("---------- end last segment --------------- at time", last_segment_end_time)
return samples, last_segment_end_time
# #} end of sample_trajectory_dubins()
# #{ plot_velocity_profile()
def plot_velocity_profile(self, samples, color='k',title='Velocity profile'):
figsize = (8, 5)
fig = plt.figure(figsize=figsize)
ax = fig.gca()
ax.set_title(title)
ax.set_ylabel('velocity [m/s]')
ax.set_xlabel('time [s]')
velocities = [0]
for i in range(1, len(samples)):
dist = dist_euclidean(samples[i - 1][0:2], samples[i][0:2])
velocities.append(dist / self.time_sample)
velocities_time = [self.time_sample * i for i in range(len(velocities))]
accelerations = [0]
for i in range(1, len(velocities)):
vel_change = velocities[i] - velocities[i - 1]
accelerations.append(vel_change / self.time_sample)
accelerations_time = [self.time_sample * i for i in range(len(accelerations))]
plt.axhline(self.max_velocity, 0, len(velocities), ls='-', color='k')
plt.plot(velocities_time, velocities, '-', color=color, label='velocity')
plt.axhline(self.max_acceleration, 0, len(accelerations), ls='-.', color='k')
plt.axhline(-self.max_acceleration, 0, len(accelerations), ls='-.', color='k')
plt.plot(accelerations_time, accelerations, '-.', color=color, label='acc')
ax.legend(loc='upper right')
ax2 = ax.twinx()
ax2.set_ylabel('acceleration [m/s^2]')
# #} end of plot_velocity_profile()
# #{ create_ros_trajectory()
def create_ros_trajectory(self, trajectory, height):
""" create the ROS trajectory object """
default_yaw = 1.57
trajectory_msg = TrackerTrajectory()
trajectory_msg.fly_now = False
trajectory_msg.use_yaw = True
trajectory_msg.loop = False
trajectory_msg.start_index = 0
trajectory_msg.header.frame_id = "local_origin"
trajectory_msg.header.stamp = rospy.Time.now()
for point in trajectory:
if len(point) == 2: # x and y
new_point = TrackerPoint()
new_point.x = point[0]
new_point.y = point[1]
new_point.z = height
new_point.yaw = default_yaw
trajectory_msg.points.append(new_point)
elif len(point) == 3: # x, y and yaw
new_point = TrackerPoint()
new_point.x = point[0]
new_point.y = point[1]
new_point.z = height
new_point.yaw = point[2] # use the sampled heading; the original hardcoded default_yaw, which made this branch identical to the 2-tuple case
trajectory_msg.points.append(new_point)
return trajectory_msg
# #} end of create_ros_trajectory()
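# --- usage sketch (not part of the original file) ---
# A minimal, hedged example of driving the samplers above. The surrounding
# planner class and its constructor are not shown in this file, so the
# constructor name and arguments below are assumptions; only the method names
# and the (x, y, heading) sequence format come from the code above.
#
#   planner = TrajectoryPlanner(max_velocity=2.0, max_acceleration=1.0,
#                               time_sample=0.2)  # assumed constructor
#   sequence = [(0.0, 0.0, 0.0), (5.0, 0.0, 0.0), (5.0, 5.0, 1.57)]
#   samples, total_time = planner.sample_trajectory_dubins(sequence)
#   planner.plot_velocity_profile(samples)
#   msg = planner.create_ros_trajectory(samples, height=2.0)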
|
[
"penicrob@fel.cvut.cz"
] |
penicrob@fel.cvut.cz
|
8a7818455d82620f37170b9ac842f62c8df49b7a
|
a69d82e6e74d72deb03ef7f4b9682b74f0eba993
|
/gold/scoreboard/modules/available/bitcoin/render
|
7ef432ac087372d8c770685e163de1edd8b69a30
|
[
"WTFPL"
] |
permissive
|
krebs/painload
|
f903e07d71c65b49af009a0b1e7b8f5f68b4a91f
|
2b09702300843947d40c8671776ea4227d1ad6d6
|
refs/heads/master
| 2023-08-31T04:11:51.835401
| 2023-08-20T22:20:40
| 2023-08-20T22:20:40
| 1,703,153
| 17
| 3
|
WTFPL
| 2023-07-06T23:14:49
| 2011-05-04T20:21:18
|
Shell
|
UTF-8
|
Python
| false
| false
| 296
|
#!/usr/bin/python
import json,urllib,datetime,sys
today = datetime.datetime.now()
result = json.load(sys.stdin)
print today.strftime("%Y-%m-%dT%H:%M:%S"),
print '%s' % result["blocks"],
print '%s' % result["difficulty"],
print '%s' % result["keypoololdest"],
#print '%s' % result["keypoolsize"]
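# Hypothetical invocation (a sketch, assuming the surrounding scoreboard
# module pipes the bitcoind 'getinfo' JSON into this script on stdin; the
# output numbers are illustrative only):
#   bitcoind getinfo | ./render
# -> 2011-05-04T20:21:18 121235 55589.52 1304281234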
|
[
"root@euer.krebsco.de"
] |
root@euer.krebsco.de
|
|
8a4871b4d661ef4a0a122394b00d6b5f55566f2e
|
9d2bafb07baf657c447d09a6bc5a6e551ba1806d
|
/ros2_ws/build/std_msgs/rosidl_generator_py/std_msgs/msg/_multi_array_layout.py
|
e830a59dc03efc5d1893c4f8d32f97cabca4ecd6
|
[] |
no_license
|
weidafan/ros2_dds
|
f65c4352899a72e1ade662b4106e822d80a99403
|
c0d9e6ff97cb7cc822fe25a62c0b1d56f7d12c59
|
refs/heads/master
| 2021-09-05T20:47:49.088161
| 2018-01-30T21:03:59
| 2018-01-30T21:03:59
| 119,592,597
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,630
|
py
|
# generated from rosidl_generator_py/resource/_msg.py.em
# generated code does not contain a copyright notice
import logging
import traceback
class Metaclass(type):
"""Metaclass of message 'MultiArrayLayout'."""
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('std_msgs')
except ImportError:
logger = logging.getLogger('rosidl_generator_py.MultiArrayLayout')
logger.debug(
'Failed to import needed modules for type support:\n' + traceback.format_exc())
else:
cls._CONVERT_FROM_PY = module.convert_from_py_msg_multi_array_layout
cls._CONVERT_TO_PY = module.convert_to_py_msg_multi_array_layout
cls._TYPE_SUPPORT = module.type_support_msg_multi_array_layout
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg_multi_array_layout
from std_msgs.msg import MultiArrayDimension
if MultiArrayDimension.__class__._TYPE_SUPPORT is None:
MultiArrayDimension.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class MultiArrayLayout(metaclass=Metaclass):
"""Message class 'MultiArrayLayout'."""
__slots__ = [
'_dim',
'_data_offset',
]
def __init__(self, **kwargs):
assert all(['_' + key in self.__slots__ for key in kwargs.keys()]), \
'Invalid arguments passed to constructor: %r' % kwargs.keys()
self.dim = kwargs.get('dim', list())
self.data_offset = kwargs.get('data_offset', int())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = [s[1:] + '=' + repr(getattr(self, s, None)) for s in self.__slots__]
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
@property
def dim(self):
"""Message field 'dim'."""
return self._dim
@dim.setter
def dim(self, value):
from std_msgs.msg import MultiArrayDimension
from collections import Sequence
from collections import Set
from collections import UserList
from collections import UserString
assert \
((isinstance(value, Sequence) or
isinstance(value, Set) or
isinstance(value, UserList)) and
not isinstance(value, str) and
not isinstance(value, UserString) and
all([isinstance(v, MultiArrayDimension) for v in value]) and
True), \
"The 'dim' field must be a set or sequence and each value of type 'MultiArrayDimension'"
self._dim = value
@property
def data_offset(self):
"""Message field 'data_offset'."""
return self._data_offset
@data_offset.setter
def data_offset(self, value):
assert \
isinstance(value, int), \
"The 'data_offset' field must of type 'int'"
assert value >= 0 and value < 4294967296, \
"The 'data_offset' field must be an unsigned integer in [0, 4294967296)"
self._data_offset = value
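# Usage sketch (a hedged example, assuming the generated std_msgs Python
# package is importable, e.g. inside a sourced ROS 2 workspace):
#   from std_msgs.msg import MultiArrayDimension, MultiArrayLayout
#   layout = MultiArrayLayout(dim=[MultiArrayDimension()], data_offset=0)
#   layout.data_offset = 4  # the setter enforces the uint32 range checked above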
|
[
"austin.tisdale.15@cnu.edu"
] |
austin.tisdale.15@cnu.edu
|
d67b665d803c3b6550f549ca34cb453a74320f06
|
02f0e89f7e3a67fe5787f2cc7c7e7d790efb6541
|
/pac_bot/network.py
|
ca9f1677557c6ca4286f5ace8095d98751fb3770
|
[] |
no_license
|
walter090/PacManBot
|
c087e365ac404314e2cfe8ed44e1d58941868424
|
e177d2c7572a9607de6baa6db546e0544edc2984
|
refs/heads/master
| 2021-07-07T03:41:49.973861
| 2017-10-06T18:54:42
| 2017-10-06T18:54:42
| 103,622,161
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,650
|
py
|
import tensorflow as tf
from . import layers
class Network(object):
def __init__(self):
self._encoded = None
self._lstm = None
self._lstm_output = None
def encoding_network(self, x, layers_config=None, activation='lrelu', name='encoding'):
"""Build the encoding network.
Args:
x: Input tensor.
layers_config: Configuration for each convolution layer;
each layer is a list with three elements: filter size, stride, and
number of output channels.
activation: Choose activation function, between 'lrelu' and 'elu.'
name: Name of variable scope.
Returns:
fc: Output from the conv net.
"""
with tf.variable_scope(name):
if layers_config is None:
layers_config = [
# Filter size, stride, num output channels
[(8, 8), (2, 2), 32],
[(8, 8), (4, 4), 64],
[(4, 4), (2, 2), 128],
[(4, 4), (2, 2), 256],
]
conv_output = x
for layer in layers_config:
conv_output = layers.conv_layer(x=conv_output,
conv_ksize=layer[0],
conv_stride=layer[1],
out_channels=layer[2],
activation=activation)
flattened = layers.flatten(conv_output)
fc = layers.fully_conn(x=flattened, num_output=516)
self._encoded = layers.fully_conn(x=fc, num_output=256)
return self._encoded
def lstm_network(self, x, actions, cell_size=512, stacked_layers=4, name='lstm'):
"""Build the LSTM network.
Args:
x: Input tensor.
actions: List of available actions.
cell_size: LSTM cell size.
stacked_layers: Number of stacked LSTM cells.
name: Name for the variable scope.
Returns:
Output tensor with one softmax probability per available action.
"""
with tf.variable_scope(name):
self._lstm, _ = layers.lstm(x=x,
cell_size=cell_size,
stacked_layers=stacked_layers)
lstm_flattened = layers.flatten(self._lstm)
self._lstm_output = layers.fully_conn(lstm_flattened,
num_output=len(actions),
activation='softmax')
return self._lstm_output
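# Minimal usage sketch (assumptions: TensorFlow 1.x graph mode, 84x84 RGB
# frames, and that layers.lstm() accepts the encoded feature tensor as-is;
# none of these are confirmed by this file alone):
#   net = Network()
#   frames = tf.placeholder(tf.float32, [None, 84, 84, 3])
#   encoded = net.encoding_network(frames)               # -> shape [None, 256]
#   probs = net.lstm_network(encoded, actions=['up', 'down', 'left', 'right'])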
|
[
"walter.wu090@gmail.com"
] |
walter.wu090@gmail.com
|
e844333d221d13709680224fc0f09e232050f599
|
f21ec456a7a1d136e70585bbdf34a72e19cd418c
|
/examples/her/her_ant_goal.py
|
65d0dea6ae25948d7f88ddffab00c0330713f37e
|
[
"MIT"
] |
permissive
|
cmoyacal/rlkit-offline-rl-benchmark
|
6d22b57913dc8039b223c03648e0fd28e2ecba79
|
55193e63b2e3303fba1815dc1754fc94dd6850e0
|
refs/heads/master
| 2023-03-19T19:07:23.569069
| 2020-03-20T22:03:21
| 2020-03-20T22:03:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,937
|
py
|
import gym
import rlkit.torch.pytorch_util as ptu
from rlkit.data_management.obs_dict_replay_buffer import ObsDictRelabelingBuffer
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import GoalConditionedPathCollector
from rlkit.torch.her.her import HERTrainer
from rlkit.torch.networks import FlattenMlp
from rlkit.torch.sac.policies import MakeDeterministic, TanhGaussianPolicy
from rlkit.torch.sac.sac import SACTrainer
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
from locomotion import ant, swimmer, point
def experiment(variant):
if variant['env_name'] == 'Ant-Goal':
eval_env = ant.GoalReachingAntDictEnv(expose_all_qpos=True)
expl_env = ant.GoalReachingAntDictEnv(expose_all_qpos=True)
elif variant['env_name'] == 'Ant-Maze':
expl_env = ant.MazeReachingAntDictEnv(expose_all_qpos=True)
eval_env = ant.MazeReachingAntDictEnv(expose_all_qpos=True)
elif variant['env_name'] == 'Point-Goal':
expl_env = point.GoalReachingPointDictEnv(expose_all_qpos=True)
eval_env = point.GoalReachingPointDictEnv(expose_all_qpos=True)
elif variant['env_name'] == 'Swimmer-Goal':
expl_env = swimmer.GoalReachingSwimmerDictEnv(expose_all_qpos=True)
eval_env = swimmer.GoalReachingSwimmerDictEnv(expose_all_qpos=True)
observation_key = 'observation'
desired_goal_key = 'desired_goal'
# import ipdb; ipdb.set_trace()
achieved_goal_key = desired_goal_key.replace("desired", "achieved")
replay_buffer = ObsDictRelabelingBuffer(
env=eval_env,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
achieved_goal_key=achieved_goal_key,
**variant['replay_buffer_kwargs']
)
obs_dim = eval_env.observation_space.spaces['observation'].low.size
action_dim = eval_env.action_space.low.size
goal_dim = eval_env.observation_space.spaces['desired_goal'].low.size # Hardcoded for now
print (obs_dim, action_dim, goal_dim)
qf1 = FlattenMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
qf2 = FlattenMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf1 = FlattenMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
target_qf2 = FlattenMlp(
input_size=obs_dim + action_dim + goal_dim,
output_size=1,
**variant['qf_kwargs']
)
policy = TanhGaussianPolicy(
obs_dim=obs_dim + goal_dim,
action_dim=action_dim,
**variant['policy_kwargs']
)
eval_policy = MakeDeterministic(policy)
trainer = SACTrainer(
env=eval_env,
policy=policy,
qf1=qf1,
qf2=qf2,
target_qf1=target_qf1,
target_qf2=target_qf2,
**variant['sac_trainer_kwargs']
)
trainer = HERTrainer(trainer)
eval_path_collector = GoalConditionedPathCollector(
eval_env,
eval_policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
expl_path_collector = GoalConditionedPathCollector(
expl_env,
policy,
observation_key=observation_key,
desired_goal_key=desired_goal_key,
)
algorithm = TorchBatchRLAlgorithm(
trainer=trainer,
exploration_env=expl_env,
evaluation_env=eval_env,
exploration_data_collector=expl_path_collector,
evaluation_data_collector=eval_path_collector,
replay_buffer=replay_buffer,
**variant['algo_kwargs']
)
algorithm.to(ptu.device)
algorithm.train()
if __name__ == '__main__':
variant = dict(
algorithm='HER-SAC',
version='normal',
env_name='Point-Goal',
algo_kwargs=dict(
batch_size=128,
num_epochs=1000,
num_eval_steps_per_epoch=5000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=20,
),
sac_trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
max_size=int(1E6),
fraction_goals_rollout_goals=0.0, # equal to k = 4 in HER paper
fraction_goals_env_goals=0.0,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
)
setup_logger('her-sac-ant_experiment', variant=variant)
ptu.set_gpu_mode(True)
experiment(variant)
|
[
"aviralkumar2907@gmail.com"
] |
aviralkumar2907@gmail.com
|
ddb052956a9b3c82471c441582e3b5d983378e07
|
690ed5a6611f9b3ffb272fce1c48ce7ded67791d
|
/audio/dial_tone_wav.py
|
6a0b51ebfd6c3ea6491853ebeae69988ab675f2d
|
[] |
no_license
|
dservo/expl_wireless
|
96d0ec4e783a3834c35edc156f89a0d83fba74d5
|
31a40b176c13978d5752372aa01082db1025b1f3
|
refs/heads/master
| 2020-04-07T05:46:28.576866
| 2018-11-20T16:57:30
| 2018-11-20T16:57:30
| 158,109,241
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,499
|
py
|
#!Z:\gr-build\src-stage2-python\gr-python27\python.exe
#
# Copyright 2004,2005,2007,2008,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# GNU Radio example program to record a dial tone to a WAV file
from gnuradio import gr
from gnuradio import blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
usage = "%prog: [options] filename"
parser = OptionParser(option_class=eng_option, usage=usage)
parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
help="set sample rate to RATE (48000)")
parser.add_option("-N", "--samples", type="eng_float", default=None,
help="number of samples to record")
(options, args) = parser.parse_args ()
if len(args) != 1 or options.samples is None:
parser.print_help()
raise SystemExit, 1
sample_rate = int(options.sample_rate)
ampl = 0.1
src0 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 350, ampl)
src1 = analog.sig_source_f(sample_rate, analog.GR_SIN_WAVE, 440, ampl)
head0 = blocks.head(gr.sizeof_float, int(options.samples))
head1 = blocks.head(gr.sizeof_float, int(options.samples))
dst = blocks.wavfile_sink(args[0], 2, int(options.sample_rate), 16)
self.connect(src0, head0, (dst, 0))
self.connect(src1, head1, (dst, 1))
if __name__ == '__main__':
try:
my_top_block().run()
except KeyboardInterrupt:
pass
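# Example invocation (derived from the options above: -N is required and the
# sample rate defaults to 48 kHz, so this records one second of dial tone):
#   ./dial_tone_wav.py -N 48000 dialtone.wav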
|
[
"noreply@github.com"
] |
dservo.noreply@github.com
|
4635cefeb2889b33eeeaced1b5abbd2302c4e626
|
20a1250a7fb5f601b12d58acfd2607fa6d8d672f
|
/knockblock/knockblock/tasks.py
|
957924e88f6dc0bcfaf013c13e3c3f250889133b
|
[] |
no_license
|
pboes/Knockblock
|
a7aa9e523c69a795e6c914c2a6ddb189f06a0cc1
|
420357fef5df5e1949cf6b90fd5c17b7194e3dba
|
refs/heads/main
| 2023-02-19T10:22:29.965680
| 2021-01-19T10:25:47
| 2021-01-19T10:25:47
| 330,638,902
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
from __future__ import absolute_import
#from django.conf import settings
from celery import shared_task
import json
@shared_task(name="twitter_job")
def twitter_job(twitt_input):
idd = twitter_job.request.id
inputdict = {"inputt": twitt_input,"idd" : idd}
print inputdict
#sys.argv = [settings.STATIC_BREV + static('last24h/tweet.py'), inputt]
execfile('static/tweet.py',inputdict)
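# Hypothetical enqueueing from Django code (a sketch: assumes a configured
# Celery broker and that this module is importable as knockblock.tasks; the
# argument string is a placeholder):
#   from knockblock.tasks import twitter_job
#   twitter_job.delay('some tweet text')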
|
[
"pvboesgmail.com"
] |
pvboesgmail.com
|
bc889162571b4332bdf0ce4453e6b6200902ad6e
|
103a0b3baba5a8fecade0fe4aca0aef68bf875b8
|
/projects/helloapp/howdy/urls.py
|
08ad118e8a783ebd0669998d01f5d4bc9ece8b8c
|
[] |
no_license
|
royce-matthew/royce-matthew.github.io
|
e1acdd10b27c6e98c5fadff62d7a0b6036030ad6
|
d5a107364ef3f155bcb3856f0e2e69519a713559
|
refs/heads/master
| 2021-05-02T02:57:11.567475
| 2018-07-02T16:32:29
| 2018-07-02T16:32:29
| 120,889,131
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from howdy import views
from rest_framework import generics
urlpatterns = [
url(r'^$', views.HomePageView.as_view()),
url(r'^products/$', views.ProductList.as_view()),
url(r'^products/(?P<pk>[0-9]+)/$', views.ProductDetail.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
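# Note: format_suffix_patterns makes suffixed URLs such as /products/1.json
# resolve to ProductDetail with format='json' (standard DRF behaviour).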
|
[
"royce@Royces-MacBook-Pro.local"
] |
royce@Royces-MacBook-Pro.local
|
e0829e4a8d084cfe4241b760879020b048ba1451
|
09b6b680637080805587e83f8d5c1c166a0dc476
|
/asset_management/myapp/views.py
|
6dbe31081198406230cdcdfb09ddd0f175dd38f7
|
[] |
no_license
|
Mamtha-Vathar/Asset_Management
|
8ce19b6068ce1c47f375cb83cfc8a9e1a719c1bf
|
172ef8a5e2a4f774c0e7ea6f9fca926edc22f8c2
|
refs/heads/master
| 2023-01-14T09:13:14.040053
| 2020-11-25T17:31:56
| 2020-11-25T17:31:56
| 315,995,208
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,189
|
py
|
import logging, requests
import urllib
from django.views.generic import View
from .models import motor
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from django.core.serializers import json
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.template import Context, loader
from django.db.models import Count, Q
from django.shortcuts import render
from django.urls import reverse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
from django.contrib.auth.models import User
from django.shortcuts import render, get_object_or_404
def index(request):
template = loader.get_template("./myapp/index.html")
return HttpResponse(template.render())
def login(request):
template = loader.get_template("./myapp/login.html")
return HttpResponse(template.render())
def register(request):
template = loader.get_template("./myapp/register.html")
return HttpResponse(template.render())
def showmotors(request):
last_six = motor.objects.order_by('-id')[:6]  # the slice takes six items; the original name 'last_five' was misleading
allmotors = reversed(last_six)
context = {'allmotors': allmotors}
return context
def anomalies(request):
path = './myapp/anomalies.html'
data = showmotors(request)
return render(request, path, data)
def manuals(request):
template = loader.get_template("./myapp/manuals.html")
return HttpResponse(template.render())
def spareparts(request):
template = loader.get_template("./myapp/spareparts.html")
return HttpResponse(template.render())
def aboutus(request):
template = loader.get_template("./myapp/aboutus.html")
return HttpResponse(template.render())
def dashboard(request):
template = loader.get_template("./myapp/dashboard.html")
return HttpResponse(template.render())
# def home(request):
# template = loader.get_template("./charts.html")
# return HttpResponse(template.render())
def geolocation(request):
template = "./myapp/index.html"
loc = loc_data(request)
return render(request, template, loc)
def temperature(request):
template = loader.get_template("./myapp/temperature.html")
return HttpResponse(template.render())
def Current(request):
template = loader.get_template("./myapp/current.html")
return HttpResponse(template.render())
def Voltage(request):
template = loader.get_template("./myapp/voltage.html")
return HttpResponse(template.render())
def trial(request):
template = loader.get_template("./myapp/trial.html")
return HttpResponse(template.render())
def add_data(request):
motor_data = []
url = 'https://api.thingspeak.com/channels/984447/feeds.json'
header = {'Content-Type': 'application/json'}
r = requests.get(url, headers=header)
data = r.json()
count = motor.objects.all().count()
for i in range(count, (len(data["feeds"]))):
motor.objects.create(
mid="m1",
Temp=float(data["feeds"][i]["field1"]),
voltage=float(data["feeds"][i]["field2"]),
current=float(data["feeds"][i]["field3"]),
x=float(data["feeds"][i]["field4"]),
y=float(data["feeds"][i]["field5"]),
z=float(data["feeds"][i]["field6"]),
status=(data["feeds"][i]["field7"][2:-2]),
)
return Response('Data added successfully')
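# Shape of the ThingSpeak response consumed by add_data(), reconstructed from
# the field accesses above (values are illustrative, not real data):
#   {"feeds": [{"field1": "30.1", "field2": "229.8", "field3": "1.2",
#               "field4": "0.01", "field5": "0.02", "field6": "0.98",
#               "field7": "['OK']"}, ...]}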
def loc_data(request):
loc = []
url = 'https://api.thingspeak.com/channels/984447/feeds.json'
header = {'Content-Type': 'application/json'}
r = requests.get(url, headers=header)
data = r.json()
# for i in range(len(data["feeds"])):
# lat = float(data["feeds"][i]["field7"])
# lon = float(data["feeds"][i]["field8"])
lat = 12.940538
lon = 77.566287
context = {'lat': lat, 'lon': lon}
return context
def get(request):
# NOTE: the original referenced an undefined 'example' model and a stray
# 'self' argument; the imported 'motor' model is assumed here so the view runs.
data = motor.objects.all()
context = {'data': data}
print(data)
return render(request, './myapp/anomalies.html', context)
def firebase(request):
return render(request, './myapp/firebase.html')
# Create your views here.
|
[
"mamt17cs@cmrit.ac.in"
] |
mamt17cs@cmrit.ac.in
|
3cc7dc94fdb029bb70bc409a3dc8ffef0368bf06
|
2cec0797981b73c497866a75fb6d33f4c3a4c06c
|
/brain_tumor_classification/modules/data/utils.py
|
e5cd18bf3458f2de6aa299ac09b545c77cfc04b4
|
[] |
no_license
|
Vadbeg/brain_tumor_classification
|
ed44e50076627a0682e2eca13cf115716c510ed1
|
ba87b65717cd1fe75871f3108db1394de271c62d
|
refs/heads/master
| 2023-08-01T13:46:27.176780
| 2021-09-19T15:14:32
| 2021-09-19T15:14:32
| 397,667,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
"""Module with utilities for dataset"""
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
from monai.transforms import (
AddChanneld,
Compose,
LoadImaged,
Resized,
ScaleIntensityRanged,
Transform,
)
from torch.utils.data import DataLoader, Dataset
def get_train_val_paths(
train_path: Union[str, Path],
train_split_percent: float = 0.7,
ct_file_extension: str = '*.nii.gz',
item_limit: Optional[int] = None,
shuffle: bool = True,
) -> Tuple[List[Path], List[Path]]:
train_path = Path(train_path)
list_of_paths = list(train_path.glob(ct_file_extension))
if shuffle:
np.random.shuffle(list_of_paths)
edge_value = int(train_split_percent * len(list_of_paths))
train_list_of_paths = list_of_paths[:edge_value]
val_list_of_paths = list_of_paths[edge_value:]
if item_limit:
train_list_of_paths = train_list_of_paths[:item_limit]
val_list_of_paths = val_list_of_paths[:item_limit]
return train_list_of_paths, val_list_of_paths
def create_data_loader(
dataset: Dataset, batch_size: int = 1, shuffle: bool = True, num_workers: int = 2
) -> DataLoader:
data_loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
)
return data_loader
def get_load_transforms(
img_key: str,
original_min: float = 0.0,
original_max: float = 200.0,
res_min: float = 0.0,
res_max: float = 1.0,
spatial_size: Tuple[int, int, int] = (196, 196, 128),
) -> Compose:
preprocessing_transforms = get_preprocessing_transforms(
img_key=img_key,
original_min=original_min,
original_max=original_max,
res_min=res_min,
res_max=res_max,
spatial_size=spatial_size,
)
load_transforms = Compose(
[LoadImaged(keys=[img_key], dtype=np.float32), preprocessing_transforms]
)
return load_transforms
def get_preprocessing_transforms(
img_key: str,
original_min: float = 0.0,
original_max: float = 200.0,
res_min: float = 0.0,
res_max: float = 1.0,
spatial_size: Tuple[int, int, int] = (196, 196, 128),
) -> Compose:
preprocessing_transforms = Compose(
[
AddChanneld(keys=[img_key]),
ScaleIntensityRanged(
keys=[img_key],
a_min=original_min,
a_max=original_max,
b_min=res_min,
b_max=res_max,
clip=True,
),
Resized(keys=[img_key], spatial_size=spatial_size),
]
)
return preprocessing_transforms
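# Usage sketch (hedged: assumes MONAI's dict-style Dataset and a directory of
# .nii.gz scans; the path and the 'img' key are illustrative):
#   from monai.data import Dataset
#   train_paths, val_paths = get_train_val_paths('data/train')
#   transforms = get_load_transforms(img_key='img')
#   dataset = Dataset(data=[{'img': str(p)} for p in train_paths],
#                     transform=transforms)
#   loader = create_data_loader(dataset, batch_size=2)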
|
[
"vadbeg@tut.by"
] |
vadbeg@tut.by
|
df6468f53af378e405bda553802e02e64b84587f
|
fa1e281807939511dbbefaf58e8ecd59436188c4
|
/cinebot_mini_display_server/server.py
|
85674e722628a4bb7a25e6fffdc7eb69bd291c3f
|
[
"MIT"
] |
permissive
|
cheng-chi/cinebot_mini
|
2292204fad3d072012d8b336033430f394e7889f
|
708a7c80d2f203dfe3b52bf84d9cbafac7673d27
|
refs/heads/master
| 2020-05-20T10:29:33.526060
| 2019-05-08T04:16:53
| 2019-05-08T04:16:53
| 185,526,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,845
|
py
|
from cinebot_mini import SERVERS
from aiohttp import web
import aiohttp_cors
import json
from copy import *
import os
from shutil import copy2
import asyncio
from aiohttp_sse import sse_response
screenConfirmed = {}
screenDisplay = {}
nextDisplay = {}
routes = web.RouteTableDef()
routes.static('/ui', 'lightbox-ui', name='ui')
routes.static('/api/static', 'static', name='static')
@routes.get('/')
async def root_handler(request):
return web.HTTPFound('/ui/index.html')
@routes.get('/api')
async def hello(request):
return web.Response(text="Hello, world")
@routes.get('/api/screens')
async def screens(request):
cfg = {}
with open("screenCfg.json", "r") as cfgFile:
try:
cfg = json.load(cfgFile)
except:
pass
for k in cfg:
cfg[k]["width_pixels"] = float(cfg[k]["wPx"])
cfg[k]["height_pixels"] = float(cfg[k]["hPx"])
if ("wCm" in cfg[k]):
cfg[k]["width_meters"] = float(cfg[k]["wCm"]) / 100
cfg[k]["height_meters"] = cfg[k]["height_pixels"] / cfg[k]["width_pixels"] * cfg[k]["width_meters"]
del cfg[k]["wCm"]
else:
cfg[k]["width_meters"] = float(cfg[k]["wPx"]) / float(cfg[k]["ppcm"]) / 100
cfg[k]["height_meters"] = float(cfg[k]["hPx"]) / float(cfg[k]["ppcm"]) / 100
del cfg[k]["wPx"]
del cfg[k]["hPx"]
del cfg[k]["ppcm"]
return web.json_response(cfg)
@routes.get('/api/confirm/{id}')
async def confirm(request):
global screenConfirmed
screenid = request.match_info["id"]
screenConfirmed[screenid] = True
print(screenConfirmed)
return web.Response(text="confirmed, screen {}".format(screenid))
@routes.post('/api/login')
async def login(request):
global screenConfirmed
data = await request.text()
data = json.loads(data)
screenid = data["screenid"]
wPx = None if ("wPx" not in data or data["wPx"] == "") else data["wPx"]
hPx = None if ("hPx" not in data or data["hPx"] == "") else data["hPx"]
ppcm = None if ("ppcm" not in data or data["ppcm"] == "") else data["ppcm"]
wCm = None if ("wCm" not in data or data["wCm"] == "") else data["wCm"]
print("received dimensions: ", (wPx, hPx))
with open("screenCfg.json", "a+") as file:
pass
prevCfg = {}
with open("screenCfg.json", "r+") as cfgFile:
try:
prevCfg = json.load(cfgFile)
except:
pass
resp = {}
with open("screenCfg.json", "w") as cfgFile:
config = deepcopy(prevCfg)
if (wPx is not None and hPx is not None):
config[str(screenid)] = {
"wPx": wPx,
"hPx": hPx
}
resp["wPx"] = wPx
resp["hPX"] = hPx
else:
print(config, screenid)
if (str(screenid) in config):
wPx = config[screenid]["wPx"]
hPx = config[screenid]["hPx"]
else:
(wPx, hPx) = (1920, 1080)
if (ppcm is None):
if (str(screenid) in prevCfg and "ppcm" in prevCfg[str(screenid)]):
ppcm = prevCfg[str(screenid)]["ppcm"]
else:
ppcm = 72
config[str(screenid)] = {
"wPx": wPx,
"hPx": hPx,
"ppcm": ppcm
}
if (wCm is not None):
config[str(screenid)]["wCm"] = wCm
json.dump(config, cfgFile)
resp = {
"wPx": wPx,
"hPx": hPx,
"rgb": [128, 128, 128]
}
screenConfirmed[screenid] = False
screenDisplay[screenid] = {"rgb": resp["rgb"]}
return web.json_response(resp)
@routes.put('/api/showimgs')
async def showimgs(request):
global screenDisplay, nextDisplay
data = await request.text()
data = json.loads(data)
resp = {}
for d in data:
id = d["id"]
if (id not in screenDisplay):
resp = {
"status": "failed",
"message": "no screen with id {}".format(id)
}
break
if ("img_url" in d):
nextDisplay[id] = {
"img_url": d["img_url"]
}
elif ("img_path" in d):
path = d["img_path"]
if (not os.path.isfile(path)):
resp = {
"status": "failed",
"message": "unable to find file {}".format(path)
}
break
splitted = os.path.split(path)
prefixHash = hash(splitted[0])
if (prefixHash < 0):
prefix = hex(prefixHash).lstrip("-0x") + "_1"
else:
prefix = hex(prefixHash).lstrip("0x") + "_0"
filename = "{}_{}".format(prefix, splitted[1])
if (not os.path.isfile("./static/{}".format(filename))):
copy2(path, "./static/{}".format(filename))
nextDisplay[id] = {
"img_path": str(resources.url_for(filename=filename))
}
elif ("rgb" in d):
nextDisplay[id] = {
"rgb": d["rgb"]
}
if (len(resp) == 0):
resp["status"] = "success"
return web.json_response(resp)
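# Example PUT /api/showimgs payload (matches the branches above: each entry
# names a screen id plus exactly one of img_url / img_path / rgb):
#   [{"id": "1", "img_url": "http://example.com/a.jpg"},
#    {"id": "2", "rgb": [255, 0, 0]}]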
# @routes.get('/changeImg')
# async def changeImg(request):
# global imageIdx, nextIdx
# nextIdx = (imageIdx + 1) % 2
# return web.Response(text="done")
@routes.get('/api/serverloop/{id}')
async def hello(request):
global screenDisplay, nextDisplay, screenConfirmed
screenid = request.match_info["id"]
loop = request.app.loop
async with sse_response(request) as resp:
while True:
await asyncio.sleep(0.01, loop=loop)
if (screenid not in screenDisplay): continue
# print("screen ", screenid, screenDisplay[screenid])
if (screenid not in nextDisplay): continue
if (screenDisplay[screenid] != nextDisplay[screenid]):
# print("sending message now, ", screenid)
screenConfirmed[screenid] = False
screenDisplay[screenid] = nextDisplay[screenid]
del nextDisplay[screenid]
# await resp.send(str(resources.url_for(filename="image_{}.jpg".format(imageIdx))))
await resp.send(json.dumps(screenDisplay[screenid]))
app = web.Application()
app.add_routes(routes)
cors = aiohttp_cors.setup(app, defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_headers="*",
)
})
resources = app.router.named_resources().get("static")
print(type(resources))
for route in list(app.router.routes()):
cors.add(route)
print(route)
server_config = SERVERS["display"]
web.run_app(app, host=server_config["host"], port=server_config["port"])
|
[
"chicheng.usa@gmail.com"
] |
chicheng.usa@gmail.com
|
d7bbfb79de6ceb4fe82fa686f5fa0434db9fbde8
|
d608974e671f57b949950125108da49c43753725
|
/news_app/models.py
|
da8b15bdfc64fb873a59cb71fc7c593446d30498
|
[] |
no_license
|
swooshnews/swoosh-news
|
67b1cb0f520b723e29913f0aeda6eb34fd188a5c
|
0530ecd5e0632519376496cbc0701779371987a1
|
refs/heads/master
| 2021-01-23T20:14:13.313887
| 2009-09-18T00:50:27
| 2009-09-18T00:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,434
|
py
|
"""
These are models for news that are in the database.
"""
from django.db import models
from django.contrib.auth.models import User
import news.conf as news_settings
# This is used to set up the signals and callbacks for this app.
# The signal stuff needs to get imported early on, so it should go here.
import news.signals
import datetime
from urlparse import urlparse
class UserProfile(models.Model):
"""
This is a profile to a user. It is intrinsically
linked to django's default User model. This UserProfile
holds additional info about the user.
"""
# a link to the user's website
website = models.URLField(default="", verify_exists=False,
max_length=news_settings.NEWS_MAX_URL_LENGTH)
# the remaining comment points
comment_points = models.IntegerField(default=0)
user = models.ForeignKey(User, unique=True)
date_created = models.DateTimeField(default=datetime.datetime.now)
# the about box on the user's profile page
about = models.TextField(default="")
option_show_email = models.BooleanField(default=False)
option_use_javascript = models.BooleanField(default=False)
option_show_dead = models.BooleanField(default=False)
# log user's ip address
created_from_ip = models.IPAddressField(null=True)
last_login_ip = models.IPAddressField(null=True)
def __unicode__(self):
return self.user.username
def _get_username(self):
return self.user.username
username = property(_get_username)
class Rated(models.Model):
"""
This is the manager for the ManyToManyField
holding the person who rates the Rankable object.
"""
DIRECTION_CHOICES = (('up', 'Up'), ('down', 'Down'))
rankable = models.ForeignKey('Rankable')
userprofile = models.ForeignKey(UserProfile)
date_rated = models.DateTimeField(default=datetime.datetime.now)
direction = models.CharField(max_length=4, choices=DIRECTION_CHOICES)
ranked_from_ip = models.IPAddressField(null=True)
class Rankable(models.Model):
"""
This is anything that can be ranked (i.e., have a rating).
Mainly this is just for Comment and NewsItem.
"""
poster = models.ForeignKey(UserProfile)
date_posted = models.DateTimeField(default=datetime.datetime.now)
# if a rankable has been deleted
dead = models.BooleanField(default=False)
# http://www.djangoproject.com/documentation/models/many_to_many/
raters = models.ManyToManyField(UserProfile, related_name="rated_%(class)ss",
through='Rated')
# the rating for the field (if someone up votes this Rankable,
# the rating will go up by one)
rating = models.IntegerField(default=1)
# the ranking, which is calculated by the function below
ranking = models.FloatField(default=0)
last_ranked_date = models.DateTimeField(default=datetime.datetime.now)
posted_from_ip = models.IPAddressField(null=True)
def calculate_ranking(self):
"""
Easy algorithm:
http://www.seomoz.org/blog/reddit-stumbleupon-delicious-and-hacker-news-algorithms-exposed
More complicated algorithm:
http://arcfn.com/2009/06/how-does-newsyc-ranking-work.html
"""
delta = datetime.datetime.now() - self.date_posted
hours = (delta.days * 24.0) + (delta.seconds / 3600.0)
# simpler way
return (self.rating - 1.0) / ((hours + 2.0) ** 1.5)
# more complicated way
# return ((rating - 1.0) ** .8) / ((hours + 2.0) ** 1.8)
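# Worked example of the simpler formula: a 2-hour-old item with rating 10
# ranks at (10 - 1.0) / ((2 + 2.0) ** 1.5) = 9 / 8 = 1.125.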
def update_ranking(self):
"""
Update the ranking for the Rankable.
The rankable is saved to the database after updating.
"""
self.ranking = self.calculate_ranking()
self.last_ranked_date = datetime.datetime.now()
# TODO: should this save?
self.save()
def is_news_item(self):
"""Returns true if this rankable is a news item."""
if NewsItem.objects.filter(id=self.id):
return True
else:
return False
def is_comment(self):
"""Returns true if this rankable is a comment."""
if Comment.objects.filter(id=self.id):
return True
else:
return False
def __unicode__(self):
uni_string = ""
if self.is_news_item():
uni_string += "NewsItem: " + unicode(self.newsitem)
elif self.is_comment():
uni_string += "Comment: " + unicode(self.comment)
else:
uni_string += "id " + unicode(self.id)
return uni_string
def num_child_comments(self):
"""
This returns the number of comments a news item (or a comment) has
"""
if hasattr(self, 'child_set'):
from news.helpers import get_child_comments
return len(get_child_comments(self.child_set, return_dead=False))
else:
return 0
def has_children(self):
"""
This returns whether or not a news item (or comment) has children.
"""
if hasattr(self, 'child_set'):
from news.helpers import get_child_comments
return len(get_child_comments(self.child_set)) > 0
else:
return False
def has_nondead_children(self):
"""
This returns whether or not a news item (or comment) has children and
at least one of them is not dead.
"""
if hasattr(self, 'child_set'):
from news.helpers import get_child_comments
return len(get_child_comments(self.child_set, return_dead=False)) > 0
else:
return False
def can_be_edited(self, userprofile):
""" Return whether the given userprofile can edit a rankable. """
from news.helpers import datetime_ago
if self.poster == userprofile and \
self.date_posted > \
datetime_ago(minutes=news_settings.NEWS_COMMENT_EDITABLE_MINS):
return True
return False
def can_be_deleted(self, userprofile):
""" Return whether the given userprofile can delete a rankable. """
return self.can_be_edited(userprofile)
def get_parent_news_item(self):
"""
If this rankable is a news item, just return the
news item. If this rankable is a comment,
return the news item this comment is posted to.
"""
if self.is_news_item():
return self.newsitem
elif self.is_comment():
return self.comment.get_newsitem()
else:
return None
def can_be_downvoted(self):
""" Return true if this rankable can be down-voted. """
if self.is_comment():
return False
else:
return True
def can_post_comment(self, userprofile):
"""
Return True if userprofile can post a comment to this rankable.
Checks to make sure whehter we are posting to a regular news item,
or an "Ask SN:" news item.
"""
return userprofile.comment_points - \
self.comment_cost(userprofile) >= 0
def comment_cost(self, userprofile):
"""
Calculate the cost of posting a comment. It is different
depending on whether we are posting to a regular news item,
or to an "Ask SN:" item.
"""
parent_news_item = self.get_parent_news_item()
if parent_news_item:
if parent_news_item.is_normal_news_item():
return news_settings.NEWS_COST_RESPOND_NEWS_ITEM
elif parent_news_item.is_ask_sn():
return news_settings.NEWS_COST_RESPOND_ASK_SN
return 0
def can_vote(self, userprofile):
"""
Return True if userprofile can vote on this rankable.
This function makes sure that the user has enough comment_points.
"""
return userprofile.comment_points - \
self.vote_cost(userprofile) >= 0
def vote_cost(self, userprofile):
"""
Calculate the cost of voting on a rankable. It differs
depending on what we are voting on.
"""
if self.is_news_item():
parent_news_item = self.get_parent_news_item()
if parent_news_item.is_ask_sn():
return news_settings.NEWS_COST_VOTE_ASK_SN
elif parent_news_item.is_normal_news_item():
return news_settings.NEWS_COST_VOTE_NEWS_ITEM
if self.is_comment():
parent_news_item = self.get_parent_news_item()
if parent_news_item:
if parent_news_item.is_normal_news_item():
return news_settings.NEWS_COST_VOTE_NORMAL_COMMENT
elif parent_news_item.is_ask_sn():
return news_settings.NEWS_COST_VOTE_ASK_SN_COMMENT
return 0
def already_voted(self, userprofile):
""" Returns True if userprofile already voted on this rankable. """
return len(self.raters.filter(id=userprofile.id)) > 0
class NewsItem(Rankable):
"""
A news item or an "Ask Swoosh News" item. Not a comment.
"""
# Every News item needs to be either a link to some other page
# OR some text.
title = models.CharField(max_length=news_settings.NEWS_MAX_TITLE_LENGTH)
url = models.URLField(null=True, verify_exists=False,
max_length=news_settings.NEWS_MAX_URL_LENGTH)
text = models.TextField(null=True)
def __unicode__(self):
if self.url:
return '%s (%s)' % (self.title, self.url)
elif self.text:
return '%s (%s)' % (self.title, self.text[0:20])
else:
return '%s (%s)' % (self.title, "ERROR! No URL or text!")
def is_normal_news_item(self):
"""
Returns true if this is a normal news item. It's not a
text post, but a post with a URL.
"""
if self.text:
return False
else:
return True
def is_ask_sn(self):
"""
Returns true if this is an "Ask Swoosh News"-type news post.
(no url to another site)
"""
return not self.is_normal_news_item()
def abbr_url(self):
"""
Returns an abbreviated url with just the domain name and tld.
"""
if not self.url:
return ''
(scheme, netloc, path, params, query, fragment) = urlparse(self.url)
if netloc.startswith("www."):
return netloc[4:]
return netloc
def response_cost(self):
""" Cost for responding to this news item in comment points. """
if self.is_normal_news_item():
return news_settings.NEWS_COST_RESPOND_NEWS_ITEM
elif self.is_ask_sn():
return news_settings.NEWS_COST_RESPOND_ASK_SN
else:
return 0
class Comment(Rankable):
"""
This represents a comment to a news item or a comment to a comment.
"""
# the text for the comment
text = models.TextField()
parent = models.ForeignKey(Rankable, related_name='child_set')
def get_newsitem(self):
""" Get the newsitem that this is posted to."""
if self.parent.is_news_item():
return self.parent.newsitem
else:
return self.parent.comment.get_newsitem()
def __unicode__(self):
return '%s ("%s")' % (self.poster.username, self.text[0:20])
|
[
"cdep.illabout@gmail.com"
] |
cdep.illabout@gmail.com
|
20d215ab84216efee4da368d5a8ad6e24ed57fc4
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03679/s358798230.py
|
083bf4ccd4da704fe0bfff938691cf5dbc1ec004
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 133
|
py
|
X, A, B = map(int, input().split())
if A >= B:
print('delicious')
elif A + X < B:
print('dangerous')
else:
print('safe')
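# e.g. X=4, A=3, B=6: A < B but A + X = 7 >= B, so this prints 'safe'.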
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
519e6d1ad5bda54f6ed5b6ff5dc4202c57d10141
|
6f0d8416daeb787b13938d5fa49c3d2e08d15e02
|
/tests/test_cam.py
|
5a5dbc61b10d60caf62b858b4f880f2bed62d9ec
|
[
"MIT"
] |
permissive
|
MartinHjelmare/matrixscreener
|
cbfc0ba95614c7dd6e152bb63a24b67ed03045ca
|
b6e93d9c96139cf5f2b8942d61681e45d7b6b4e5
|
refs/heads/master
| 2021-01-22T14:21:16.758654
| 2015-02-19T11:53:46
| 2015-02-19T11:53:46
| 57,959,734
| 0
| 0
| null | 2016-05-03T10:03:40
| 2016-05-03T10:03:40
| null |
UTF-8
|
Python
| false
| false
| 1,526
|
py
|
from matrixscreener.cam import *
import pytest
class EchoSocket:
"Dummy echo socket for mocking."
msg = ''
def send(self, msg):
self.msg = msg
return len(msg)
def recv(self, buffer_size):
return self.msg[0:buffer_size]
def connect(self, where):
pass
def settimeout(self, timeout):
pass
# TEST
#- key (here cli) overridden if defined several times
#- prefix added
#- types (integer, float) should be converted to strings
def test_echo(monkeypatch):
"Prefix + command sent should be same as echoed socket message."
# mock socket
monkeypatch.setattr("socket.socket", EchoSocket)
# setup cam
cam = CAM()
cmd = [('cli', 'custom'), ('cmd', 'enableall'), ('value', 'true'),
('integer', 1234), ('float', 0.00234)]
# monkeypatched EchoSocket will never flush
def flush():
pass
cam.flush = flush
echoed = cam.send(cmd)[0]
sent = tuples_as_dict(cam.prefix + cmd)
assert sent == echoed
def test_commands(monkeypatch):
"short hand commands should work as intended"
# mock socket
monkeypatch.setattr("socket.socket", EchoSocket)
# setup cam
cam = CAM()
# monkeypatched EchoSocket will never flush
def flush():
pass
cam.flush = flush
# get_information
cmd = cam.prefix + [
('cmd', 'getinfo'),
('dev', 'stage')
]
information = cam.get_information()
should_be = tuples_as_dict(cmd)
assert information == should_be
|
[
"arve.seljebu@gmail.com"
] |
arve.seljebu@gmail.com
|
d1ec7fe86549c7f36ecf5a7f839e9777eb08de7c
|
7722801646278ecf6a027858b08bdd0e88e7d3a2
|
/cnblog/settings.py
|
3290a5e1d5a2a831e425538c69bc8ef48705f7a1
|
[] |
no_license
|
ExBurner/BBS-Blog
|
39fbb17ffe1bc6d73db25b0c52434e6175a57860
|
bd57b2251f67b0b1c99adea9a02cec4deb4f1774
|
refs/heads/master
| 2020-03-23T21:26:45.309556
| 2018-07-24T05:14:20
| 2018-07-24T05:14:20
| 142,107,257
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,780
|
py
|
"""
Django settings for cnblog project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2llw9uvzo1tl&%*@%-kbltz$ky)jpkc!7h!et27r2tn1m=#e*v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cnblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cnblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cnblog',
'USER': 'root',
'PASSWORD': 'admin',
'HOST': 'localhost',
'PORT': 3306
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = "blog.User"
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'propagate': True,
'level': 'DEBUG',
},
}
}
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
LOGIN_URL = "/login/"
|
[
"1411694882@qq.com"
] |
1411694882@qq.com
|
25e43d5322bcc7e128c8c0debcbe2fe7cc9c0bb3
|
357488978a1436f1ef6d93f757c0ef06a21be50a
|
/setup.py
|
70d8a8f9fd8a1a5232c392041962f633dbec95be
|
[
"MIT"
] |
permissive
|
bgraver/Irelia
|
bd4fdd3a0fb5d925f871ccee6cc64d741df5313c
|
d1785fac7a3dadfa3af523f0637f5a838f830408
|
refs/heads/main
| 2023-03-18T15:14:11.416314
| 2021-03-20T23:16:57
| 2021-03-20T23:16:57
| 342,065,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent.resolve()
README = (HERE / "README.md").read_text()
description = "A lolesports api wrapper"
keywords = "api, lol, league of legends, lolesports, esports, wrapper"
VERSION = "0.0.7"
setup(
name="irelia",
version=VERSION,
description=description,
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/bgraver/Irelia",
author="Brandon Graver",
author_email="graver.brandon@gmail.com",
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
],
keywords=keywords,
packages=find_packages(),
python_requires=">=3",
install_requires=["requests"],
project_urls={
"Source": "https://github.com/bgraver/Irelia",
"Original API": "https://vickz84259.github.io/lolesports-api-docs/"
}
)
|
[
"graver.brandon@gmail.com"
] |
graver.brandon@gmail.com
|
4b32a00c650bafd26ad85ee0f76ed96d200dfce0
|
d99ac626d62c663704444a9cce7e7fc793a9e75e
|
/crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_alg_info_der_serializer.py
|
222936908c80c90638db7d52f3cdf4d1a644e7ae
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Experiment5X/CryptoFunctionDetection
|
3ab32d5573a249d24db1faf772721bc80b8d905d
|
dac700193e7e84963943593e36844b173211a8a1
|
refs/heads/master
| 2023-04-19T09:12:35.828268
| 2021-05-13T22:39:27
| 2021-05-13T22:39:27
| 355,299,557
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,174
|
py
|
# Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
class vscf_alg_info_der_serializer_t(Structure):
pass
class VscfAlgInfoDerSerializer(object):
"""Provide DER serializer of algorithm information."""
def __init__(self):
"""Create underlying C context."""
self._ll = LowLevelLibs()
self._lib = self._ll.foundation
def vscf_alg_info_der_serializer_new(self):
vscf_alg_info_der_serializer_new = self._lib.vscf_alg_info_der_serializer_new
vscf_alg_info_der_serializer_new.argtypes = []
vscf_alg_info_der_serializer_new.restype = POINTER(vscf_alg_info_der_serializer_t)
return vscf_alg_info_der_serializer_new()
def vscf_alg_info_der_serializer_delete(self, ctx):
vscf_alg_info_der_serializer_delete = self._lib.vscf_alg_info_der_serializer_delete
vscf_alg_info_der_serializer_delete.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_delete.restype = None
return vscf_alg_info_der_serializer_delete(ctx)
def vscf_alg_info_der_serializer_use_asn1_writer(self, ctx, asn1_writer):
vscf_alg_info_der_serializer_use_asn1_writer = self._lib.vscf_alg_info_der_serializer_use_asn1_writer
vscf_alg_info_der_serializer_use_asn1_writer.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
vscf_alg_info_der_serializer_use_asn1_writer.restype = None
return vscf_alg_info_der_serializer_use_asn1_writer(ctx, asn1_writer)
def vscf_alg_info_der_serializer_serialized_len(self, ctx, alg_info):
"""Return buffer size enough to hold serialized algorithm."""
vscf_alg_info_der_serializer_serialized_len = self._lib.vscf_alg_info_der_serializer_serialized_len
vscf_alg_info_der_serializer_serialized_len.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
vscf_alg_info_der_serializer_serialized_len.restype = c_size_t
return vscf_alg_info_der_serializer_serialized_len(ctx, alg_info)
def vscf_alg_info_der_serializer_serialize(self, ctx, alg_info, out):
"""Serialize algorithm info to buffer class."""
vscf_alg_info_der_serializer_serialize = self._lib.vscf_alg_info_der_serializer_serialize
vscf_alg_info_der_serializer_serialize.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t), POINTER(vsc_buffer_t)]
vscf_alg_info_der_serializer_serialize.restype = None
return vscf_alg_info_der_serializer_serialize(ctx, alg_info, out)
def vscf_alg_info_der_serializer_setup_defaults(self, ctx):
"""Setup predefined values to the uninitialized class dependencies."""
vscf_alg_info_der_serializer_setup_defaults = self._lib.vscf_alg_info_der_serializer_setup_defaults
vscf_alg_info_der_serializer_setup_defaults.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_setup_defaults.restype = None
return vscf_alg_info_der_serializer_setup_defaults(ctx)
def vscf_alg_info_der_serializer_serialize_inplace(self, ctx, alg_info):
"""Serialize by using internal ASN.1 writer.
Note, that caller code is responsible to reset ASN.1 writer with
an output buffer."""
vscf_alg_info_der_serializer_serialize_inplace = self._lib.vscf_alg_info_der_serializer_serialize_inplace
vscf_alg_info_der_serializer_serialize_inplace.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
vscf_alg_info_der_serializer_serialize_inplace.restype = c_size_t
return vscf_alg_info_der_serializer_serialize_inplace(ctx, alg_info)
def vscf_alg_info_der_serializer_shallow_copy(self, ctx):
vscf_alg_info_der_serializer_shallow_copy = self._lib.vscf_alg_info_der_serializer_shallow_copy
vscf_alg_info_der_serializer_shallow_copy.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_shallow_copy.restype = POINTER(vscf_alg_info_der_serializer_t)
return vscf_alg_info_der_serializer_shallow_copy(ctx)
def vscf_alg_info_der_serializer_impl(self, ctx):
vscf_alg_info_der_serializer_impl = self._lib.vscf_alg_info_der_serializer_impl
vscf_alg_info_der_serializer_impl.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_impl.restype = POINTER(vscf_impl_t)
return vscf_alg_info_der_serializer_impl(ctx)
|
[
"xmeadamx@gmail.com"
] |
xmeadamx@gmail.com
|
030cdfb3525f9d9fc8ae8a7fe08cf161d7ff1b7d
|
f59866c0dc5c8d9ff8b1f9073b00e952dbf2821a
|
/convnet3d/maxpool3d.py
|
afe4acf0f26c7bbb9975d0e5ff7d8f48719f225a
|
[] |
no_license
|
FancccyRay/ChaLearn2014-Gesture-Recognition
|
a27a22c8c1ec487a1bbf5eefc6a5eae74d470c4e
|
e696fdbca0602463da58dfa89f839232eebf6a43
|
refs/heads/master
| 2020-03-27T12:48:59.052115
| 2019-11-14T07:13:27
| 2019-11-14T07:13:27
| 146,571,629
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,389
|
py
|
"""
Max pooling spatio-temporal inputs for Theano
"""
from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax, max_pool_2d
def max_pool_3d(input, ds, ignore_border=False):
"""
Takes as input a N-D tensor, where N >= 3. It downscales the input video by
the specified factor, by keeping only the maximum value of non-overlapping
patches of size (ds[0],ds[1],ds[2]) (time, height, width)
:type input: N-D theano tensor of input images.
:param input: input images. Max pooling will be done over the 3 last dimensions.
:type ds: tuple of length 3
:param ds: factor by which to downscale. (2,2,2) will halve the video in each dimension.
:param ignore_border: boolean value. When True, (5,5,5) input with ds=(2,2,2) will generate a
(2,2,2) output. (3,3,3) otherwise.
"""
if input.ndim < 3:
raise NotImplementedError('max_pool_3d requires a dimension >= 3')
if ds[0]==1:
return max_pool_2d(input, ds[1:], ignore_border=ignore_border)
# extract nr dimensions
vid_dim = input.ndim
# max pool in two different steps, so we can use the 2d implementation of
# downsamplefactormax. First maxpool frames as usual.
# Then maxpool the time dimension. Shift the time dimension to the third
# position, so rows and cols are in the back
# extract dimensions
frame_shape = input.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,height,width)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
frame_shape), 'int32')
input_4D = tensor.reshape(input, new_shape, ndim=4)
# downsample mini-batch of videos in rows and cols
op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
output = op(input_4D)
# restore to original shape
outshape = tensor.join(0, input.shape[:-2], output.shape[-2:])
out = tensor.reshape(output, outshape, ndim=input.ndim)
# now maxpool time
# output (time, rows, cols), reshape so that time is in the back
shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
input_time = out.dimshuffle(shufl)
# reset dimensions
vid_shape = input_time.shape[-2:]
# count the number of "leading" dimensions, store as dmatrix
batch_size = tensor.prod(input_time.shape[:-2])
batch_size = tensor.shape_padright(batch_size,1)
# store as 4D tensor with shape: (batch_size,1,width,time)
new_shape = tensor.cast(tensor.join(0, batch_size,
tensor.as_tensor([1,]),
vid_shape), 'int32')
input_4D_time = tensor.reshape(input_time, new_shape, ndim=4)
# downsample mini-batch of videos in time
op = DownsampleFactorMax((1,ds[0]), ignore_border)
outtime = op(input_4D_time)
# output
# restore to original shape (xxx, rows, cols, time)
outshape = tensor.join(0, input_time.shape[:-2], outtime.shape[-2:])
shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
return tensor.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
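

# Usage sketch (illustrative, not part of the original module): pooling a 5D
# batch of videos. The variable names, shapes and pooling factors below are
# assumptions made for this demo only.
if __name__ == "__main__":
    import numpy
    import theano
    video_var = tensor.TensorType('float64', (False,) * 5)('video')
    pooled = max_pool_3d(video_var, (2, 2, 2), ignore_border=True)
    pool_fn = theano.function([video_var], pooled)
    video = numpy.random.rand(1, 3, 8, 32, 32)  # (batch, channel, time, h, w)
    print(pool_fn(video).shape)  # expected: (1, 3, 4, 16, 16)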
|
[
"noreply@github.com"
] |
FancccyRay.noreply@github.com
|
e2fab65a5b6ecf140c91a2424279ee50dad96516
|
352baad0e880b5de14e8fa497edb7effab484423
|
/no_greedy_repetion.py
|
733db3229f85d28a0ae4acc4fa894a352ac6ae4b
|
[] |
no_license
|
avicse007/python
|
ac714e0b58fafa2f9f6db993f36546a1d9d46c7e
|
56ab13aa411af0e3fc2d58e9ce4a61b99ef5583b
|
refs/heads/master
| 2021-08-24T00:28:53.280371
| 2017-12-07T08:14:49
| 2017-12-07T08:14:49
| 113,421,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
#no_greedy_repetion.py
import re
from pattern_syntax import test_pattern
test_pattern(
'abbaabbba',
    [ ('ab*?', 'a followed by zero or more b (non-greedy)'),
      ('ab+?', 'a followed by one or more b (non-greedy)'),
      ('ab??', 'a followed by zero or one b (non-greedy)'),
      ('ab{3}?', 'a followed by three b (non-greedy)'),
      ('ab{2,3}?', 'a followed by two to three b (non-greedy)'),
])
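
# For contrast (illustrative, not part of the original exercise): the '?' after
# each quantifier makes it non-greedy, so it consumes as few 'b's as possible:
#   re.findall('ab*',  'abbaabbba')  -> ['abb', 'a', 'abbb', 'a']   (greedy)
#   re.findall('ab*?', 'abbaabbba')  -> ['a', 'a', 'a', 'a']        (non-greedy)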
|
[
"avkumars@adobe.com"
] |
avkumars@adobe.com
|
6c8f6d9c70fff9f3031bcd03deeb4a685a6af478
|
985b52bd0c0a569fb97ee334de544ba18fcbd90c
|
/code/提取表达量.py
|
dfd391a5543ef18be4e93103569cd23e0f1f3563
|
[] |
no_license
|
Lch-SYSU/Undergraduate-Graduation-Project
|
e10a73ddf9b63fa1d6df684c938a8752aff654e8
|
a51668eae203664e4a5b342747d29a600ecb0236
|
refs/heads/master
| 2020-04-30T15:17:43.932341
| 2019-05-05T15:44:14
| 2019-05-05T15:44:14
| 176,916,735
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,232
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 24 13:44:55 2019
@author: lin
"""
import pandas as pd
with open(r'D:\学习\毕设\数据\data\HELA\SRR3589958.T.txt') as f1:
    # Read the ENSG data and store it in a list
data = pd.read_table(r'D:\学习\毕设\数据\data\HELA\GC0.5 ENSG.txt', encoding = 'UTF-8', engine = 'python')
seq_engs = data['EnsemblGeneID'].tolist()
    # Initialize variables
i = 0
expr = []
engs_expr = {}
    # Look up TPM values by ENSG ID and append them to the list
for i in range(len(seq_engs)):
for line in f1:
if seq_engs[i] in line:
expr.append(float(line.strip().split(sep = '\t')[3]))
    # Sort TPM values in descending order and store them in the dict
expr.sort(reverse = True)
engs_expr.setdefault(seq_engs[i], expr)
    # Reset variables and move the file pointer back to 0
expr = []
f1.seek(0)
print(engs_expr)
    # Convert the dict to a DataFrame
writing = pd.DataFrame(pd.Series(engs_expr))
    # Write the DataFrame to a file
writing.to_csv(r'D:\学习\毕设\数据\data\HELA\GC0.5 TPM SRR3589958.csv', sep = ',', na_rep = 'NULL', header = True)
|
[
"noreply@github.com"
] |
Lch-SYSU.noreply@github.com
|
2182531e49175062ac8b030e998b5c2c6ca3ae8d
|
cad91ae76d2746a6c28ddda0f33a58f9d461378f
|
/PyTorch/Recommendation/NCF/feature_spec.py
|
40d56a0e310d345e17261e9bbfbd4618f5acb691
|
[
"Apache-2.0"
] |
permissive
|
NVIDIA/DeepLearningExamples
|
fe677521e7e2a16e3cb0b77e358f9aab72f8c11a
|
a5388a45f71a949639b35cc5b990bd130d2d8164
|
refs/heads/master
| 2023-08-31T20:57:08.798455
| 2023-08-23T10:09:12
| 2023-08-23T10:09:12
| 131,881,622
| 11,838
| 3,124
| null | 2023-08-28T16:57:33
| 2018-05-02T17:04:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,943
|
py
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import List, Dict
class FeatureSpec:
def __init__(self, feature_spec, source_spec, channel_spec, metadata, base_directory):
self.feature_spec: Dict = feature_spec
self.source_spec: Dict = source_spec
self.channel_spec: Dict = channel_spec
self.metadata: Dict = metadata
self.base_directory: str = base_directory
@classmethod
def from_yaml(cls, path):
with open(path, 'r') as feature_spec_file:
base_directory = os.path.dirname(path)
feature_spec = yaml.safe_load(feature_spec_file)
return cls.from_dict(feature_spec, base_directory=base_directory)
@classmethod
def from_dict(cls, source_dict, base_directory):
return cls(base_directory=base_directory, **source_dict)
def to_dict(self) -> Dict:
attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
return {attr: self.__dict__[attr] for attr in attributes_to_dump}
def to_string(self):
return yaml.dump(self.to_dict())
def to_yaml(self, output_path=None):
if not output_path:
output_path = self.base_directory + '/feature_spec.yaml'
with open(output_path, 'w') as output_file:
print(yaml.dump(self.to_dict()), file=output_file)
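

# Usage sketch (illustrative, not part of the original module): round-tripping
# a spec through YAML. The dictionary contents and file name are assumptions.
if __name__ == "__main__":
    spec = FeatureSpec.from_dict(
        {"feature_spec": {"user": {}}, "source_spec": {},
         "channel_spec": {}, "metadata": {}},
        base_directory=".")
    print(spec.to_string())              # YAML dump of the four sections
    spec.to_yaml("./feature_spec.yaml")  # same dump written to disk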
|
[
"kkudrynski@nvidia.com"
] |
kkudrynski@nvidia.com
|
e40999a46c01874385822376d0d5db29b424fbeb
|
b232603cf25a61a2f5f78c3041360e3188883049
|
/KNN Movie Recommendation/movie-recom.py
|
8d9adcb6027ff45e631fcba90b1cbefa08d7afb6
|
[] |
no_license
|
charankurru/Recomendation_systems
|
2a9f75feb03dfbbfbfdacfb1c2673c51a7f46131
|
144f6a30a24df4ffc155119007aa1d3804b392b3
|
refs/heads/master
| 2022-12-01T15:06:31.544847
| 2020-08-22T08:16:58
| 2020-08-22T08:16:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,281
|
py
|
# importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#importing datasets
movie_df = pd.read_csv('movies.csv')
ratings_df = pd.read_csv('ratings.csv')
#merging datasets
movie_ratings = pd.merge(ratings_df,movie_df,on = 'movieId')
movie_ratings = movie_ratings.drop(["timestamp","genres"],axis = 1)
movie_ratings = movie_ratings.dropna(axis = 0, subset = ['title'])
# finding total number of ratings per each movie
movie_ratingCount = (movie_ratings.
groupby(by = ['title'])['rating'].
count().
reset_index().
rename(columns = {'rating': 'totalRatingCount'})
[['title', 'totalRatingCount']]
)
print(movie_ratingCount.head())
# merging the total number of ratings per each movie dataframe to movie_ratings dataset
movie_ratings = movie_ratings.merge(movie_ratingCount,on = 'title')
# filtering the data by making an threshold value
movie_ratings = movie_ratings.query('totalRatingCount >= 50')
#converting the dataset into pivot table
movie_ratings_pivot = movie_ratings.pivot_table(index = 'title',columns = 'userId',values = 'rating').fillna(0)
#converting pivot table into sparse matrix
from scipy.sparse import csr_matrix
movie_ratings_matrix = csr_matrix(movie_ratings_pivot.values)
print(movie_ratings_matrix.shape)  # csr_matrix has no .head(); inspect the shape instead
#fitting the knn model ie NearestNeighbours
from sklearn.neighbors import NearestNeighbors
model_knn = NearestNeighbors(metric = 'cosine', algorithm = 'brute')
model_knn.fit(movie_ratings_matrix)
# predicting the results
query_index = np.random.choice(movie_ratings_pivot.shape[0])
print(query_index)
distances, indices = model_knn.kneighbors(movie_ratings_pivot.iloc[query_index,:].values.reshape(1, -1), n_neighbors = 6)
# =============================================================================
# print(distances)
# print(distances.flatten())
# print(movie_ratings_pivot.iloc[query_index,:].values.reshape(1,-1))
#
# =============================================================================
for i in range(0, len(distances.flatten())):
if i == 0:
print('Recommendations for {0}:\n'.format(movie_ratings_pivot.index[query_index]))
else:
print('{0}: {1}, with distance of {2}:'.format(i, movie_ratings_pivot.index[indices.flatten()[i]], distances.flatten()[i]))
|
[
"61787496+charankurru@users.noreply.github.com"
] |
61787496+charankurru@users.noreply.github.com
|
da878145baa16b59947043420038f917d29d43bd
|
e7b483d88f80703c89553e1b9e2f5dd0322f7e38
|
/sketch/util/http.py
|
e69fe5f151af3818aae7e26ffc6a7d32826a3f52
|
[
"BSD-2-Clause"
] |
permissive
|
nikcub/Sketch
|
0f559ff9948bd355407257c25c261c1e0f237021
|
5d2d5f7e51c3eed374a8b12441dc8577b16c101e
|
refs/heads/master
| 2016-09-09T23:32:10.243530
| 2011-11-04T13:56:03
| 2011-11-04T13:56:03
| 2,592,091
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,451
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=2:sw=2:expandtab
#
# Copyright (c) 2011, Nik Cubrilovic. All rights reserved.
#
# <nikcub@gmail.com> <http://nikcub.appspot.com>
#
# Licensed under a BSD license. You may obtain a copy of the License at
#
# http://nikcub.appspot.com/bsd-license
#
"""
Sketch - http.py
HTTP and URL helper utilities.
"""
import webob
import urllib
import urlparse
import posixpath
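

def to_utf8(value):
    """Minimal helper (assumed): the original module calls to_utf8 but its
    definition is not included in this file; encode unicode to UTF-8 bytes."""
    if isinstance(value, unicode):
        return value.encode('utf-8')
    return str(value)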
def extract_dataurl(dataurl):
if not dataurl[:5] == 'data:':
return (None, None)
    img_index = dataurl.find(',')
    if img_index == -1:
        return (None, None)
img_type = dataurl[5:img_index].split(';')[0]
img_dat_enc = dataurl[img_index + 1:]
import base64
img_dat = base64.decodestring(img_dat_enc)
return (img_dat, img_type)
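
# Example (illustrative): extract_dataurl('data:image/png;base64,aGk=')
# returns ('hi', 'image/png').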
def urlunsplit(scheme=None, netloc=None, path=None, query=None, fragment=None):
"""Similar to ``urlparse.urlunsplit``, but will escape values and
urlencode and sort query arguments.
:param scheme:
URL scheme, e.g., `http` or `https`.
:param netloc:
Network location, e.g., `localhost:8080` or `www.google.com`.
:param path:
URL path.
:param query:
URL query as an escaped string, or a dictionary or list of key-values
tuples to build a query.
:param fragment:
Fragment identifier, also known as "anchor".
:returns:
An assembled absolute or relative URL.
"""
if not scheme or not netloc:
scheme = None
netloc = None
if path:
path = urllib.quote(to_utf8(path))
if query and not isinstance(query, basestring):
if isinstance(query, dict):
query = query.items()
query_args = []
for key, values in query:
if isinstance(values, basestring):
values = (values,)
for value in values:
query_args.append((to_utf8(key), to_utf8(value)))
# Sorting should be optional? Sorted args are commonly needed to build
# URL signatures for services.
query_args.sort()
query = urllib.urlencode(query_args)
if fragment:
fragment = urllib.quote(to_utf8(fragment))
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
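
# Example (illustrative):
#   urlunsplit(scheme='http', netloc='example.com', path='/a', query={'q': '1'})
#   -> 'http://example.com/a?q=1'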
def test_normalize_url():
urls = [
# 'example.com',
# 'example.com/',
# 'http://example.com/',
# 'http://example.com',
# 'http://example.com?',
# 'http://example.com/?',
# 'http://example.com//',
# 'http://example.com/a',
# 'http://example.com/a/',
# 'http://example.com/a/?',
# 'http://example.com/a/../',
# 'http://example.com/a/../?',
# 'http://example.com/a/b/../?',
# 'http://example.com/a/../',
# 'http://example.com/a/b/?z=1',
'http://example.com/a/?',
'http://@example.com/a/?',
'http://example.com:/a/?',
'http://@example.com:/a/?',
'http://example.com:80/a/?',
]
for url in urls:
print "%s \t\t\t\t\t\tclean: %s" % (url, normalize_url(url))
def normalize_url(s, charset='utf-8'):
"""
function that attempts to mimic browser URL normalization.
Partly taken from werkzeug.utils
<http://www.bitbucket.org/mitsuhiko/werkzeug-main/src/tip/werkzeug/utils.py>
There is a lot to URL normalization, see:
<http://en.wikipedia.org/wiki/URL_normalization>
:param charset: The target charset for the URL if the url was
given as unicode string.
"""
if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
# print "scheme: %s\n netloc:%s\n path:%s\n qs:%s\n anchor:%s\n" % (scheme, netloc, path, qs, anchor)
path = urllib.unquote(path)
if not netloc:
netloc = path.strip("/\\:?&")
path = '/'
if not scheme:
scheme = "http"
if not path:
path = '/'
netloc = netloc.strip("/\\:@?&")
path = posixpath.normpath(path)
path = urlparse.urljoin('/', path)
# path = urllib.quote(path, '/%')
qs = urllib.quote_plus(qs, ':&=')
# print "scheme: %s\n netloc:%s\n path:%s\n qs:%s\n anchor:%s\n" % (scheme, netloc, path, qs, anchor)
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
def redirect(location, code = 302):
assert code in (301, 302, 303, 305, 307), 'invalid code'
from sketch import Response
display_location = location
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(location, display_location), code, mimetype='text/html')
response.headers['Location'] = location
return response
def abort_old(code, *args, **kwargs):
"""Raises an ``HTTPException``. The exception is instantiated passing
*args* and *kwargs*.
:param code:
A valid HTTP error code from ``webob.exc.status_map``, a dictionary
mapping status codes to subclasses of ``HTTPException``.
:param args:
Arguments to be used to instantiate the exception.
:param kwargs:
Keyword arguments to be used to instantiate the exception.
"""
cls = webob.exc.status_map.get(code)
if not cls:
raise KeyError('No exception is defined for code %r.' % code)
raise cls(*args, **kwargs)
def get_valid_methods(handler):
"""Returns a list of HTTP methods supported by a handler.
:param handler:
A :class:`RequestHandler` instance.
:returns:
A list of HTTP methods supported by the handler.
"""
return [method for method in Application.ALLOWED_METHODS if getattr(handler,
method.lower().replace('-', '_'), None)]
|
[
"nikcub@gmail.com"
] |
nikcub@gmail.com
|
7671b176825739808ea2026fb1ee698eaf08d5a7
|
7282797285061f0394f5e4050d57141762749d71
|
/aoc-5-1.py
|
9bb4c1722a3d4fbe48395f0b6c2c71b20162a4bc
|
[] |
no_license
|
woodgern/AdventOfCode2018
|
c54804d5190af8d91c3625a1a9edd3d1953bf82c
|
d6d17310e81b38d9a930c230f442a2463c5394b2
|
refs/heads/master
| 2020-04-17T12:21:29.811465
| 2019-01-19T18:36:51
| 2019-01-19T18:36:51
| 166,576,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
def match(string1, string2):
return string1.lower() == string2.lower() and string1 != string2
with open("aoc-5-1.txt") as f:
content = f.readlines()
content = [x.strip() for x in content][0]
checking = False
while True:
i = 0
while i < len(content) - 1:
if match(content[i], content[i + 1]):
checking = False
content = content[:i] + content[i + 2:]
else:
i = i + 1
if checking:
break
checking = True
print(len(content))
|
[
"woodgern@gmail.com"
] |
woodgern@gmail.com
|
deca86cac4f0a3e968294001655f7a01e2beeb98
|
1060ba4bf43f92d91ade4fc33f7fa36cc76a1029
|
/batch/das_client.py
|
2b2109491430d7b9c4b4f0505ad14544562a49ee
|
[] |
no_license
|
CMS-PD/ALPHA
|
599035f9d328b126c34362030fceebd56881fb94
|
22ed23d28f8a6ad041c8be574732e68c2a850975
|
refs/heads/master
| 2021-01-17T02:49:11.755447
| 2017-12-12T08:45:25
| 2017-12-12T08:45:25
| 58,551,780
| 0
| 11
| null | 2017-12-12T08:45:26
| 2016-05-11T14:23:33
|
C++
|
UTF-8
|
Python
| false
| false
| 19,324
|
py
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#pylint: disable=C0301,C0103,R0914,R0903
"""
DAS command line tool
"""
from __future__ import print_function
__author__ = "Valentin Kuznetsov"
# system modules
import os
import sys
import pwd
if sys.version_info < (2, 6):
raise Exception("DAS requires python 2.6 or greater")
DAS_CLIENT = 'das-client/1.1::python/%s.%s' % sys.version_info[:2]
import re
import time
import json
import urllib
import urllib2
import httplib
import cookielib
from optparse import OptionParser
from math import log
from types import GeneratorType
# define exit codes according to Linux sysexits.h
EX_OK = 0 # successful termination
EX__BASE = 64 # base value for error messages
EX_USAGE = 64 # command line usage error
EX_DATAERR = 65 # data format error
EX_NOINPUT = 66 # cannot open input
EX_NOUSER = 67 # addressee unknown
EX_NOHOST = 68 # host name unknown
EX_UNAVAILABLE = 69 # service unavailable
EX_SOFTWARE = 70 # internal software error
EX_OSERR = 71 # system error (e.g., can't fork)
EX_OSFILE = 72 # critical OS file missing
EX_CANTCREAT = 73 # can't create (user) output file
EX_IOERR = 74 # input/output error
EX_TEMPFAIL = 75 # temp failure; user is invited to retry
EX_PROTOCOL = 76 # remote error in protocol
EX_NOPERM = 77 # permission denied
EX_CONFIG = 78 # configuration error
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
"""
Simple HTTPS client authentication class based on provided
key/ca information
"""
def __init__(self, key=None, cert=None, level=0):
if level > 1:
urllib2.HTTPSHandler.__init__(self, debuglevel=1)
else:
urllib2.HTTPSHandler.__init__(self)
self.key = key
self.cert = cert
def https_open(self, req):
"""Open request method"""
#Rather than pass in a reference to a connection class, we pass in
# a reference to a function which, for all intents and purposes,
# will behave as a constructor
return self.do_open(self.get_connection, req)
def get_connection(self, host, timeout=300):
"""Connection method"""
if self.key:
return httplib.HTTPSConnection(host, key_file=self.key,
cert_file=self.cert)
return httplib.HTTPSConnection(host)
def x509():
"Helper function to get x509 either from env or tmp file"
proxy = os.environ.get('X509_USER_PROXY', '')
if not proxy:
proxy = '/tmp/x509up_u%s' % pwd.getpwuid( os.getuid() ).pw_uid
if not os.path.isfile(proxy):
return ''
return proxy
def check_glidein():
"Check glideine environment and exit if it is set"
glidein = os.environ.get('GLIDEIN_CMSSite', '')
if glidein:
msg = "ERROR: das_client is running from GLIDEIN environment, it is prohibited"
print(msg)
sys.exit(EX__BASE)
def check_auth(key):
"Check if user runs das_client with key/cert and warn users to switch"
if not key:
msg = "WARNING: das_client is running without user credentials/X509 proxy, create proxy via 'voms-proxy-init -voms cms -rfc'"
print(msg, file=sys.stderr)
class DASOptionParser:
"""
DAS cache client option parser
"""
def __init__(self):
usage = "Usage: %prog [options]\n"
usage += "For more help please visit https://cmsweb.cern.ch/das/faq"
self.parser = OptionParser(usage=usage)
self.parser.add_option("-v", "--verbose", action="store",
type="int", default=0, dest="verbose",
help="verbose output")
self.parser.add_option("--query", action="store", type="string",
default=False, dest="query",
help="specify query for your request")
msg = "host name of DAS cache server, default is https://cmsweb.cern.ch"
self.parser.add_option("--host", action="store", type="string",
default='https://cmsweb.cern.ch', dest="host", help=msg)
msg = "start index for returned result set, aka pagination,"
msg += " use w/ limit (default is 0)"
self.parser.add_option("--idx", action="store", type="int",
default=0, dest="idx", help=msg)
msg = "number of returned results (default is 10),"
msg += " use --limit=0 to show all results"
self.parser.add_option("--limit", action="store", type="int",
default=10, dest="limit", help=msg)
msg = 'specify return data format (json or plain), default plain.'
self.parser.add_option("--format", action="store", type="string",
default="plain", dest="format", help=msg)
msg = 'query waiting threshold in sec, default is 5 minutes'
self.parser.add_option("--threshold", action="store", type="int",
default=300, dest="threshold", help=msg)
msg = 'specify private key file name, default $X509_USER_PROXY'
self.parser.add_option("--key", action="store", type="string",
default=x509(), dest="ckey", help=msg)
msg = 'specify private certificate file name, default $X509_USER_PROXY'
self.parser.add_option("--cert", action="store", type="string",
default=x509(), dest="cert", help=msg)
msg = 'specify number of retries upon busy DAS server message'
self.parser.add_option("--retry", action="store", type="string",
default=0, dest="retry", help=msg)
msg = 'show DAS headers in JSON format'
msg += ' (obsolete, keep for backward compatibility)'
self.parser.add_option("--das-headers", action="store_true",
default=False, dest="das_headers", help=msg)
msg = 'specify power base for size_format, default is 10 (can be 2)'
self.parser.add_option("--base", action="store", type="int",
default=0, dest="base", help=msg)
msg = 'a file which contains a cached json dictionary for query -> files mapping'
self.parser.add_option("--cache", action="store", type="string",
default=None, dest="cache", help=msg)
msg = 'List DAS key/attributes, use "all" or specific DAS key value, e.g. site'
self.parser.add_option("--list-attributes", action="store", type="string",
default="", dest="keys_attrs", help=msg)
def get_opt(self):
"""
Returns parse list of options
"""
return self.parser.parse_args()
def convert_time(val):
"Convert given timestamp into human readable format"
if isinstance(val, int) or isinstance(val, float):
return time.strftime('%d/%b/%Y_%H:%M:%S_GMT', time.gmtime(val))
return val
def size_format(uinput, ibase=0):
"""
Format file size utility, it converts file size into KB, MB, GB, TB, PB units
"""
if not ibase:
return uinput
try:
num = float(uinput)
except Exception as _exc:
return uinput
if ibase == 2.: # power of 2
base = 1024.
xlist = ['', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
else: # default base is 10
base = 1000.
xlist = ['', 'KB', 'MB', 'GB', 'TB', 'PB']
for xxx in xlist:
if num < base:
return "%3.1f%s" % (num, xxx)
num /= base
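
# Examples (illustrative):
#   size_format(123456789, 10) -> '123.5MB'
#   size_format(123456789, 2)  -> '117.7MiB'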
def unique_filter(rows):
"""
Unique filter drop duplicate rows.
"""
old_row = {}
row = None
for row in rows:
row_data = dict(row)
try:
del row_data['_id']
del row_data['das']
del row_data['das_id']
del row_data['cache_id']
        except KeyError:
pass
old_data = dict(old_row)
try:
del old_data['_id']
del old_data['das']
del old_data['das_id']
del old_data['cache_id']
        except KeyError:
pass
if row_data == old_data:
continue
if old_row:
yield old_row
old_row = row
yield row
def extract_value(row, key, base=10):
"""Generator which extracts row[key] value"""
if isinstance(row, dict) and key in row:
if key == 'creation_time':
row = convert_time(row[key])
elif key == 'size':
row = size_format(row[key], base)
else:
row = row[key]
yield row
if isinstance(row, list) or isinstance(row, GeneratorType):
for item in row:
            for vvv in extract_value(item, key, base):
yield vvv
def get_value(data, filters, base=10):
"""Filter data from a row for given list of filters"""
for ftr in filters:
if ftr.find('>') != -1 or ftr.find('<') != -1 or ftr.find('=') != -1:
continue
row = dict(data)
values = []
keys = ftr.split('.')
for key in keys:
            val = [v for v in extract_value(row, key, base)]
if key == keys[-1]: # we collect all values at last key
values += [json.dumps(i) for i in val]
else:
row = val
if len(values) == 1:
yield values[0]
else:
yield values
def fullpath(path):
"Expand path to full path"
if path and path[0] == '~':
path = path.replace('~', '')
path = path[1:] if path[0] == '/' else path
path = os.path.join(os.environ['HOME'], path)
return path
def get_data(host, query, idx, limit, debug, threshold=300, ckey=None,
cert=None, das_headers=True):
"""Contact DAS server and retrieve data for given DAS query"""
params = {'input':query, 'idx':idx, 'limit':limit}
path = '/das/cache'
pat = re.compile('http[s]{0,1}://')
if not pat.match(host):
msg = 'Invalid hostname: %s' % host
raise Exception(msg)
url = host + path
client = '%s (%s)' % (DAS_CLIENT, os.environ.get('USER', ''))
headers = {"Accept": "application/json", "User-Agent": client}
encoded_data = urllib.urlencode(params, doseq=True)
url += '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
if ckey and cert:
ckey = fullpath(ckey)
cert = fullpath(cert)
http_hdlr = HTTPSClientAuthHandler(ckey, cert, debug)
else:
http_hdlr = urllib2.HTTPHandler(debuglevel=debug)
proxy_handler = urllib2.ProxyHandler({})
cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
opener = urllib2.build_opener(http_hdlr, proxy_handler, cookie_handler)
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
pat = re.compile(r'^[a-z0-9]{32}')
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
iwtime = 2 # initial waiting time in seconds
wtime = 20 # final waiting time in seconds
sleep = iwtime
time0 = time.time()
while pid:
params.update({'pid':data})
encoded_data = urllib.urlencode(params, doseq=True)
url = host + path + '?%s' % encoded_data
req = urllib2.Request(url=url, headers=headers)
try:
fdesc = opener.open(req)
data = fdesc.read()
fdesc.close()
except urllib2.HTTPError as err:
return {"status":"fail", "reason":str(err)}
if data and isinstance(data, str) and pat.match(data) and len(data) == 32:
pid = data
else:
pid = None
time.sleep(sleep)
if sleep < wtime:
sleep *= 2
elif sleep == wtime:
sleep = iwtime # start new cycle
else:
sleep = wtime
if (time.time()-time0) > threshold:
reason = "client timeout after %s sec" % int(time.time()-time0)
return {"status":"fail", "reason":reason}
jsondict = json.loads(data)
return jsondict
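
# Usage sketch (illustrative; the host and DAS query are examples only):
#   jsondict = get_data('https://cmsweb.cern.ch', 'dataset=/ZMM*/*/*', 0, 10, 0)
#   for row in jsondict.get('data', []):
#       print(prim_value(row))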
def prim_value(row):
"""Extract primary key value from DAS record"""
prim_key = row['das']['primary_key']
if prim_key == 'summary':
return row[prim_key]
key, att = prim_key.split('.')
if isinstance(row[key], list):
for item in row[key]:
if att in item:
return item[att]
else:
return row[key][att]
def print_summary(rec):
"Print summary record information on stdout"
if 'summary' not in rec:
        msg = 'Summary information is not found in record:\n%s' % (rec,)
raise Exception(msg)
for row in rec['summary']:
keys = [k for k in row.keys()]
maxlen = max([len(k) for k in keys])
for key, val in row.items():
pkey = '%s%s' % (key, ' '*(maxlen-len(key)))
print('%s: %s' % (pkey, val))
print()
def print_from_cache(cache, query):
"print the list of files reading it from cache"
data = open(cache).read()
jsondict = json.loads(data)
if query in jsondict:
print("\n".join(jsondict[query]))
exit(0)
exit(1)
def keys_attrs(lkey, oformat, host, ckey, cert, debug=0):
"Contact host for list of key/attributes pairs"
url = '%s/das/keys?view=json' % host
headers = {"Accept": "application/json", "User-Agent": DAS_CLIENT}
req = urllib2.Request(url=url, headers=headers)
if ckey and cert:
ckey = fullpath(ckey)
cert = fullpath(cert)
http_hdlr = HTTPSClientAuthHandler(ckey, cert, debug)
else:
http_hdlr = urllib2.HTTPHandler(debuglevel=debug)
proxy_handler = urllib2.ProxyHandler({})
cookie_jar = cookielib.CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(cookie_jar)
opener = urllib2.build_opener(http_hdlr, proxy_handler, cookie_handler)
fdesc = opener.open(req)
data = json.load(fdesc)
fdesc.close()
if oformat.lower() == 'json':
if lkey == 'all':
print(json.dumps(data))
else:
print(json.dumps({lkey:data[lkey]}))
return
for key, vdict in data.items():
if lkey == 'all':
pass
elif lkey != key:
continue
print()
print("DAS key:", key)
for attr, examples in vdict.items():
prefix = ' '
print('%s%s' % (prefix, attr))
for item in examples:
print('%s%s%s' % (prefix, prefix, item))
def main():
"""Main function"""
optmgr = DASOptionParser()
opts, _ = optmgr.get_opt()
host = opts.host
debug = opts.verbose
query = opts.query
idx = opts.idx
limit = opts.limit
thr = opts.threshold
ckey = opts.ckey
cert = opts.cert
base = opts.base
check_glidein()
check_auth(ckey)
if opts.keys_attrs:
keys_attrs(opts.keys_attrs, opts.format, host, ckey, cert, debug)
return
if not query:
print('Input query is missing')
sys.exit(EX_USAGE)
if opts.format == 'plain':
jsondict = get_data(host, query, idx, limit, debug, thr, ckey, cert)
cli_msg = jsondict.get('client_message', None)
if cli_msg:
print("DAS CLIENT WARNING: %s" % cli_msg)
if 'status' not in jsondict and opts.cache:
print_from_cache(opts.cache, query)
if 'status' not in jsondict:
print('DAS record without status field:\n%s' % jsondict)
sys.exit(EX_PROTOCOL)
if jsondict["status"] != 'ok' and opts.cache:
print_from_cache(opts.cache, query)
if jsondict['status'] != 'ok':
print("status: %s, reason: %s" \
% (jsondict.get('status'), jsondict.get('reason', 'N/A')))
if opts.retry:
found = False
for attempt in xrange(1, int(opts.retry)):
interval = log(attempt)**5
print("Retry in %5.3f sec" % interval)
time.sleep(interval)
                    jsondict = get_data(host, query, idx, limit, debug, thr, ckey, cert)
if jsondict.get('status', 'fail') == 'ok':
found = True
break
else:
sys.exit(EX_TEMPFAIL)
if not found:
sys.exit(EX_TEMPFAIL)
nres = jsondict.get('nresults', 0)
if not limit:
drange = '%s' % nres
else:
drange = '%s-%s out of %s' % (idx+1, idx+limit, nres)
if opts.limit:
msg = "\nShowing %s results" % drange
msg += ", for more results use --idx/--limit options\n"
print(msg)
mongo_query = jsondict.get('mongo_query', {})
unique = False
fdict = mongo_query.get('filters', {})
filters = fdict.get('grep', [])
aggregators = mongo_query.get('aggregators', [])
if 'unique' in fdict.keys():
unique = True
if filters and not aggregators:
data = jsondict['data']
if isinstance(data, dict):
rows = [r for r in get_value(data, filters, base)]
print(' '.join(rows))
elif isinstance(data, list):
if unique:
data = unique_filter(data)
for row in data:
rows = [r for r in get_value(row, filters, base)]
print(' '.join(rows))
else:
print(json.dumps(jsondict))
elif aggregators:
data = jsondict['data']
if unique:
data = unique_filter(data)
for row in data:
if row['key'].find('size') != -1 and \
row['function'] == 'sum':
val = size_format(row['result']['value'], base)
else:
val = row['result']['value']
print('%s(%s)=%s' \
% (row['function'], row['key'], val))
else:
data = jsondict['data']
if isinstance(data, list):
old = None
val = None
for row in data:
prim_key = row.get('das', {}).get('primary_key', None)
if prim_key == 'summary':
print_summary(row)
return
val = prim_value(row)
if not opts.limit:
if val != old:
print(val)
old = val
else:
print(val)
if val != old and not opts.limit:
print(val)
elif isinstance(data, dict):
print(prim_value(data))
else:
print(data)
else:
jsondict = get_data(\
host, query, idx, limit, debug, thr, ckey, cert)
print(json.dumps(jsondict))
#
# main
#
if __name__ == '__main__':
main()
|
[
"Jacopo.Pazzini@pd.infn.it"
] |
Jacopo.Pazzini@pd.infn.it
|
5d1dd29b6073c8dc28f947bfaa07c99c5d3c2bdb
|
b316b7af230857311abe9071fa8118118d4c9cad
|
/test_utils/test_utils/vpcsc_config.py
|
b8854b2a46a48d88caab4bca7c85286c994efe33
|
[
"Apache-2.0"
] |
permissive
|
chrisrossi/google-cloud-python
|
686c7bf432696686df5a208ed91b8fec4643a37e
|
7a3a74d2cd3d3fef6c875b347ebd8595fb9b08fb
|
refs/heads/master
| 2020-04-03T22:45:10.899398
| 2019-11-06T16:36:30
| 2019-11-06T16:36:30
| 155,608,683
| 0
| 0
|
Apache-2.0
| 2018-10-31T18:56:39
| 2018-10-31T18:56:38
| null |
UTF-8
|
Python
| false
| false
| 4,174
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
INSIDE_VPCSC_ENVVAR = "GOOGLE_CLOUD_TESTS_IN_VPCSC"
PROJECT_INSIDE_ENVVAR = "PROJECT_ID"
PROJECT_OUTSIDE_ENVVAR = "GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_PROJECT"
BUCKET_OUTSIDE_ENVVAR = "GOOGLE_CLOUD_TESTS_VPCSC_OUTSIDE_PERIMETER_BUCKET"
class VPCSCTestConfig(object):
"""System test utility for VPCSC detection.
See: https://cloud.google.com/vpc-service-controls/docs/
"""
@property
def inside_vpcsc(self):
"""Test whether the test environment is configured to run inside VPCSC.
Returns:
bool:
true if the environment is configured to run inside VPCSC,
else false.
"""
return INSIDE_VPCSC_ENVVAR in os.environ
@property
def project_inside(self):
"""Project ID for testing outside access.
Returns:
str: project ID used for testing outside access; None if undefined.
"""
return os.environ.get(PROJECT_INSIDE_ENVVAR, None)
@property
def project_outside(self):
"""Project ID for testing inside access.
Returns:
str: project ID used for testing inside access; None if undefined.
"""
return os.environ.get(PROJECT_OUTSIDE_ENVVAR, None)
@property
def bucket_outside(self):
"""GCS bucket for testing inside access.
Returns:
str: bucket ID used for testing inside access; None if undefined.
"""
return os.environ.get(BUCKET_OUTSIDE_ENVVAR, None)
def skip_if_inside_vpcsc(self, testcase):
"""Test decorator: skip if running inside VPCSC."""
reason = (
"Running inside VPCSC. "
"Set the {} environment variable to enable this test."
).format(INSIDE_VPCSC_ENVVAR)
skip = pytest.mark.skipif(self.inside_vpcsc, reason=reason)
return skip(testcase)
def skip_unless_inside_vpcsc(self, testcase):
"""Test decorator: skip if running outside VPCSC."""
reason = (
"Running outside VPCSC. "
"Unset the {} environment variable to enable this test."
).format(INSIDE_VPCSC_ENVVAR)
skip = pytest.mark.skipif(not self.inside_vpcsc, reason=reason)
return skip(testcase)
def skip_unless_inside_project(self, testcase):
"""Test decorator: skip if inside project env var not set."""
reason = (
"Project ID for running inside VPCSC not set. "
"Set the {} environment variable to enable this test."
).format(PROJECT_INSIDE_ENVVAR)
skip = pytest.mark.skipif(self.project_inside is None, reason=reason)
return skip(testcase)
def skip_unless_outside_project(self, testcase):
"""Test decorator: skip if outside project env var not set."""
reason = (
"Project ID for running outside VPCSC not set. "
"Set the {} environment variable to enable this test."
).format(PROJECT_OUTSIDE_ENVVAR)
skip = pytest.mark.skipif(self.project_outside is None, reason=reason)
return skip(testcase)
def skip_unless_outside_bucket(self, testcase):
"""Test decorator: skip if outside bucket env var not set."""
reason = (
"Bucket ID for running outside VPCSC not set. "
"Set the {} environment variable to enable this test."
).format(BUCKET_OUTSIDE_ENVVAR)
skip = pytest.mark.skipif(self.bucket_outside is None, reason=reason)
return skip(testcase)
vpcsc_config = VPCSCTestConfig()
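
# Usage sketch (illustrative): guarding a system test with the shared config.
# The module path and test name are assumptions for the demo.
#
#   from test_utils.vpcsc_config import vpcsc_config
#
#   @vpcsc_config.skip_unless_inside_vpcsc
#   def test_outside_access_denied():
#       ...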
|
[
"noreply@github.com"
] |
chrisrossi.noreply@github.com
|
a62efcd438da500c37657f34872bc7490d540f54
|
29d803b14ceb73cdbdd48083824cf474babee1af
|
/pygta5-4.py
|
c48ce88085f2a87bf285d08bc660750b13e727f7
|
[] |
no_license
|
burak-karakus/pygta5
|
5aa20c18d9b012afca7fced5be498e10723caab4
|
72d681570b4e12f782827e75084d94930f71e9bf
|
refs/heads/master
| 2022-07-06T08:14:02.855621
| 2020-05-15T15:06:59
| 2020-05-15T15:06:59
| 263,359,658
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
import numpy as np
from PIL import ImageGrab
import cv2
import time
from directkeys_mac import KeyPress,KeyDown,KeyUp
def roi(img, vertices):
mask = np.zeros_like(img)
cv2.fillPoly(mask, vertices, 255)
masked = cv2.bitwise_and(img, mask)
return masked
def process_img(org_img):
    p_img = cv2.cvtColor(np.array(org_img), cv2.COLOR_BGR2GRAY)
    p_img = cv2.Canny(p_img, threshold1=200, threshold2=300)
    vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500]], np.int32)
p_img = roi(p_img, [vertices])
return p_img
def main():
last_time = time.time()
while(True):
screen = ImageGrab.grab(bbox=(0,40,800,640))
new_screen = process_img(screen)
print('loop took {} seconds'.format(time.time()-last_time))
last_time=time.time()
cv2.imshow('window', new_screen)
#cv2.imshow('window', cv2.cvtColor(np.array(screen), cv2.COLOR_BGR2RGB))
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
if __name__ == "__main__":
main()
|
[
"sadiburak@Sadi-MacBook-Pro.local"
] |
sadiburak@Sadi-MacBook-Pro.local
|
84d52f4962391da32f236a15c38fbc7c6a56035a
|
0d15d68b4e9c466ed63cd860887f3b49c219ec3d
|
/super_admin/backends.py
|
9b2c9c848d779187de3200e1e78ec8052880a7ff
|
[] |
no_license
|
shaheenhyderk/primis
|
729ac88ebc048bb5f2e5f56f47a8778c75146d05
|
0b737943df3a45ac432b85b101e5b91c36982534
|
refs/heads/master
| 2023-01-02T06:22:41.589842
| 2020-10-25T01:45:24
| 2020-10-25T01:45:24
| 306,910,593
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
import jwt
from rest_framework import authentication, exceptions
from django.conf import settings
from django.contrib.auth.models import User
class JWTAuthentication(authentication.BaseAuthentication):
def authenticate(self, request):
auth_data = authentication.get_authorization_header(request)
if not auth_data:
return None
        prefix, token = auth_data.decode('utf-8').split(' ')
try:
payload = jwt.decode(token, settings.JWT_SECRET_KEY)
user = User.objects.get(username=payload['username'])
return (user, token)
except jwt.DecodeError as identifier:
raise exceptions.AuthenticationFailed('Token is invalid')
except jwt.ExpiredSignatureError as identifier:
raise exceptions.AuthenticationFailed('Token is expired')
return super().authenticate(request)
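
# Usage sketch (illustrative): a matching token would be issued elsewhere as
#   token = jwt.encode({'username': user.username}, settings.JWT_SECRET_KEY)
# and sent by the client as the header "Authorization: Bearer <token>"
# (the prefix itself is not checked above).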
|
[
"shaheenhyderk@gmail.com"
] |
shaheenhyderk@gmail.com
|
e9850940fb4d78027c9ef247607e23a58b0feaaa
|
c12e9e88ca22a1c85eb55d5fa1b6e593e71edbb0
|
/duplicate_edit_pipeline/duplicate_pipeline.py
|
81b0ccbcda8d7362c1f93933050afeea31992dfb
|
[] |
no_license
|
deanproctor/streamsets_sdk_examples
|
a39e0b9484669f81c304df5a856547c360f05566
|
7fba054b9e0f475a9579c6e40d3b0da980809668
|
refs/heads/master
| 2020-05-23T16:30:58.084556
| 2019-05-15T15:17:10
| 2019-05-15T15:17:10
| 186,851,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 804
|
py
|
#!/usr/bin/env python
from streamsets.sdk import ControlHub
sch_url = 'https://cloud.streamsets.com'
sch_user = 'user@org'
sch_pass = 'mypassword123'
sch = ControlHub(sch_url, username=sch_user, password=sch_pass)
pipeline = sch.pipelines.get(name='myPipeline')
builder = sch.get_pipeline_builder()
builder.add_error_stage('Discard')
new_pipeline = builder.build()
pipeline_definition = pipeline._pipeline_definition
pipeline_stages = pipeline.stages
pipeline_definition['title'] = 'myNewPipeline'
pipeline_definition['stages'] = []
for stage in pipeline_stages:
pipeline_definition['stages'].append(stage._data)
new_pipeline._pipeline_definition = pipeline_definition
new_pipeline._data['pipeline_definition'] = pipeline_definition
sch.publish_pipeline(new_pipeline, 'My New Commit Message')
|
[
"dean@streamsets.com"
] |
dean@streamsets.com
|
4c160b362ff7db0589bf52739596171879e3d409
|
190d442aa9db6ef7e5a66e3e69db3c7737fbf48b
|
/Site/data_collections/signals/handlers.py
|
243b2a951143a853c36bbebcb0b4762968aeb213
|
[] |
no_license
|
Darbados/SWAnalysis
|
68e01482872403577dd08be2c63a9a2826a29ab7
|
2f097202694383f56fee997e1ea9a8ad8ec9491f
|
refs/heads/master
| 2023-09-01T03:50:49.439716
| 2021-10-13T08:12:34
| 2021-10-13T08:12:34
| 371,733,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
import os
from django.db import models
from django.dispatch import receiver
from data_collections.models import DataCollection
@receiver(models.signals.pre_delete, sender=DataCollection)
def auto_delete_collection_file_on_collection_delete(sender, instance, **kwargs):
if instance.file:
path = instance.file.path
if os.path.isfile(path):
try:
os.remove(path)
except Exception:
pass
|
[
"pesho.netev@gmail.com"
] |
pesho.netev@gmail.com
|
a3b674690a85d8b1b49ae220b7ba9776a941d9a3
|
740847ec6c7b70b97418d8ee517a3cb8ffff7fea
|
/hidroponic_server/hidroponic_server/urls.py
|
5e9a98e21d7b1e518dc58afa329a0805cd1374d5
|
[] |
no_license
|
JoaquinIMT/hidroponic_project
|
7dea723f30e74dbc9b9914afda55182f78562cf3
|
63c7620162669e9943e112ba29375454548b9e51
|
refs/heads/main
| 2023-08-23T18:58:33.805046
| 2021-09-19T03:29:38
| 2021-09-19T03:29:38
| 408,020,922
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 806
|
py
|
"""hidroponic_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('api.urls'))
]
|
[
"joaquinla07@gmail.com"
] |
joaquinla07@gmail.com
|
6c011191d8a77acf3a5f735a391eecb401da6ef2
|
44281037e35d704856554ae78a63a421b9ed530e
|
/work12.py
|
b07c991967000bd06c431b79d6e10225edf5192b
|
[] |
no_license
|
adivyas99/Gender-Recognition-Times-Internet-
|
97d8207afb9ceb473d91b3f841af25ec10ed1e3b
|
dfb3bfd2a40f466463b4b635930c72dce31b1c64
|
refs/heads/master
| 2020-07-07T14:44:54.877609
| 2019-09-19T10:54:06
| 2019-09-19T10:54:06
| 203,379,122
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,947
|
py
|
# Aggregate page titles per user so that we can extract per-user features
# Combine the features and then apply the classifier
# Implement UNDER sampling and combined over/under sampling -->>>
import numpy as np
import ast
import pandas as pd
from collections import Counter
from googletrans import Translator

translator = Translator()
translator.translate('good boy').text  # smoke test: single-string translation
translator.translate(['The quick brown fox', 'jumps over', 'the lazy dog'])
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
# Making UrlsData.csv--->>>
'''
with open('Urls_data.txt') as json_file:
data = json_file.readlines()
user_id=[]
title=[]
link=[]
description=[]
long_desc=[]
brand=[]
entities=[]
tags = []
count=0
for i in data:
count+=1
x = ast.literal_eval(i)
print(count)
user_id.append(x['id'])
try:
title.append(x['title'])
except:
title.append('none')
try:
link.append(x['link'])
except:
link.append('none')
try:
description.append(x['description'])
except:
description.append('none')
try:
long_desc.append(x['long_description'])
except:
long_desc.append('none')
try:
brand.append(x['brand'])
except:
brand.append('none')
try:
tags.append(x['tags'])
except:
tags.append('none')
try:
entities.append(x['entities'])
except:
entities.append('none')
urls_data = pd.DataFrame({
'id': user_id,
'title': title,
'link': link,
'desc': description,
'long_desc': long_desc,
'brand': brand,
'tags': tags,
'entities': entities
})
urls_data.to_csv('urls_data.csv')
ast.literal_eval(data[109443]).keys()
'''
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
# Combining UserIdtoUrl folder (will be used for train and test data features!)
# inner merging it with train data making intermediate training data -->>>
'''
user_id=[]
url=[]
count=0
for i in range(0,10):
print(i)
j = '0000'+str(i)
with open('UserIdToUrl/part-'+j) as file:
data = file.readlines()
for i in data[1:]:
count+=1
print(count)
print(j)
av = i.split(',')
user_id.append(str(av[0]))
url.append(str(av[1]))
for i in range(10,12):
print(i)
j = '000'+str(i)
with open('UserIdToUrl/part-'+j) as file:
data = file.readlines()
for i in data[1:]:
count+=1
print(count)
print(j)
av = i.split(',')
user_id.append(str(av[0]))
url.append(str(av[1]))
print('dataframe--')
user_id_url_data = pd.DataFrame({
'userid': user_id,
'url': url
})
print('dataframe to csv--')
user_id_url_data.to_csv('user_id_url_data.csv')
UserIdToGender_Train = pd.read_csv('UserIdToGender_Train.csv')
print('string me convert--')
UserIdToGender_Train = UserIdToGender_Train.astype(str)
print('merge--')
df_merged = pd.merge(user_id_url_data, UserIdToGender_Train, on='userid')
print('mergefile to csv--')
df_merged.to_csv('df_merged.csv')
test_df = pd.read_csv('UserId_Test.csv')
df_merged_nonduplicates_train = df_merged.drop_duplicates()
df_merged_nonduplicates_train.to_csv('df_merged_nonduplicates_train.csv')
'''
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
user_id_url_data = pd.read_csv('user_id_url_data.csv')
user_id_url_data = user_id_url_data.loc[:, ~user_id_url_data.columns.str.contains('^Unnamed')]
user_id_url_data = user_id_url_data.astype(str)
UserIdToGender_Train = pd.read_csv('UserIdToGender_Train.csv')
UserIdToGender_Train = UserIdToGender_Train.loc[:, ~UserIdToGender_Train.columns.str.contains('^Unnamed')]
UserIdToGender_Train = UserIdToGender_Train.astype(str)
urls_data = pd.read_csv('urls_data.csv')
urls_data = urls_data.loc[:, ~urls_data.columns.str.contains('^Unnamed')]
urls_data = urls_data.astype(str)
# The dump above stores each page URL in a 'link' column; rename it so the
# merges on 'url' below work (rename is a no-op if the column is absent).
urls_data = urls_data.rename(columns={'link': 'url'})
#len(urls_data.url.unique())
def preprocess_strip(x):
x= str(x)
x= x.strip()
return x
user_id_url_data['url'] = user_id_url_data['url'].map(preprocess_strip)
# df_merged = Contain data for both train and test--
df_merged = pd.merge(user_id_url_data, UserIdToGender_Train, on='userid')
df_merged['url'] = df_merged['url'].map(preprocess_strip)
df_merged = df_merged.drop_duplicates()
# This will be only used only for training-->>>
df_merged_train = pd.merge(df_merged, urls_data, on='url')
df_merged_train = df_merged_train.drop_duplicates()
################
grouped = df_merged_train[df_merged_train['title'] != 'none']
grouped = grouped[grouped['title'] != 'nan']
grouped = grouped.groupby('userid')
#df_grouped = pd.DataFrame(columns = ['userid','title','gender'])
user_ids = []
descriptions = []
genders = []
j=0
for name,group in grouped:
print(j)
j+=1
#print(name_of_group)
#print(df)
group = group.drop_duplicates(subset = 'title')
#desc_s = ''
gen = group.gender.iloc[0]
#print(gen)
descs = group.title.str.cat(sep=' ')
#print(descs)
#print(len(descs))
#desc_ +=group.desc.map(add_all_desc)
#print(len(str(desc_)))
#y = group.desc.map(add_all_desc_y)
#print(len(str(y)))
#print(y)
user_ids.append(name)
descriptions.append(descs)
genders.append(gen)
#df_grouped = df_grouped.append({'userid' : name , 'desc' : descs,'gender':gen} , ignore_index=True)
df_grouped = pd.DataFrame({'userid':user_ids,
'title':descriptions,
'gender':genders})
# To CSVs-->>>
'''
user_id_url_data.to_csv('user_id_url_data.csv')
UserIdToGender_Train.to_csv('UserIdToGender_Train.csv')
urls_data.to_csv('urls_data.csv')
df_merged.to_csv('df_merged.csv')
df_merged_train.to_csv('df_merged_train.csv')
'''
#==
#df_merged_train_exp = df_merged_train.drop_duplicates(subset = 'tags')
## Female Keywords-
Female_df = df_merged_train[df_merged_train['gender'] == 'F']
#Female_df = Female_df.drop_duplicates(subset = 'tags')
Female_df = Female_df[Female_df['tags'] != 'none']
keywords_F = []
for i in Female_df.tags[:]:
print(i)
#i= exec('%s'%(i))
    k = list(ast.literal_eval(i))
#k=list(k)
#print(k)
for j in k:
keywords_F.append(j)
keywords_F = [k for k in set(keywords_F) if k != '']
## Male Keywords-
Male_df = df_merged_train[df_merged_train['gender'] == 'M']
#Male_df = Male_df.drop_duplicates(subset = 'tags')
Male_df = Male_df[Male_df['tags'] != 'none']
keywords_M = []
for i in Male_df.tags[:]:
print(i)
#i= exec('%s'%(i))
    k = list(ast.literal_eval(i))
#k=list(k)
#print(k)
for j in k:
keywords_M.append(j)
keywords_M = [k for k in set(keywords_M) if k != '']
#--------------------
## Converting for Male-
Male_df = urls_data.copy()
Male_df = Male_df[Male_df['tags'] != 'none']
#dele = Male_df[Male_df.duplicated(['desc'])]
#dele = Male_df.duplicated(subset='desc', keep='first')
Male_df = Male_df.drop_duplicates(subset ='desc')
#Male_df = Male_df.drop_duplicates()
Male_df_copy = Male_df.copy()
Male_df_copy['gender']=[''for i in range(Male_df_copy.shape[0])]
sz = Male_df.shape[0]
indices = list(Male_df.index)
c=0
j=0
for i in indices:
#print(type(i))
av = Male_df.tags[i]
print(c)
c+=1
    k = list(ast.literal_eval(av))
a_set = set(keywords_F)
b_set = set(k)
if len(a_set.intersection(b_set))>4:
j+=1
print('--')
Male_df_copy.gender[i]='F'
Male_df_copy = Male_df_copy[Male_df_copy['gender']=='F']
## Converting for Female-
Female_df = urls_data.copy()
Female_df = Female_df[Female_df['tags'] != 'none']
#dele = Female_df[Female_df.duplicated(['desc'])]
#dele = Female_df.duplicated(subset='desc', keep='first')
Female_df = Female_df.drop_duplicates(subset ='desc')
#Female_df = Female_df.drop_duplicates()
Female_df_copy = Female_df.copy()
Female_df_copy['gender']=[''for i in range(Female_df_copy.shape[0])]
sz = Female_df.shape[0]
indices = list(Female_df.index)
c=0
j=0
for i in indices:
#print(type(i))
av = Female_df.tags[i]
print(c)
c+=1
    k = list(ast.literal_eval(av))
a_set = set(keywords_M)
b_set = set(k)
if len(a_set.intersection(b_set))>4:
j+=1
print('--')
Female_df_copy.gender[i]='M'
Female_df_copy = Female_df_copy[Female_df_copy['gender']=='M']
#--------------------
df_merged_train_c = df_merged_train.copy()
df_merged_train_c = df_merged_train_c.drop('userid', axis=1)
columns_for_d_fmerged_train = ['id', 'title', 'url', 'desc', 'long_desc', 'brand', 'tags', 'entities', 'gender']
df_merged_train_c = df_merged_train_c[columns_for_d_fmerged_train]
df_merged_train_c_F = df_merged_train_c.copy()[df_merged_train_c['gender']=='F']
df_merged_train_c_F['gender'] = ['M' for i in range(df_merged_train_c_F.shape[0])]
df_merged_train_c_M = df_merged_train_c.copy()[df_merged_train_c['gender']=='M']
df_merged_train_c_M['gender'] = ['F' for i in range(df_merged_train_c_M.shape[0])]
df_all_rows = pd.concat([Female_df_copy, Male_df_copy, df_merged_train_c, df_merged_train_c_F,df_merged_train_c_M])
#--------------------
# Making Final Training datsets-->>>
df_merged_train_final = df_grouped.copy().loc[:,['title','gender']]
#df_grouped.to_csv('df_grouped.csv')
# to english language--
import string
translate_string = str.maketrans(string.punctuation, ' '*len(string.punctuation))
exclude = set(string.punctuation)
import re
c=0
import json
alpha_set=set('qwertyuiopasdfghjklzxcvbnm')
any_to_en={}
def to_english(x):
global c
print(c,'------\n')
#print(x)
c+=1
#x = "{0}".format(x)
x=x.strip()
x = x.lower()
x = x.replace("\'"," ")
x = re.sub(r'\d+', " ", x)
x = ''.join(ch for ch in x if ch not in exclude)
x = x.translate(translate_string)
exec('x="%s"'%(x))
#x=x.replace(r"'", r"\' ")
#print('-------\n')
#print(x)
#x = x.strip()
#print('********')
#x=x.__repr__()
#x=json.dumps(x)
if x in any_to_en.keys():
trans_final = any_to_en[x]
else:
#else:
#x=x[:2000]
final=" "
trans_final = " "
words=x.split()
#print(words)
for i in range(len(words)):
#print(i)
#print(words[i])
if len(set(words[i]).intersection(alpha_set))==0:
final = final+' '+words[i]
try:
#print('cccccccccccccccccç')
if len(set(words[i+1]).intersection(alpha_set))==0:
#print('*')
continue
else:
trans_final = trans_final+ " " + translator.translate(final).text
final=" "
#print('cooooooooolooooooooo')
except:
#print('***')
break
else:
trans_final = trans_final+" " +words[i]
any_to_en[x]=trans_final
#print(len(words))
#print('>>>>>>>>>>>>')
trans_final = trans_final.strip()
print(trans_final)
#print(final)
#if y.src !='en':
#try:
#print('iiiii\n')
#y=str(y.text)
#print('av'*10)
#except:
#print (len(x))
#else:
#y= str(y.origin)
#any_to_en[x] = x
#print(y)
return trans_final
df_merged_train_final_translated = df_merged_train.copy()
df_merged_train_final_translated['title'] = df_merged_train['title'].map(to_english)
df_merged_train_final = df_merged_train_final_translated.copy()
#df_merged_train_final = df_all_rows.loc[:,['desc','gender']]
df_merged_train_final = df_merged_train_final.drop_duplicates()
df_merged_train_final = df_merged_train_final.astype(str)
def unique_word_only(x):
x = x.lower()
y = x.split()
y = ' '.join(list(Counter(y).keys()))
return y
df_merged_train_final['desc'] = df_merged_train_final['desc'].map(unique_word_only)
#df_merged_train_final['desc'] = df_merged_train['desc']+df_merged_train['desc']
#df_merged_train_final.to_csv('df_merged_train_final.csv')
#len(df_merged_train_final[df_merged_train_final['desc']=='none'])
'''# Under Sampling-->>
# Class count
count_class_M, count_class_F = df_merged_train_final.gender.value_counts()
# Divide by class
df_class_M = df_merged_train_final[df_merged_train_final['gender'] == 'M']
df_class_F = df_merged_train_final[df_merged_train_final['gender'] == 'F']
df_class_M_under = df_class_M.sample(count_class_F+20000,random_state=42)
df_merged_train_final = pd.concat([df_class_F, df_class_M_under], axis=0)
print('Random over-sampling:')
print(df_merged_train_final.gender.value_counts())'''
# Class count Under
count_class_M, count_class_F = df_merged_train_final.gender.value_counts()
# Divide by class
df_class_M = df_merged_train_final[df_merged_train_final['gender'] == 'M']
df_class_F = df_merged_train_final[df_merged_train_final['gender'] == 'F']
df_class_M_under = df_class_M.sample(count_class_F+25000,random_state=42)
df_merged_train_final = pd.concat([df_class_F, df_class_M_under], axis=0)
print('Random under-sampling:')
print(df_merged_train_final.gender.value_counts())
# Over-sampling: class counts
count_class_M, count_class_F = df_merged_train_final.gender.value_counts()
# Divide by class
df_class_M = df_merged_train_final[df_merged_train_final['gender'] == 'M']
df_class_F = df_merged_train_final[df_merged_train_final['gender'] == 'F']
df_class_F_over = df_class_F.sample(count_class_M-25000, replace=True, random_state=42)
df_merged_train_final = pd.concat([df_class_M, df_class_F_over], axis=0)
df_merged_train_final = df_merged_train_final.astype(str)
print('Random over-sampling:')
print(df_merged_train_final.gender.value_counts())
'''
#### Preprocessing-->>>
# Normalizing and encoding
import unicodedata
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import ToktokTokenizer
# making short name-
lemma=WordNetLemmatizer()
token=ToktokTokenizer()
from nltk.corpus import stopwords
stopWordList=stopwords.words('english')
stopWordList.remove('no')
stopWordList.remove('not')
#import spacy
#nlp = spacy.load('en_core', parse=True, tag=True, entity=True)
# NFKD - Compatibility Decomposition
def removeAscendingChar(data):
data=unicodedata.normalize('NFKD', data).encode('ascii', 'ignore').decode('utf-8', 'ignore')
return data
# Removing diff characters-
def removeCharDigit(text):
str='`1234567890-=~@#$%^&*()_+[!{;":\'><.,/?"}]'
for w in text:
if w in str:
text=text.replace(w,'')
return text
# choosing root word-
def lemitizeWords(text):
words=token.tokenize(text)
listLemma=[]
for w in words:
x=lemma.lemmatize(w,'v')
#print(x)
listLemma.append(x)
return text
# Removing stop words-
def stopWordsRemove(text):
wordList=[x.lower().strip() for x in token.tokenize(text)]
removedList=[x for x in wordList if not x in stopWordList]
text=' '.join(removedList)
#print(text)
return text
# Running above functions-
def PreProcessing(text):
text=removeCharDigit(text)
#print(text)
text=removeAscendingChar(text)
#print(text)
text=lemitizeWords(text)
#print(text)
text=stopWordsRemove(text)
#print(text)
return(text)'''
'''
totalText=''
count=0
for x in df_merged_train_final['desc']:
ps=PreProcessing(x)
totalText=totalText+" "+ps # Single variable with all the body
print (count)
count+=1
f= open("/Users/anilvyas/Desktop/TILCompleteDataSet/totalText.txt","w+")
f.write(totalText)'''
# Label encoding below maps: 0 -> F, 1 -> M
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(df_merged_train_final.title)
X_train_counts.shape
from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
X_train_tfidf.shape
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
df_merged_train_final.gender = labelencoder_X.fit_transform(df_merged_train_final.gender)
labelencoder_X.inverse_transform([0])
from sklearn.pipeline import Pipeline
'''
from sklearn.linear_model import SGDClassifier
text_clf_svm = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf-svm', SGDClassifier( random_state=42))])
text_clf_svm = text_clf_svm.fit(df_merged_train_final.desc, df_merged_train_final.gender)
'''
'''
from sklearn.tree import DecisionTreeClassifier
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', DecisionTreeClassifier()) ])
text_clf = text_clf.fit(df_merged_train_final.desc, df_merged_train_final.gender)
'''
'''
from sklearn import ensemble,feature_extraction
clf=Pipeline([
('tfidf_vectorizer', feature_extraction.text.TfidfVectorizer(lowercase=True)),
('rf_classifier', ensemble.RandomForestClassifier(n_estimators=500,verbose=1,n_jobs=-1))
])
clf.fit(df_merged_train_final.desc,df_merged_train_final.gender)
'''
'''
from sklearn.naive_bayes import MultinomialNB
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("english", ignore_stopwords=True)
class StemmedCountVectorizer(CountVectorizer):
def build_analyzer(self):
analyzer = super(StemmedCountVectorizer, self).build_analyzer()
return lambda doc: ([stemmer.stem(w) for w in analyzer(doc)])
stemmed_count_vect = StemmedCountVectorizer(stop_words='english')
text_clf = Pipeline([('vect', stemmed_count_vect), ('tfidf', TfidfTransformer()),
('mnb', MultinomialNB(fit_prior=True))])
text_clf = text_clf.fit(df_merged_train_final.desc, df_merged_train_final.gender)
predicted_mnb_stemmed = text_clf.predict(test_df_merged_final.desc)
predicted = predicted_mnb_stemmed
'''
#from sklearn.naive_bayes import GaussianNB
'''clf = MultinomialNB().fit(X_train_tfidf, df_merged_train_final.gender)'''
'''
from sklearn.naive_bayes import MultinomialNB
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB()) ])
text_clf = text_clf.fit(df_merged_train_final.title, df_merged_train_final.gender)
'''
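# NOTE: every classifier variant above is commented out, yet `text_clf` is used
# for prediction below; a minimal working choice, assuming the MultinomialNB
# pipeline sketched above was the intended one:
from sklearn.naive_bayes import MultinomialNB
text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB())])
text_clf = text_clf.fit(df_merged_train_final.title, df_merged_train_final.gender)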
# Sanity checks: how many rows are labelled 1 (M), and how many have no description
len(df_merged_train_final.loc[df_merged_train_final['gender']==1])
len(df_merged_train_final[(df_merged_train_final.desc=='none')])
#For Test-->>>
test_df = pd.read_csv('UserId_Test.csv')
test_df = test_df.drop_duplicates()
test_df = test_df.astype('str')
test_df_merged = pd.merge(user_id_url_data, test_df, on='userid')
test_df_merged['url'] = test_df_merged['url'].map(preprocess_strip)
test_df_merged = test_df_merged.drop_duplicates()
test_df_merged_final = pd.merge(urls_data, test_df_merged, on='url')
#test_df_merged_final['desc']= test_df_merged_final['title']+ test_df_merged_final['desc']
test_grouped = test_df_merged_final[test_df_merged_final['title'] != 'none']
test_grouped = test_grouped[test_grouped['title'] != 'nan']
test_grouped = test_grouped.groupby('userid')  # group the *filtered* rows per user
#df_grouped = pd.DataFrame(columns = ['userid','desc','gender'])
user_ids = []
descriptions = []
#genders = []
j=0
for name, group in test_grouped:
    print(j)
    j += 1
    group = group.drop_duplicates(subset='title')
    descs = group.title.str.cat(sep=' ')   # concatenate every title seen by this user
    user_ids.append(name)
    descriptions.append(descs)
test_df_grouped = pd.DataFrame({'userid':user_ids,
'title':descriptions})
test_df_grouped.to_csv('test_df_grouped.csv')
test_df_merged_final = test_df_grouped.copy()
# the grouped test frame only has a 'title' column, so deduplicate the words there
test_df_merged_final['title'] = test_df_merged_final['title'].map(unique_word_only)
## Prediction
#av = pd.Series(['av cool'])
#text_clf.predict(av)
predicted = text_clf.predict(test_df_merged_final.title)
# decode back to 'F'/'M' so the voting logic below can match on the string labels
predicted = labelencoder_X.inverse_transform(predicted)
#np.mean(predicted == twenty_test.target)
predicted_copy = predicted.tolist()
'''
from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
X_resampled, y_resampled = ros.fit_resample(df_merged_train_final.desc, df_merged_train_final.gender)
'''
from collections import OrderedDict
test_cases = OrderedDict()
test_cases2 = OrderedDict()
for i in test_df.iloc[:,0]:
test_cases[str(i)] = []
test_cases2[str(i)] = []
predicted = predicted.reshape(-1, 1)  # one prediction per (user, page) row
size = test_df_merged_final.shape[0]
for i in range(size):
print(i)
    #predicted = text_clf.predict(pd.Series([test_df_merged_final.loc[i,'desc']]))
    av = str(predicted[i, 0])
    usr_id = str(test_df_merged_final.loc[i, 'userid'])
    # both branches of the original if/else appended, so setdefault covers both cases
    test_cases.setdefault(usr_id, []).append(av)
#test_cases[str(test_df_merged_final.loc[i,'userid'])].append(predicted[0])
'''count=0
from collections import Counter
for i in test_cases.keys():
print(count)
count+=1
av = list(Counter(test_cases[i]).keys())
cool = list(Counter(test_cases[i]).values())
print(cool)
try:
if cool[0]>cool[1]:
test_cases2[i] = av[0]
elif cool[0]<cool[1]:
test_cases2[i] = av[1]
else:
test_cases2[i] = 'M'
except:
test_cases2[i] = []'''
#gk = test_df_merged_final.groupby('userid').get_group('1000021')
#gk.first()
'''submission = test_df.copy()
submission['gender'] = ['M']*88384
submission.to_csv('submission.csv',index=False)
'''
'''test_on_predicted=list(predicted.reshape(1834125,))
cc = pd.Series(test_on_predicted[:])
cc.value_counts()'''
# TODO: try classifying according to subcategory (e.g. business)
'''# Single presence-->>>
count=0
from collections import Counter
for i in test_cases.keys():
print(count)
count+=1
#av = list(Counter(test_cases[i]).keys())
#cool = list(Counter(test_cases[i]).values())
#print(cool)
if 'F' in test_cases[i]:
test_cases2[str(i)] = ['F']
else:
test_cases2[str(i)] = ['M']
'''
#test_cases2 = OrderedDict()
# Another decision rule: predict F for a user only when the F predictions form
# the majority of that user's per-page predictions; otherwise default to M.
count = 0
from collections import Counter
for i in test_cases.keys():
    print(count)
    count += 1
    if 'F' in test_cases[i] and 'M' not in test_cases[i]:
        test_cases2[str(i)] = ['F']
    elif 'F' in test_cases[i]:
        av = list(Counter(test_cases[i]).keys())
        cool = list(Counter(test_cases[i]).values())
        ind = av.index('F')
        if cool[ind] / sum(cool) > 1 - cool[ind] / sum(cool):
            test_cases2[str(i)] = ['F']
        else:
            test_cases2[str(i)] = ['M']
    else:
        test_cases2[str(i)] = ['M']
# build the submission from the per-user votes (test_cases2), not the raw page-level lists
submission = pd.DataFrame.from_dict(test_cases2, orient='index')
submission['userid'] = submission.index
submission['gender'] = submission[0]
submission = submission.iloc[:,1:]
submission = submission.astype('str')
submission.to_csv('submission.csv',index=False)
print(submission['gender'].value_counts())
#print('\a')
# 81.6% accuracy so far
# TODO: over-sample again and also decrease the number of male examples
# TODO: try a BERT classifier
# TODO: use the long description, desc and their combination the same way as above
# TODO: try other classifiers
# TODO: combine over- and under-sampling
# The column most informative for prediction so far is the desc column
|
[
"noreply@github.com"
] |
adivyas99.noreply@github.com
|
0d86c1a673ca1bf515b589a6acb7e6df2a38dbe3
|
62664aed311b6f1e67895893ebbfc36d186f7053
|
/Modules/Config/Config.py
|
ce59cd9fc0dda896df415975ca25a30ad7987f22
|
[] |
no_license
|
mdobrychlop/pyry3d
|
0a9c332a530c11f1cdd891d379253d92f8d44cba
|
44ea539179e41545fbbf5c38f515e377934dbd67
|
refs/heads/master
| 2021-05-08T00:36:11.839372
| 2017-10-20T13:40:13
| 2017-10-20T13:40:13
| 107,682,963
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 35,180
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
# www.genesilico.pl
#
#creates ranked 3D models of macromolecular complexes
#based on experimental restraints and a whole complex shape.
__author__ = "Joanna M. Kasprzak"
__copyright__ = "Copyright 2010, The PyRy3D Project"
__credits__ = ["Janusz Bujnicki"]
__license__ = "GPL"
__version__ = "0.1.0"
__maintainer__ = "Joanna Kasprzak"
__email__ = "jkasp@amu.edu.pl"
__status__ = "Prototype"
from Modules.Error.Errors import ConfigError, Component_movesError
from Modules.Constans.Logfile import logfile
from Modules.Trans.Component import Movable
import os
#@TODO: clean this module!!!
#@TODO: remove code duplications
#@TODO: check data correctness provided by the user and default values
class Config:
"""
class which represents simulation configuration data:
gets simulation parameters and delegates them to SIMUL module;
gets simulation output data and delegates them to OUT_set module;
#prepare input data for simulation:
#outside a map, no restraints kept, collisions, empty spaces in the map
#give a Simul module information about number os simulation steps
"""
def __init__(self):
self.anntemp = 10 #annealing temperature
self.disabled_mutations = [] #names of mutations with frequencies of 0
self.clashes_penalty = [1., 1.] #penalty for collisions between complex components
self.curve = False #True if user provides curve file
self.components_indexes = {} #how components are indexed in complex.components list
self.chi2_penalty = [0.,0.] #penalty for disagreement with SAXS curve according to chi2 value
self.crysol_path = "" #path to CRYSOL installed on Ubuntu system
        self.density_penalty = [0., 0.] #penalty ranges for occupying points of the map with low density
self.exchange_freq = 0.0 #frequency of components' exchange mutation
self.exchangeandsample_freq = 0.0 #frequency of components' exchange mutation
self.freespace_penalty = [1., 1.] #penalty for empty spaces in density map
self.identify_disorders = False #True if PyRy3D should check for disorders, False if not (faster)
self.iter_nr = 0 #simulation step number
self.kvol = 1.0 #how many complex volumes will define density map
        self.linked_components = {} #if some components are linked by a covalent bond
self.is_chi2_defined = False
self.is_rg_penalty_defined = False
self.max_rot_angle = 5. #max rotation angle in single simulation move
self.max_trans_vec = [5.,5.,5.] #max translation vector in single simulation move
self.maxpoolsize = 100 #maximum pool size for genetic algorithm
self.movable = [] #list of components with defined movements
self.movehistory = "" #filename with history of moves for all saved complexes
self.niter = 0 #which steps will be added to trajectory
self.outbox_penalty = [1., 1.] #penalty for beeing outside simulation box
self.out_steps = ["FIRST","LAST"] #which steps to include in output data? default None
self.param_scaling = "off" #scaling procedure can be "on" or "off"
self.param_scaling_ranges = [0,25,50] #values for scaling ranges during simulation
self.scaling_ranges = [[50,100], [25,50], [0,25]] #values of scaled params
self.param_scaling_range1 = [50,100] #values of scaled params in first range
self.param_scaling_range2 = [25,50] #values of scaled params in second range
self.param_scaling_range3 = [0,25] #values of scaled params in third range
self.rg_penalty = [0.,0.] #penalty for disagreement with SAXS curve according to Rg value
self.rg_val = 0.0 #Rg value given by user
self.reductmethod = "Roulette" #reduction method for genetic algorithm
self.reheat = False
self.reheat_rejections = None
self.replica_exchange_freq = 1 #how often replicas will be exchanged; default every 10% of simulation steps
        self.repl_exch_freq = False #parameter replicaexchangefreq not defined by the user
self.replica_temps = [400, 350, 300, 250, 200, 150, 100, 50, 25, 0] #list of replica simulations' temperatures
self.representation = "ca" #structure representation for components; #CA - only calfas/c4'; cg - coarse grain, fa - full atom (default)
self.restraints_penalty = [1., 1.] #penalty for not keeping user defined restraints
self.mutation_frequencies = {"rotation" : 0.5, "translation" : 0.5}
self.rotation_freq = 0.5 #frequency of component's rotation mutation
self.rotation_all_freq = 0.0
self.rotation_cov_freq = 0.0
self.rotation_whole_freq = 0.0
self.saxsradius = 0.0
self.symmetry_penalty = [0.,0.]
self.write_eachbetter = False
self.save_res_mode = "outsteps" #by default only best energy complex is saved
self.shapedesc = True #True when modeling is with use of map or saxs shape
self.simbox = 1.2 #how many times bigger than map is a simulbox
self.simboxradius = 2. #radius of a single grid cell in Angstroms
self.simgridtype = "cubic" #type of simulation grid; at the moment only cubic grid is available
self.simmethod = "simulatedannealing" #simulation algorithm
self.simul_dd_freq = 0.0 #frequency of components' simulate disorder mutation
self.simul_steps = 100 #number of simulation step to perform
self.start_orient = False #has a user set first complex conformation
self.struct_nr = 1 #number of best scores out structures; default 0.1simul_steps
self.threshold = None #min density value accepted in simulation
self.translation_freq = 0.5 #frequency of component's translation mutation
self.translation_all_freq = 0.0
self.required_clashes_penalty = False
self.required_clashes_penalty_allatoms = False
self.is_density_defined = False
self.is_freespace_defined = False
self.kvol_given = False
self.threshold_given = False
def __str__(self):
return "%s %s %s %s %s %s %s" % \
( self.simul_steps, self.freespace_penalty, \
self.clashes_penalty, self.outbox_penalty, self.restraints_penalty,\
self.out_steps, self.struct_nr)
    def __open_config_file(self, filename):
        try:
            fh = open(filename, "r")
        except IOError:
            # open() raises IOError, not ConfigError; re-raise as a config problem
            raise ConfigError("Cannot open config file: %s" % filename)
        return fh
    def __parse_covalent_links(self, line):
        try:
            linked_components = eval(line.split()[2].strip())
        except Exception:
            # eval() raises SyntaxError/NameError on malformed input, not ConfigError
            raise ConfigError("Please provide the correct format for the COVALENT_BONDS parameter in the configuration file")
component = line.split()[1]
if component not in self.linked_components.keys():
for el in linked_components:
if el == component:
raise ConfigError("Component %s cannot be bound with %s "%(component, el))
#self.linked_components[component] = linked_components
at1 = eval(line.split()[3])
at2 = eval(line.split()[4])
covbond = CovalentBond(linked_components, at1, at2)
if self.linked_components.has_key(component):
self.linked_components[component].append(covbond)
else:
self.linked_components[component] = [covbond]
#else:
# raise ConfigError("You provided more than one covalent bond info for a component:%s"%(component))
#print "!!!!!", self.linked_components
    def __parse_crysol_path(self, line):
        "parse path to crysol"
        path = line.split()[1]
        if not os.path.exists(path):
            # os.path.exists() returns False rather than raising, so test it directly
            raise ConfigError("Please provide a valid path to the CRYSOL program on your disk")
        self.crysol_path = path
def __parse_reheating_param(self, line):
line = line.split()
reheat = line[1]
reheat_rejections = line[2]
if reheat.upper() not in ["TRUE", "FALSE"]: raise ConfigError("Reheat is of value True or False")
if reheat_rejections.isalpha(): raise ConfigError("Rejection must be float value")
elif reheat.upper() == "TRUE" and float(reheat_rejections) > self.simul_steps : raise ConfigError ("Rejection frequency cannot be larger than number of simulation steps")
elif float(reheat_rejections) <= 0 : raise ConfigError ("Rejection frequency cannot be lower than 0")
if reheat.upper() == "TRUE": self.reheat = True
else: self.reheat = False
self.reheat_rejections = float(reheat_rejections)
    def __parse_score_weights_penalties(self, line, param_name):
        values = [line.split()[1], line.split()[2]]
        if len(values) != 2: raise ConfigError("for the " + str(param_name).upper() + " parameter you must provide 2 values")
        if values[0].upper() == "X" or values[1].upper() == "X": return 1
        if values[0].isalpha() or values[1].isalpha(): raise ConfigError(str(param_name).upper() + " must be a DIGIT, not an alphanumeric value")
        elif float(values[0]) >= 0 and float(values[1]) >= 0: setattr(self, param_name, [float(values[0]), float(values[1])])
        else: raise ConfigError(str(param_name).upper() + " value must be a positive value!")
if param_name == "freespace_penalty":
if float(values[0]) == 0. and float(values[1]) == 0:
self.is_freespace_defined = False
else:
self.is_freespace_defined = True
if param_name == "density_penalty":
if float(values[0]) == 0. and float(values[1]) == 0:
self.is_density_defined = False
else:
self.is_density_defined = True
if param_name == "chi2_penalty":
if float(values[0]) == 0. and float(values[1]) == 0:
self.is_chi2_defined = False
else:
self.is_chi2_defined = True
if param_name == "rg_penalty":
if float(values[0]) == 0. and float(values[1]) == 0:
self.is_rg_penalty_defined = False
else:
self.is_rg_penalty_defined = True
    def __parse_mutation_frequencies(self, line, param_name):
        value = line.split()[1]
        # check for "X" before calling float(), so float("X") cannot raise;
        # an empty dict keeps the callers' .update() working
        if value.upper() == "X": return {}
        if value.isalpha(): raise ConfigError(str(param_name) + " must be a DIGIT, not an alphanumeric value")
        floatValue = float(value)
        mutation_name = param_name.replace("_freq", "")
        if mutation_name == "simul_dd": mutation_name = "SimulateDisorder"
        if floatValue == 0: self.disabled_mutations.append(mutation_name)
        if 0. <= floatValue <= 1.: setattr(self, param_name, floatValue)
        else: raise ConfigError("Frequency of " + str(param_name).upper() + " must be defined in the range from 0 to 1")
        return {mutation_name: floatValue}
    def __parse_positive_params(self, line, param_name, type="float"):
        """
        """
        value = line.split()[1]
        if value.upper() == "X": return 1
        if value.isalpha(): raise ConfigError(str(param_name) + " must be a DIGIT, not an alphanumeric value")
        if not param_name == "threshold":
            try:
                if float(value) < 0:
                    raise ConfigError(str(param_name).upper() + " must be a positive value!")
            except ValueError:
                raise ConfigError(str(param_name).upper() + " must be a positive value! Use dots for float numbers.")
        if type == "float":
            setattr(self, param_name, float(value))
        if type == "int":
            setattr(self, param_name, int(value))
def __parse_param_value(self, line, param_name, list_of_values):
"""
"""
value = line.split()[1]
found = False
if value.upper() == "X" : return 1
        if value.isdigit(): raise ConfigError(str(param_name).upper() + " must be an alphanumeric value, not a DIGIT")
for val in list_of_values:
if value.lower() == val.lower():
found = True
break
if found == False:
raise ConfigError(str(param_name).upper()+" has only possible values "+"".join(list_of_values) )
if param_name == "simmethod" and value.upper() == "X": setattr(self,param_name, "simulatedannealing")
else: setattr(self,param_name, value.lower())
def __parse_rot_angle(self, line):
"""
"""
maxrot = line.split()[1]
if maxrot.upper() == "X": return 1
        if maxrot.isalpha(): raise ConfigError("MAXROT must be a DIGIT, not an alphanumeric value")
        # the original chained comparison (-360.0 > x > 360.) could never be true
        if not -360. <= float(maxrot) <= 360.: raise ConfigError("Rotation angle cannot be larger than 360 degrees or smaller than -360")
        else: self.max_rot_angle = float(maxrot)
def __parse_outsteps(self, line):
"""
"""
        out_steps = [line.split()[1], line.split()[2]]
        if len(out_steps) != 2: raise ConfigError("for the OUTSTEPS parameter you must provide 2 values")
        if (out_steps[0].upper() == "X" and out_steps[1].upper() == "X") \
           or (out_steps[0].upper() == "FIRST" and out_steps[1].upper() == "LAST"):
            self.out_steps = [1, self.simul_steps - 1]   # replace the defaults instead of appending to them
        elif float(out_steps[1]) > self.simul_steps:
            raise ConfigError("Steps value cannot be larger than the number of simulation steps!")
        elif int(out_steps[0]) > 0 and int(out_steps[1]) > 0:
            self.out_steps = [int(line.split()[1]), int(line.split()[2])]
        else:
            raise ConfigError("OutSteps value must be a positive value!")
def __parse_trans_vector(self, line):
"""
"""
trans_vec = [line.split()[1], line.split()[2], line.split()[3]]
if len(trans_vec) != 3 : raise ConfigError("for MAXTRANS parameter you must provide 3 values")
if trans_vec[0].upper() == "X" or trans_vec[1].upper() == "X" or trans_vec[2].upper() == "X": return 1
        elif trans_vec[0].isalpha() or trans_vec[1].isalpha() or trans_vec[2].isalpha(): raise ConfigError("MAXTRANS must be DIGITS, not alphanumeric values")
self.max_trans_vec = [float(trans_vec[0]), float(trans_vec[1]), float(trans_vec[2])]
def __parse_scaling(self, line, param_name, counter):
"""
"""
if counter == 3:
values = [float(line.split()[1]), float(line.split()[2]), float(line.split()[3])]
elif counter == 2:
values = [float(line.split()[1]), float(line.split()[2])]
percentValues = []
        for val in values:
            if str(val).isalpha(): raise ConfigError(param_name.upper() + " must be DIGITS, not an alphanumeric value")
            # the original chained comparison (100 < val < 0) could never be true
            elif not 0 <= val <= 100: raise ConfigError(param_name.upper() + " must be in the range 0 to 100")
            percentValues.append(val / 100.0)
setattr(self, param_name, percentValues)
return percentValues
def __parse_movestate(self, line):
"""
"""
chain_name = line.split()[1]
move_state = line.split()[2]
if move_state.lower() == "fixed" or move_state.lower() == "movable": pass
else: raise ConfigError("unknown state %s"%(move_state))
move_state = Movable(chain_name, line)
#print "@@@@@@@@@@2", move_state
self.movable.append(move_state)
def __parse_kvol(self, line):
"""
"""
kvol = line.split()[1]
        if kvol.isalpha(): raise ConfigError("KVOL must be DIGITS, not an alphanumeric value")
if kvol.upper() == "X": return 1
else:
if (float(kvol) > 10 or float(kvol) < 0):
raise ConfigError("Volume you provided is not in the range from 0 to 10!%s "%(kvol))
elif float(kvol) == 0:
raise ConfigError("Volume must be larger than 0!")
self.kvol = float(kvol)
def __parse_bool_param(self,line, param_name):
val = line.split()[1]
if val.upper() not in ["TRUE", "FALSE"]:
raise ConfigError('%s can be "True" or "False"'%(param_name.upper()))
else:
if val.upper() == "TRUE": val = True
else: val=False
setattr(self, param_name, val)
def __parse_values_list(self, line, param_name):
"""
retrieves list of digits of unknown length
"""
li = line.split()[1:]
temps = []
for el in li:
#do not parse elements after comment indicator '#'
if "#" in el:
break
elif el.isdigit(): temps.append(float(el))
setattr(self, param_name, temps)
def parse_config_file(self, filename, curvefile, shapedesc = True):
"""
parses input config file which contains simulation parameters and
scoring function elements (e.g. penalties for collisions)
Parameters:
-----------
filename : name of file with simulation parameters
comp_names : names of complex components
Returns:
--------
config object storing all simulation data
"""
self.shapedesc = shapedesc
self.curve = curvefile
if filename == "":
self.save_logfile()
return 1
fh = self.__open_config_file(filename)
#to check whether both params are not defined at the same time
for line in fh:
if line.startswith("#"): continue
elif line.upper().startswith("STEPS"):
self.__parse_positive_params(line,'simul_steps', "int")
elif line.upper().startswith("REHEAT"):
self.__parse_reheating_param(line)
#parse score weights ---------------------------------
elif line.upper().startswith("OUTBOX"):
self.__parse_score_weights_penalties(line,'outbox_penalty')
elif line.upper().startswith("MAP_FREESPACE"):
self.__parse_score_weights_penalties(line, 'freespace_penalty')
elif line.upper().startswith("CLASHES "):
self.required_clashes_penalty = True
self.__parse_score_weights_penalties(line, 'clashes_penalty')
elif line.upper().startswith("CLASHES_ALLATOMS"):
self.required_clashes_penalty_allatoms = True
self.__parse_score_weights_penalties(line, 'clashes_penalty')
elif line.upper().startswith("RESTRAINTS"):
self.__parse_score_weights_penalties(line, 'restraints_penalty')
elif line.upper().startswith("DENSITY"):
self.__parse_score_weights_penalties(line, 'density_penalty')
elif line.upper().startswith("SYMMETRY"):
self.__parse_score_weights_penalties(line, 'symmetry_penalty')
elif line.upper().startswith("CHI2"):
self.__parse_score_weights_penalties(line, 'chi2_penalty')
elif line.upper().startswith("RG "):
self.__parse_score_weights_penalties(line, 'rg_penalty')
#parse mutation frequencies-----------------------------
elif line.upper().startswith("ROTATION_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'rotation_freq') )
elif line.upper().startswith("ROTATION_COV_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'rotation_cov_freq') )
elif line.upper().startswith("TRANSLATION_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'translation_freq') )
elif line.upper().startswith("TRANSLATION_ALL_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'translation_all_freq') )
elif line.upper().startswith("ROTATION_ALL_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'rotation_all_freq') )
elif line.upper().startswith("ROTATION_WHOLE_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'rotation_whole_freq') )
elif line.upper().startswith("EXCHANGE_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'exchange_freq') )
elif line.upper().startswith("EXCHANGESAMPLE_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'exchangeandsample_freq') )
elif line.upper().startswith("ROTATION_WHOLE_FREQ"):
self.__parse_mutation_frequencies(line,'rotation_whole_freq')
elif line.upper().startswith("SIMUL_DD_FREQ"):
self.mutation_frequencies.update( self.__parse_mutation_frequencies(line,'simul_dd_freq') )
elif line.upper().startswith("WRITE_N_ITER"):
self.__parse_positive_params(line,'niter', "int")
self.out_steps = []
elif line.upper().startswith("WRITE_EACHBETTER"):
self.__parse_bool_param(line, "write_eachbetter")
self.out_steps = []
elif line.upper().startswith("OUT_STEPS"):
self.__parse_outsteps(line)
elif line.upper().startswith("STRUCT_NR"):
self.__parse_positive_params(line,'struct_nr', "int")
elif line.upper().startswith("MAXROT"):
self.__parse_rot_angle(line)
elif line.upper().startswith("MAXTRANS"):
self.__parse_trans_vector(line)
elif line.upper().startswith("COMPONENT_REPRESENTATION"):
self.__parse_param_value(line, "representation", ["fa","ca", "cacb", "3p", "sphere", "ellipsoid"])
elif line.upper().startswith("SCALEPARAMS"):
self.__parse_param_value(line, "param_scaling", ["on", "off"])
elif line.upper().startswith("PARAMSCALINGRANGES"):
self.__parse_scaling(line, "param_scaling_ranges", 3)
elif line.upper().startswith("PARAMSCALINGR1"):
self.scaling_ranges.append( self.__parse_scaling(line, "param_scaling_range1", 2) )
elif line.upper().startswith("PARAMSCALINGR2"):
self.scaling_ranges.append( self.__parse_scaling(line, "param_scaling_range2", 2) )
elif line.upper().startswith("PARAMSCALINGR3"):
self.scaling_ranges.append( self.__parse_scaling(line, "param_scaling_range3", 2) )
elif line.upper().startswith("KVOL"):
self.kvol_given = True
self.__parse_kvol(line)
elif line.upper().startswith("THRESHOLD"):
self.threshold_given = True
self.__parse_positive_params(line, "threshold")
elif line.upper().startswith("SIMBOX"):
self.__parse_positive_params(line,'simbox', "float")
elif line.upper().startswith("CRYSOL_PATH"):
self.__parse_crysol_path(line)
elif line.upper().startswith("GRIDRADIUS"):
self.__parse_positive_params(line,'simboxradius', "float")
elif line.upper().startswith("SAXSRADIUS"):
self.__parse_positive_params(line,'saxsradius', "float")
elif line.upper().startswith("RG_VAL"):
self.__parse_positive_params(line,'rg_val', "float")
elif line.upper().startswith("GRIDTYPE"):
self.__parse_param_value(line, "simgridtype", ["cubic", "diamond"])
elif line.startswith("SIMMETHOD"):
self.__parse_param_value(line, "simmethod", ["SimulatedAnnealing", "Genetic", "ReplicaExchange"])
elif line.upper().startswith("REPLICAEXCHANGE_FREQ"):
self.__parse_positive_params(line, "replica_exchange_freq", "int")
self.repl_exch_freq = True
elif line.upper().startswith("MAXPOOLSIZE"):
self.__parse_positive_params(line, "maxpoolsize", "int")
elif line.upper().startswith("REPLICATEMPERATURES"):
self.__parse_values_list(line, "replica_temps")
elif line.upper().startswith("ANNTEMP"):
self.__parse_positive_params(line,'anntemp', "float")
elif line.upper().startswith("REDUCTMETHOD"):
self.__parse_param_value(line, "reductmethod", ["Roulette", "Tournament", "Cutoff"])
elif line.upper().startswith("MOVE_STATE"):
self.__parse_movestate(line)
elif line.upper().startswith("START_ORIENTATION"):
self.__parse_bool_param(line, "start_orient")
elif line.upper().startswith("IDENTIFY_DISORDERS"):
self.__parse_bool_param(line, "identify_disorders")
elif line.upper().startswith("COVALENT_BONDS"):
self.__parse_covalent_links(line)
fh.close()
#parameter replicaexchangefreq not defined in config file
if self.repl_exch_freq == False:
self.replica_exchange_freq = self.simul_steps/10
if self.replica_exchange_freq == 0: self.replica_exchange_freq = 1
self.__check_save_structnr(["struct_nr", "replica_exchange_freq"])
self.__check_shape_descriptors()
self.__check_mutual_clashes_options()
self.__check_mut_freq_correctness()
self.__set_outsave_mode()
self.save_logfile()
def __check_save_structnr(self, params):
"""
"""
for param in params:
if self.simul_steps == 0: pass
elif getattr(self, param) > self.simul_steps:
print getattr(self, param)
raise ConfigError(str(param).upper()+" value cannot be larger than number of simulations!")
def __check_mutual_clashes_options(self):
        if self.required_clashes_penalty == True and self.required_clashes_penalty_allatoms == True:
            raise ConfigError("Only one option can be applied, CLASHES or CLASHES_ALLATOMS. \
            Please change the values to 0 for one of the parameters or comment one option out in the configuration file")
def __check_shape_descriptors(self):
"""
"""
if self.shapedesc:
if self.kvol_given == False and self.threshold_given == False and self.shapedesc == "map":
raise ConfigError("You must provide kvol or threshold value when you provide density map as input")
if self.kvol_given and self.threshold_given:
raise ConfigError("Please provide only one of these two parameters: KVOL or THRESHOLD!")
if self.threshold != None:
self.kvol = None
if self.is_freespace_defined == False:
self.freespace_penalty = [0.,0.]
if self.is_density_defined == False:
self.density_penalty = [0.,0.]
if self.density_penalty[0] != 0. and self.freespace_penalty[0] != 0.:
            print ConfigError("Scoring function will penalize shape filling twice since you defined two parameters: DENSITY and MAP_FREESPACE!")
            logfile.write_file("Scoring function will penalize shape filling twice since you defined two parameters: DENSITY and MAP_FREESPACE!\n")
if self.simbox > 10:
raise ConfigError("The size of the system you want to use is very large. Max simbox value is 10")
#when no file with density map or ab initio model was provided but density filling or mapspace weights were provided
        if self.shapedesc == False and self.is_density_defined == True:
            # log before raising, otherwise the message never reaches the logfile
            logfile.write_file("Map filling cannot be calculated when no shape descriptor was provided!\n")
            raise ConfigError("Map filling cannot be calculated when no shape descriptor was provided!")
#print "@@@@@@", self.shapedesc, self.is_freespace_defined
if self.shapedesc == False and self.is_freespace_defined == True: #(self.freespace_penalty[0] != 0):
#print "****", self.shapedesc, self.is_freespace_defined
raise ConfigError("Map filling cannot be calculated when no shape descriptor was provided!")
logfile.write_file("Map filling cannot be calculated when no shape descriptor was provided!\n")
if self.shapedesc == "map" or self.shapedesc == "saxs": self.shapedesc = True
if self.is_chi2_defined == True or self.is_rg_penalty_defined == True:
if not self.curve:
raise ConfigError("To verify discrepancy with SAXS/SANS curves you must provide .dat file!")
else:
self.crysol_outfile = open("crysol_summary.txt", "w")
if self.representation == "sphere" and self.restraints_penalty[0] == 0 and self.restraints_penalty[1] == 0:
raise ConfigError ("To validate clashes between spheres PyRy3D calculates violation of distances betweeen\
spheres centres. To allow this option penalty for RESTRAINTS must be different than 0 0")
        if self.identify_disorders == False and self.simul_dd_freq != 0.:
            raise ConfigError("You must allow PyRy3D to identify disorders to use the simulate-disorder mutation. Please set the IDENTIFY_DISORDERS parameter to True or disable the simulate-disorder mutation by setting SIMUL_DD_FREQ to 0")
def __check_mut_freq_correctness(self):
"""
"""
sumpar = float(self.rotation_freq + self.rotation_cov_freq + self.translation_freq + self.exchange_freq + self.exchangeandsample_freq\
+ self.simul_dd_freq + self.translation_all_freq + self.rotation_all_freq \
+ self.rotation_whole_freq)
if sumpar == 0: raise ConfigError("Frequencies of mutations must sum up to 1. You provided %s"%(sumpar))
if round(sumpar,1) > float(1.0):
raise ConfigError("Frequencies of mutations must sum up to 1. You provided %s"%(sumpar))
if round(sumpar,1) < float(1.0):
self.rotation_freq = self.rotation_freq/sumpar *1.0
self.rotation_cov_freq = self.rotation_cov_freq/sumpar *1.0
self.translation_freq = self.translation_freq/sumpar *1.0
self.exchange_freq = self.exchange_freq/sumpar *1.0
            self.exchangeandsample_freq = self.exchangeandsample_freq/sumpar *1.0
self.simul_dd_freq = self.simul_dd_freq/sumpar *1.0
self.translation_all_freq = self.translation_all_freq/sumpar *1.0
self.rotation_all_freq = self.rotation_all_freq/sumpar *1.0
self.rotation_whole_freq = self.rotation_whole_freq/sumpar *1.0
def __set_outsave_mode(self):
"""
"""
if self.simul_steps == 0: pass
elif self.niter > self.simul_steps:
raise ConfigError("Steps to write cannot be larger than number of simulation steps!")
#self.out_steps = []
if self.out_steps and self.niter and self.write_eachbetter:
raise ConfigError("You can select only one of output save methods either WRITE_N_ITER or or WRITE_EACHBETTER or OUT_STEPS")
if self.out_steps : self.save_res_mode = "outsteps"
elif self.write_eachbetter : self.save_res_mode = "eachbetter"
elif self.niter : self.save_res_mode = "niter"
def save_logfile(self):
"""
saves all alphabetically sorted config attributes to logfile
"""
attribs = self.__dict__
for a in sorted(attribs.keys()):
logfile.write_file(str(a).upper()+"."*20+" "+str(attribs[a])+"\n")
def set_movehistory_file(self, movehist_file):
"""
"""
self.movehistory = movehist_file
class CovalentBond:
def __init__(self, chains, at1=None, at2=None):
self.atom1 = at1
self.atom2 = at2
self.chains = chains
self.chains_indexes = []
self.__check_atoms(self.atom1)
self.__check_atoms(self.atom2)
def __str__(self):
return "%s %s %s" % (self.atom1, self.atom2, self.chains)
def __check_atoms(self, atom):
if len(atom) != 2: raise ConfigError("You haven't provided enough information about covalent bonds atoms")
if not str(atom[0]).isdigit(): raise ConfigError ("Residue number should be number not string")
if not str(atom[1]).isalpha(): raise ConfigError ("Atom name must be string, not number")
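# A minimal usage sketch (hypothetical file names; argument formats as parsed above):
#   config = Config()
#   config.parse_config_file("config.txt", curvefile=False, shapedesc="map")
#   config.set_movehistory_file("movehistory.txt")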
|
[
"mateusz.dobrychlop@gmail.com"
] |
mateusz.dobrychlop@gmail.com
|
2da64711a5d90ce39dce1b13f6653c5df230d136
|
fcc83583ed505381c7e1e2ca961a54e52552e8b4
|
/scylla/providers/pubproxy_provider.py
|
a8bacaafebdaa91022adc689f232b03b0b343820
|
[
"Apache-2.0"
] |
permissive
|
imWildCat/scylla
|
959485585c5ed742ce1659f5305a46b6198c798d
|
46f6f6bd2eba6e49603d263209efc59b74badf07
|
refs/heads/main
| 2023-09-02T23:10:23.000529
| 2023-05-22T17:48:29
| 2023-05-22T17:48:29
| 128,911,431
| 3,898
| 566
|
Apache-2.0
| 2023-08-14T21:53:20
| 2018-04-10T09:55:11
|
Python
|
UTF-8
|
Python
| false
| false
| 286
|
py
|
from scylla.providers.plain_text_provider import PlainTextProvider
class PubproxyProvider(PlainTextProvider):
def urls(self) -> [str]:
return [
'http://pubproxy.com/api/proxy?limit=5&format=txt&type=http&level=anonymous&last_check=60&no_country=CN',
]
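# Usage sketch: the provider only exposes its source URLs; fetching and parsing
# are assumed to live in PlainTextProvider, e.g. PubproxyProvider().urls()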
|
[
"songhou@advance.ai"
] |
songhou@advance.ai
|
cde5214a36a39b2137d3749a9e9e6e5bb14a2891
|
e56bba9d878de6cf3bb5fa39955606d5688c46c2
|
/.net/lib/python3.9/site-packages/aws_cdk/aws_cloudwatch/_jsii/__init__.py
|
504e0454e7246e627e3583773da9cce29f22a376
|
[] |
no_license
|
marrnik/network-pipeline
|
ce8132344f564c9e114602d0dc72e1c81e41d708
|
9715d51c187cd230c366fd1d3b8273435c5a7fe0
|
refs/heads/master
| 2023-03-16T06:29:34.787374
| 2021-03-10T23:43:48
| 2021-03-10T23:43:48
| 345,172,191
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 419
|
py
|
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
import aws_cdk.aws_iam._jsii
import aws_cdk.core._jsii
import constructs._jsii
__jsii_assembly__ = jsii.JSIIAssembly.load(
"@aws-cdk/aws-cloudwatch",
"1.92.0",
__name__[0:-6],
"aws-cloudwatch@1.92.0.jsii.tgz",
)
__all__ = [
"__jsii_assembly__",
]
publication.publish()
|
[
"marrapun@amazon.com"
] |
marrapun@amazon.com
|
76bd6b923760ccfa252ad5ce041eb33e2cb9fc88
|
b72f8c38be680d56b710a41fc7e34821523588b9
|
/iletisim/urls.py
|
831fc7524163572bc8215056b4fc17e153cc1c3d
|
[] |
no_license
|
gnyylm/ams
|
7c96eed3c568d23548d737485b4f97276f8d8d44
|
8c8f4b99b4aaceb903c87fef854e5e88519c642a
|
refs/heads/master
| 2020-04-24T20:55:18.856755
| 2019-03-03T19:14:19
| 2019-03-03T19:14:19
| 172,260,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 117
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name="iletisim"),
]
|
[
"gnyylm@gmail.com"
] |
gnyylm@gmail.com
|
6413fd233edd0ff1cd6d551d5f116647d2c3bd63
|
b961139f628cbda2221df78599f919847e9962b6
|
/[2020][modulo][Processamento_Linguagem_Natural]/codigos/venv2/bin/pip
|
a98340eaf9a3968d8ce912b3cdb5c05496b87bf1
|
[] |
no_license
|
rafaelstojoao/pos-unip
|
167fe60e752b1660172c71cd5a3607101b8b3d49
|
50d9711e313fdcac59fa89498b0a3ea5d004ac49
|
refs/heads/master
| 2022-12-21T14:20:36.790476
| 2020-10-25T11:52:41
| 2020-10-25T11:52:41
| 246,832,526
| 4
| 1
| null | 2022-12-19T10:12:14
| 2020-03-12T12:47:17
|
Python
|
UTF-8
|
Python
| false
| false
| 298
|
#!/mnt/DADOS/DOCS/Unip/[2020][modulo][Processamento_Linguagem_Natural]/codigos/venv2/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"rafaelstojoao@gmail.com"
] |
rafaelstojoao@gmail.com
|
|
ac4cec9c23d857374e16c812fac948e0c272797e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03108/s870352488.py
|
0b87a41dcc411c3fbc8ae14366e08bef4bb0f7fc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,652
|
py
|
import sys
import collections
readline = sys.stdin.readline
class UnionFind():
def __init__(self, n):
self.n = n
self.parents = [-1]*n
self.rank = [0]*n
self.size = [1]*n
def find(self, x):
if self.parents[x] < 0:
return x
else:
self.parents[x] = self.find(self.parents[x])
return self.parents[x]
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
if self.rank[x] < self.rank[y]:
self.size[y] += self.size[x]
self.parents[x] = y
else:
self.size[x] += self.size[y]
self.parents[y] = x
if self.rank[x] == self.rank[y]:
self.rank[x] += 1
def msize(self, x):
return -self.size[self.find(x)]
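# The queries below are answered offline: removing the M edges one by one is the
# same as *adding* them in reverse order with union-find. Before each union, the
# two components contribute size(A) * size(B) newly connected pairs, so the
# running count of disconnected pairs is built backwards and printed in the
# original order at the end.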
def main():
N, M = map(int, readline().split())
nodelist = []
for _ in range(M):
A, B = map(int, readline().split())
A -= 1; B -= 1
nodelist.append((A, B))
uf = UnionFind(N)
anstmp = (N*(N-1))//2
anslist = [anstmp]
for _ in range(M):
node = nodelist.pop()
n0 = uf.find(node[0])
n1 = uf.find(node[1])
if n0 != n1:
n0size = uf.size[n0]
n1size = uf.size[n1]
else:
n0size = 0; n1size = 0
uf.union(node[0], node[1])
anstmp = anslist[-1]
ans = anstmp - n0size*n1size
anslist.append(ans)
anslist = anslist[:-1]
for _ in range(len(anslist)):
ans = anslist.pop()
print(ans)
if __name__ == "__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
a22c4a5d4c713c99170baa66626b92928dfab26e
|
3acdc5bd817913a2eed491ea58087a9c9d2d5bec
|
/100/thirties/thirty-two.py
|
2574190641982c2bcd8b90e29168be3b119f2b56
|
[] |
no_license
|
o-90/project-euler
|
a6a1f968107803bdf75f5835bf0ae1e670158599
|
f64f9b600b41b2c2af8aaaae68eafd7fe48374c9
|
refs/heads/master
| 2023-01-27T12:57:48.921347
| 2017-01-23T15:43:04
| 2017-01-23T15:43:04
| 49,388,553
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
# -*- coding: utf-8 -*-
from itertools import permutations
# 32)
# We shall say that an n-digit number is pandigital if it makes use of all the
# digits 1 to n exactly once; for example, the 5-digit number, 15234, is 1
# through 5 pandigital.
#
# The product 7254 is unusual, as the identity, 39 × 186 = 7254, containing
# multiplicand, multiplier, and product is 1 through 9 pandigital.
#
# Find the sum of all products whose multiplicand/multiplier/product identity
# can be written as a 1 through 9 pandigital.
def get_prod(n, k=1):
    """Split the pandigital string n into multiplicand (first k digits),
    multiplier (digits k..4) and product (digits 5..8); return the product
    if the identity holds, else 0. E.g. '391867254' with k=2 encodes
    39 x 186 = 7254."""
    str_num = str(n)
    prod = int(str_num[5:])
if int(str_num[:k]) * int(str_num[k:5]) == prod:
return prod
else:
return 0
perms = [''.join(str(i) for i in x) for x in permutations(range(1, 10), 9)]
ans = []
for k in xrange(1, 5):
for perm in perms:
ans.append(get_prod(perm, k))
ans = sum(list(set(ans)))
print ans # 45228
|
[
"john.r.martinez14@gmail.com"
] |
john.r.martinez14@gmail.com
|
9d31dd701cf90d929170893cddab05db06011ba7
|
c4544c22c0618451746795090e07c80bc85a0877
|
/file_upload/article/forms.py
|
fd00ffba0492b96c7d39b7f2448d488bfccf1d67
|
[] |
no_license
|
RelaxedDong/Django_course
|
35f7027dc552ad148d2dc8679a19a1ffb12b8d14
|
2965089d15e4c80cd6402d362ee37f8cc675c08b
|
refs/heads/master
| 2022-01-09T14:28:40.503099
| 2019-05-24T07:07:03
| 2019-05-24T07:07:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
#encoding:utf-8
# __author__ = 'donghao'
# __time__ = 2019/5/13 21:52
from django import forms
from .models import Book
from django.core import validators
class BookForm(forms.ModelForm):
cover_url = forms.FileField(validators=[validators.FileExtensionValidator(allowed_extensions=['jpg','jpeg'])])
class Meta:
model = Book
fields = ['title','cover_url']
|
[
"1417766861@qq.com"
] |
1417766861@qq.com
|
bbb6268281ee09c15af62c26d0df2d1c6065e735
|
f9d5bc590bd6c6274d7a6efec0f60cac1d8286b2
|
/assets/coins/monero/moneroImportPrices.py
|
6a92df9ceca004c233c3ecc5ce2799c0931dad42
|
[] |
no_license
|
pr0logas/grepblockBEpython
|
35c83c1bf2114fc9417bedff6cf2a6e2ad2e667e
|
bbeaa290d13d80f993d843c7f1dbbfd373eee332
|
refs/heads/master
| 2022-10-03T23:35:44.600740
| 2020-03-09T08:24:53
| 2020-03-09T08:24:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
#:: By GrepBlock.com developers // pr0logas
#:: Modified date: 2019-11-30
#:: Description: This file is a workspace for Prices importation.
import sys, time
from time import gmtime, strftime
from monero import *
sys.path.append('../../../')
from mongoDB import *
from parsePrices import parseCoinGeckoPrices
db = database
col = collectionForPricesUSD
# Init Classes;
MC = mongoConnection(mongoAuth, db, col)
PP = parseCoinGeckoPrices(apiProvider, vsCurrencyUSD, assetName)
# CoinGecko
result = PP.parsePrice()
# Insert Unix Time
aggregatedData = PP.aggregateInsertUnixTime(result)
#Insert to MongoDB
res = MC.insertPricesData(collectionForPricesUSD, aggregatedData)
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
print timeSet + " Successfully inserted asset price: $" + res
|
[
"tomas@dappradar.com"
] |
tomas@dappradar.com
|
f6d2ffae909f5992e8ceea3bdc223d04acc73d4b
|
2c3da6e0bddf55d64d650040bbf286c47b31811a
|
/学习路线/1.python基础/day05/02-for-else语句.py
|
ec56422c4833eede814e9a25e0dca957f39f600e
|
[
"MIT"
] |
permissive
|
Bngzifei/PythonNotes
|
76bd53db3033a9c51ab4bdd727842cd89607b584
|
01590e1b6c1bc0f04aa2d355fa2553c04cce27f2
|
refs/heads/master
| 2023-02-04T06:49:00.725463
| 2020-12-15T09:26:40
| 2020-12-15T09:26:40
| 155,154,662
| 1
| 2
|
MIT
| 2020-09-08T01:30:19
| 2018-10-29T05:02:48
|
Python
|
UTF-8
|
Python
| false
| false
| 670
|
py
|
list1 = ["zhansan", "lisi1", 'ww']
# for name in list1: # 运行2次,出现逻辑错误
# if name == 'lisi':
# print('找到')
# else:
# print("没有找到")
"""当for执行完成后,默认for后面的else都会执行一次,如果不想让for后面的else执行,在for里面写个break"""
for name in list1: # 批量查找数据 if ... in...(判断有没有,True或False) 判断有没有我要的那个并返回(因为后续要用这个返回的)用for(break) else (判断有没有我要的那个)
if name == 'lisi':
print('找到')
break
else:
print('没找到')
# for ...else ... 是一个循环体内的.用于批量查找并返回一次提示信息
|
[
"bngzifei@gmail.com"
] |
bngzifei@gmail.com
|
cfa3dd642fe26db896b36347b90e698533b6c023
|
097ab159765aef32c7d05a1bc21b1abb3bc6f520
|
/orders/admin.py
|
7fbc0af18bb6ea0b0e96de50364052263b476adb
|
[] |
no_license
|
vijayrajmane/Pinocchio-s-Pizza-Django
|
ea42ca11e9e29b077a8f38f6f5b3fa754f5c3cb8
|
f8c26c7e15d5b0c634e536c01fc7f1de3b17fe27
|
refs/heads/master
| 2022-11-28T14:12:04.455263
| 2020-08-05T12:26:06
| 2020-08-05T12:26:06
| 284,448,929
| 0
| 0
| null | 2020-08-05T12:24:44
| 2020-08-02T11:34:28
|
Python
|
UTF-8
|
Python
| false
| false
| 512
|
py
|
from django.contrib import admin
from .models import pizza_category,pizza,topping,sub,salad,pasta,dinner_platter,Category,user_order,Order_counter,order
# Register your models here.
admin.site.register(pizza)
admin.site.register(pizza_category)
admin.site.register(topping)
admin.site.register(sub)
admin.site.register(salad)
admin.site.register(pasta)
admin.site.register(dinner_platter)
admin.site.register(Category)
admin.site.register(user_order)
admin.site.register(order)
admin.site.register(Order_counter)
|
[
"vijay.rajmane77@gmail.com"
] |
vijay.rajmane77@gmail.com
|
e44931d7645146a5f5ccd3e49417ae4ec7fc5af8
|
1115935baf990eaa474e6bd66b090e9e226432d2
|
/hujan_ui/maas/vlans/forms.py
|
ad5564c7e1b989238f54ed9e1472d85c8f09ad05
|
[
"Apache-2.0"
] |
permissive
|
hasanasari/hujan_ui
|
3cea8159622152c6cfef1949c8ef574091556d60
|
90dd7545478662a3e648507f36cb7079109be442
|
refs/heads/main
| 2023-03-19T01:41:27.675866
| 2021-03-03T03:29:52
| 2021-03-03T03:29:52
| 351,787,722
| 1
| 0
|
Apache-2.0
| 2021-03-26T13:16:04
| 2021-03-26T13:16:03
| null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
from django import forms
from hujan_ui import maas
class BaseVlanForm(forms.Form):
fabric_id = forms.ChoiceField(required=True,label='Fabric',)
space = forms.ChoiceField(required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['fabric_id'].choices = self.get_choice_fabric()
self.fields['space'].choices = self.get_choice_space()
def get_choice_fabric(self):
fabrics = maas.get_fabrics()
choices = [(x['id'], x['name']) for x in fabrics]
choices.insert(0, (None, '-----'))
return choices
def get_choice_space(self):
space = maas.get_spaces()
choices = [(x['id'], x['name']) for x in space]
choices.insert(0, (None, '-----'))
return choices
class VlanForm(forms.Form):
name = forms.CharField(required=True)
vid = forms.CharField(required=True)
fabric_id = forms.ChoiceField(required=True,label='Fabric',)
space = forms.ChoiceField(required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['fabric_id'].choices = self.get_choice_fabric()
self.fields['space'].choices = self.get_choice_space()
def get_choice_fabric(self):
fabrics = maas.get_fabrics()
choices = [(x['id'], x['name']) for x in fabrics]
choices.insert(0, (None, '-----'))
return choices
def get_choice_space(self):
space = maas.get_spaces()
choices = [(x['id'], x['name']) for x in space]
choices.insert(0, (None, '-----'))
return choices
class VlanEditForm(BaseVlanForm):
name = forms.CharField(required=False)
vid = forms.CharField(required=True)
mtu = forms.IntegerField(required=False)
    description = forms.CharField(required=False)  # forms.TextInput is a widget, not a field; CharField renders a text input
|
[
"arvanria@gmail.com"
] |
arvanria@gmail.com
|
cc8316692d63ad5a54a62873075af4ebc3c51ddf
|
34b9b39442bde1a3c8fa670ef60bcc84d772a067
|
/Assignment 4 - Deadline 17 oct 2017/wallCalculations_MARRUGO.py
|
a00655f65a7afe19d116f4477dd8fb5e2329c415
|
[] |
no_license
|
bnajafi/Scientific_Python_Assignments_POLIMI_EETBS
|
b398fc2754b843d63cd06d517235c16177a87dcf
|
8da926e995dcaf02a297c6bb2f3120c49d6d63da
|
refs/heads/master
| 2021-05-07T22:36:14.715936
| 2018-01-16T21:12:33
| 2018-01-16T21:12:33
| 107,265,075
| 38
| 86
| null | 2018-01-16T21:12:34
| 2017-10-17T12:24:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,174
|
py
|
# -*- coding: utf-8 -*-
# Assignment 4: functions for calculating the overall unit thermal resistance (the R-value)
# and the overall heat transfer coefficient (the U-factor) in parallel and series
# Library of materials (unit thermal resistances in m2*ºC/W)
material={"Inside_surface":0.12,"Outside_surface_winter": 0.030,
          "Outside_surface_summer": 0.044,"Common_brick":0.12,
          "Wood_bevel_lapped_siding": 0.14,"Wood_fiberboard_sheeting":0.23,
          "Glass_fiber_insulation":2.52,"Wood stud":0.63,"Gypsum_wallboard":0.079,"Wood":0.44,"Asphalt_shingle_roofing":0.077,"Plywood":0.11}
def matSeries(List):
    """Compute the overall unit thermal resistance (the R-value)
    and the overall heat transfer coefficient (the U-factor) for an element
    with all materials in series."""
# Adding the thermal resistance for the inside and outside surface for winter and summer
Winter=["Inside_surface","Outside_surface_winter"]
Summer=["Inside_surface","Outside_surface_summer"]
# Defining the total list of materials in series for winter and summer
LSwinter=List+Winter
LSsummer=List+Summer
# Defining the variables for the total unit thermal resistance in series and parallel
Rtotals=0
Rtotalw=0
# Defining the variable for the unit thermal resistance of each material
Rvalues_layers={}
# Defining the for cycle to acquired and compute the unit thermal resistance for
# Summer season
for anylayer in LSsummer:
# Acquiring the thermal resistance for the specific material of the list in [m2*ºC/W]
Rvalue_layer=material[anylayer]
# Computing the sum of each unit thermal resistance of each material in series in [m2*ºC/W]
Rtotals=Rtotals+Rvalue_layer
# Saving the value of each unit thermal resistance of each material in [m2*ºC/W]
Rdict={anylayer:Rvalue_layer}
Rvalues_layers.update(Rdict)
for anylayer in LSwinter:
# Acquiring the thermal resistance for the specific material of the list in [m2*ºC/W]
Rvalue_layer=material[anylayer]
# Computing the sum of each unit thermal resistance of each material in series in [m2*ºC/W]
Rtotalw=Rtotalw+Rvalue_layer
# Saving the value of each unit thermal resistance of each material in [m2*ºC/W]
Rdict={anylayer:Rvalue_layer}
Rvalues_layers.update(Rdict)
    # Computing the overall heat transfer coefficients of the wall for winter
    # and summer from the total unit thermal resistances [W/m2*ºC]
Uoverallw=(1/Rtotalw)
Uoveralls=(1/Rtotals)
Results={"Resistances":Rvalues_layers,"Rtotal_Winter":Rtotalw,"Utotal_winter":Uoverallw,"Rtotal_Summer":Rtotals,"Utotal_Summer":Uoveralls}
return Results
def matParallel(ListS,ListP,percentage):
    """Compute the overall unit thermal resistance (the R-value)
    and the overall heat transfer coefficient (the U-factor) for an element
    with materials in series (ListS) and in parallel (ListP)."""
# Adding the thermal resistance for the inside and outside surface for winter and summer
Winter=["Inside_surface","Outside_surface_winter"]
Summer=["Inside_surface","Outside_surface_summer"]
# Defining the total list of materials in series for winter and summer
LSwinter=ListS+Winter
LSsummer=ListS+Summer
# Defining the total list of materials in parallel for winter and summer
LPwinter=ListP+Winter
LPsummer=ListP+Summer
    # Length of the list of materials in series
    x=len(LSwinter)
    # Counter distinguishing series layers from parallel layers
    y=0
    # Total lists of materials for winter and summer
ListWinter=LSwinter+LPwinter
ListSummer=LSsummer+LPsummer
# Defining the variables for the total unit thermal resistance in series and parallel
Rtotals=0
Rtotalp=0
# Defining the variable for the unit thermal resistance of each material
Rvalues_layers={}
    # For loop to acquire and sum the unit thermal resistances for
    # the summer season
for anylayer in ListSummer:
# Acquiring the thermal resistance for the specific material of the list in [m2*ºC/W]
Rvalue_layer=material[anylayer]
if y<x:
# Computing the sum of each unit thermal resistance of each material in series in [m2*ºC/W]
Rtotals=Rtotals+Rvalue_layer
# Saving the value of each unit thermal resistance of each material in [m2*ºC/W]
Rdict={anylayer:Rvalue_layer}
Rvalues_layers.update(Rdict)
else:
# Computing the sum of each unit thermal resistance of each material in parallel in [m2*ºC/W]
Rtotalp=Rtotalp+Rvalue_layer
# Saving the value of each unit thermal resistance of each material in [m2*ºC/W]
Rdict={anylayer:Rvalue_layer}
Rvalues_layers.update(Rdict)
# Counting
y=y+1
    # Overall heat transfer coefficient of the wall, combining the series and
    # parallel paths weighted by their area percentages, in summer [W/m2*ºC]
UoverallS=(1/Rtotals)*(float(percentage))+(1/Rtotalp)*(1-float(percentage))
# Computing the overall unit thermal resistance for the wall in summer [m2*ºC/W]
RoverallS=1/UoverallS
    # Reset the accumulator variables
Rtotals=0
Rtotalp=0
y=0
    # For loop to acquire and sum the unit thermal resistances for
    # the winter season
for anylayer in ListWinter:
# Acquiring the thermal resistance for the specific material of the list in [m2*ºC/W]
Rvalue_layer=material[anylayer]
if y<x:
# Computing the sum of each unit thermal resistance of each material in series in [m2*ºC/W]
Rtotals=Rtotals+Rvalue_layer
# Saving the value of each unit thermal resistance of each material in [m2*ºC/W]
Rdict={anylayer:Rvalue_layer}
Rvalues_layers.update(Rdict)
else:
# Computing the sum of each unit thermal resistance of each material in parallel in [m2*ºC/W]
Rtotalp=Rtotalp+Rvalue_layer
# Saving the value of each unit thermal resistance of each material in [m2*ºC/W]
Rdict={anylayer:Rvalue_layer}
Rvalues_layers.update(Rdict)
# Counting
y=y+1
    # Overall heat transfer coefficient of the wall, combining the series and
    # parallel paths weighted by their area percentages, in winter [W/m2*ºC]
UoverallW=(1/Rtotals)*(float(percentage))+(1/Rtotalp)*(1-float(percentage))
# Computing the overall unit thermal resistance for the wall winter [m2*ºC/W]
RoverallW=1/UoverallW
# Returning of the results
Results={"Resistances":Rvalues_layers,"Rtotal_Winter":RoverallW,"Utotal_winter":UoverallW,"Rtotal_Summer":RoverallS,"Utotal_Summer":UoverallS}
return Results
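# Usage sketch (an illustration, not part of the original module): it assumes the
# module-level `material` dict of R-values [m2*ºC/W] that the functions above read
# from; the layer names and values below are hypothetical.
if __name__ == "__main__":
    material.update({"Brick": 0.12, "Plaster": 0.026, "Wood_stud": 0.63,
                     "Inside_surface": 0.12,
                     "Outside_surface_winter": 0.03,
                     "Outside_surface_summer": 0.044})
    print(matParallel(["Brick", "Plaster"], ["Wood_stud", "Plaster"], 0.75))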
|
[
"nicolasmarrugo93@gmail.com"
] |
nicolasmarrugo93@gmail.com
|
a72ef93895eefdf271fe78bdf774e556db7d501f
|
53963fb073afb4719e60fad3d4b1ed94326559b3
|
/wordcount/urls.py
|
6af4dbaae5e584f262af2bf2ea7503b3fdc7519c
|
[] |
no_license
|
harshaldesai01/wordcount-project
|
5754f030a418a43e35a763110a3263e155ef4a1f
|
01c59f3f10eeecbd6157fee506fd3a22a596ec10
|
refs/heads/master
| 2020-05-24T16:38:24.641312
| 2019-07-13T17:33:52
| 2019-07-13T17:33:52
| 187,365,060
| 0
| 0
| null | 2019-07-13T17:29:29
| 2019-05-18T14:04:16
|
Python
|
UTF-8
|
Python
| false
| false
| 215
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.homepage, name='home'),
path('count/', views.count, name='count'),
path('about/', views.about, name='about'),
]
|
[
"harshaldesai01@gmail.com"
] |
harshaldesai01@gmail.com
|
e5029b3854dbaef24fb6cce6c6025ff4d71cca34
|
e8e2f3cb21e3f3c289b890dcf3cde567bb92dc32
|
/venv/bin/chardetect
|
a471d60fdc696af75d4b511e1d3b9a0af3f271c1
|
[] |
no_license
|
Timur597/Feliz
|
a0071b93a87eab015dd205e14cba88bcb5f34926
|
6f712ded791c84dee71f75934fb77d0ae101f5e6
|
refs/heads/master
| 2023-05-27T15:54:54.782528
| 2021-06-09T16:34:45
| 2021-06-09T16:34:45
| 373,058,036
| 0
| 1
| null | 2021-06-09T16:47:59
| 2021-06-02T06:07:12
|
Python
|
UTF-8
|
Python
| false
| false
| 262
|
#!/home/timur/PyCharmProjects/feeliz-master/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"khalilov.timur97@mail.ru"
] |
khalilov.timur97@mail.ru
|
|
4484b30afa060356763fe6262633868b534151bc
|
58d91b99f252ce924906bf0bbe054bf3b9706f74
|
/utils/data.py
|
e76a2f9af02635874507c42545a41849050e2792
|
[
"MIT"
] |
permissive
|
rnoxy/ProjectDL2-Segmentation
|
c84df2da1f441bdfe674444d222c76405b6262fa
|
94c1ff41e770e56a01f115f64721173c66073f50
|
refs/heads/master
| 2022-11-11T18:24:56.890658
| 2020-06-18T10:55:08
| 2020-06-18T10:55:08
| 273,213,659
| 0
| 0
|
MIT
| 2020-06-18T10:54:09
| 2020-06-18T10:54:09
| null |
UTF-8
|
Python
| false
| false
| 4,903
|
py
|
from base.data_preprocessing import BaseDataPreprocessing
import tensorflow as tf
import os
import numpy as np
from utils.types import Datapoint
from dataclasses import dataclass
from typing import Sequence, Dict, Callable, Any
import math
DATASET_SIZE = 2913
"""
Splits: 70%/15%/15%
"""
SUBSET_SIZES = {
"train": int(0.7 * DATASET_SIZE),
"valid": int(0.15 * DATASET_SIZE),
"test": DATASET_SIZE - int(0.7 * DATASET_SIZE) - int(0.15 * DATASET_SIZE)
}
def color_map(n):
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
indices = set(range(n)) | {255}
cmap = np.zeros((len(indices), 3), dtype=np.uint8)
for i in indices:
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
if i == 255:
i = -1
cmap[i] = np.array([r, g, b])
return cmap
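# For reference, this reproduces the standard PASCAL VOC palette: index 0 ->
# (0, 0, 0), 1 -> (128, 0, 0), 2 -> (0, 128, 0), and the void label 255 is
# stored at cmap[-1] as (224, 224, 192).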
N_PROPER_CLASSES = 21
N_CLASSES = N_PROPER_CLASSES + 1
CMAP = color_map(N_PROPER_CLASSES)
def cmap_to_one_hot(img):
label = tf.equal(img[:, :, None, :], CMAP[None, None, :, :])
label = tf.reduce_all(label, axis=3)
label = tf.cast(label, tf.uint8)
return label
def indices_to_cmap(indices):
return tf.gather(CMAP, indices, axis=0)
def get_train_batch_count(config) -> int:
batch_size = config.data.get("batch_size", 1)
return math.ceil(SUBSET_SIZES["train"] / batch_size)
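# For example, with batch_size=32 this gives math.ceil(2039 / 32) == 64 batches,
# since SUBSET_SIZES["train"] == int(0.7 * 2913) == 2039.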
@dataclass
class Split:
split: Callable[[tf.data.Dataset], tf.data.Dataset]
preprocessing: Callable[[Datapoint], Any]
def get_train_valid_data(config, preprocessing: BaseDataPreprocessing) -> Dict[str, tf.data.Dataset]:
root = os.path.join(config.data.get("data_dir", "data"), "VOCdevkit", "VOC2012")
splits = {
"train": Split(
split=lambda ds: ds.take(SUBSET_SIZES["train"]),
preprocessing=lambda datapoint: preprocessing.preprocess_train(datapoint),
),
"valid": Split(
split=lambda ds: ds.skip(SUBSET_SIZES["train"]).take(SUBSET_SIZES["valid"]),
preprocessing=lambda datapoint: preprocessing.preprocess_valid(datapoint),
),
}
batch_size = config.data.get("batch_size", 1)
dataset = _create_dataset(
root,
_get_filenames(root, "trainval"),
splits,
config.data.get("workers", None),
batch_size,
)
if config.data.shuffle:
dataset["train"] = dataset["train"].shuffle(
config.data.get("shuffle_buffer_size", get_train_batch_count(config)))
if config.data.prefetch:
dataset = {k: ds.prefetch(config.data.get("prefetch_buffer_size", 50)) for k, ds in dataset.items()}
return dataset
def get_test_data(config, preprocessing: BaseDataPreprocessing) -> Dict[str, tf.data.Dataset]:
root = os.path.join(config.data.get("data_dir", "data"), "VOCdevkit", "VOC2012")
splits = {"test": Split(
split=lambda ds: ds.skip(SUBSET_SIZES["train"] + SUBSET_SIZES["valid"]),
preprocessing=lambda datapoint: preprocessing.preprocess_valid(datapoint),
)}
dataset = _create_dataset(
root,
_get_filenames(root, "trainval"),
splits,
config.data.get("workers"),
config.data.get("batch_size", 1),
)
if config.data.prefetch:
dataset["test"] = dataset["test"].prefetch(config.data.get("prefetch_buffer_size", 10))
return dataset
def _get_filenames(root, split):
path = os.path.join(root, "ImageSets", "Segmentation", split + ".txt")
with open(path) as f:
filenames = [line.strip() for line in f.readlines()]
return filenames
def _create_dataset(
root: str,
filenames: Sequence[str],
splits: Dict[str, Split],
workers: int,
batch_size: int,
) -> Dict[str, tf.data.Dataset]:
def gen():
yield from filenames
dataset = tf.data.Dataset.from_generator(gen, output_types=tf.string)
split_datasets = {}
for name, s in splits.items():
def load_and_preprocess(filename):
datapoint = _load_sample(root, filename)
return s.preprocessing(datapoint)
split_datasets[name] = s.split(dataset) \
.map(load_and_preprocess, num_parallel_calls=workers) \
.batch(batch_size)
return split_datasets
def _load_sample(root: str, filename: tf.Tensor) -> Datapoint:
image_path = tf.strings.join([root, "JPEGImages", filename + ".jpg"], separator=os.sep)
label_path = tf.strings.join([root, "SegmentationClass", filename + ".png"], separator=os.sep)
image = tf.io.read_file(image_path)
label = tf.io.read_file(label_path)
image = tf.image.decode_jpeg(image, channels=3)
label = tf.image.decode_png(label, channels=3)
label = cmap_to_one_hot(label)
return Datapoint(filename, image, label)
|
[
"jawor403@gmail.com"
] |
jawor403@gmail.com
|
059bcedd66f7ef96cab189652c770b168fc24da1
|
0d0711c5588e80995d83cfd1fa814d8031f591a6
|
/offer/cut_rope.py
|
bb70a92793eef8546003539c57593d7c2c8027a9
|
[] |
no_license
|
lxlscut/leecode
|
f215a85e42904cf40c6ff83b04ac1e0b9cd12ee0
|
121256d6d488869dcd19e0df3fe3142206cad6dc
|
refs/heads/master
| 2022-12-26T15:11:39.888532
| 2020-10-12T03:13:32
| 2020-10-12T03:13:32
| 303,265,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
def cut_rope(length):
    best = 0
    # try cutting the rope into i segments
    for i in range(2, length + 1):
        # integer part: the base segment length
        a = length // i
        # remainder: how many segments get one extra unit
        b = length % i
        val = a**(i-b)*(a+1)**b
        if val > best:
            best = val
    return best
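# For example, cut_rope(2) -> 1 (1*1) and cut_rope(8) -> 18 (2*3*3), the classic
# answers to the maximum-product rope-cutting problem.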
if __name__ == '__main__':
res = cut_rope(2)
print(res)
|
[
"2684017046@qq.com"
] |
2684017046@qq.com
|
8edf7add9dd89a5a59c9d84008f56f0adbe83abc
|
b7b40fffd7d192b89a7ad3bdb791a7dbd072ac64
|
/axelrod/tests/test_memoryone.py
|
44167991b5bf6387399275371a16858e90bad540
|
[
"MIT"
] |
permissive
|
DEFALT303/Axelrod
|
f91911ad7a404c30edfef38afd02319fcd12bc15
|
e59fc40ebb705afe05cea6f30e282d1e9c621259
|
refs/heads/master
| 2020-09-24T08:39:49.107919
| 2015-04-16T16:15:42
| 2015-04-16T16:15:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,640
|
py
|
"""Test for the memoryone strategies."""
import random
import axelrod
from test_player import TestPlayer
class TestWinStayLostShift(TestPlayer):
name = "Win-Stay Lose-Shift"
player = axelrod.WinStayLoseShift
def test_strategy(self):
"""Starts by cooperating"""
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
def test_effect_of_strategy(self):
"""Check that switches if does not get best payoff."""
P1 = self.player()
P2 = axelrod.Player()
P1.history = ['C']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'C')
class TestGTFT(TestPlayer):
name = "Generous Tit-For-Tat"
player = axelrod.GTFT
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticCooperator(TestPlayer):
name = "Stochastic Cooperator"
player = axelrod.StochasticCooperator
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(15)
# With probability .065 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(1)
# With probability .229 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(3)
# With probability .266 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
random.seed(13)
# With probability .42 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticWSLS(TestPlayer):
name = "Stochastic WSLS"
player = axelrod.StochasticWSLS
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
        # But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
class TestZDChi(TestPlayer):
name = "ZDChi"
player = axelrod.ZDChi
stochastic = True
def test_four_vector(self):
P1 = self.player()
expected_dictionary = {('C', 'D'): 0.5, ('D', 'C'): 0.75, ('D', 'D'): 0.0, ('C', 'C'): 1.1666666666666667}
for key in sorted(expected_dictionary.keys()):
self.assertAlmostEqual(P1._four_vector[key],
expected_dictionary[key])
def test_strategy(self):
# Testing the expected value is difficult here so these just ensure that
# future changes that break these tests will be examined carefully.
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
|
[
"vincent.knight@gmail.com"
] |
vincent.knight@gmail.com
|
057d9202a1f973f830d044a22df3ad832451bb6b
|
1aab2661491a7241096d75471aaa51c2a3f794f0
|
/para_parsing/main.py
|
18eb9a980088722380c8f0372bd1cd12bc1a2bde
|
[] |
no_license
|
UdaySwami/qa_mapper
|
42b1e2d276b90106207153c5ce5991deccc0b851
|
4021e871da3d1e389f034c2a28dcc8f7b9a1f71a
|
refs/heads/master
| 2022-11-05T00:52:51.857567
| 2020-06-23T09:57:32
| 2020-06-23T09:57:32
| 274,156,462
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
import os
from qa_mapper.para_parsing.file_parser import Parser
# from qa_mapper.para_parsing import Parser
import nltk
nltk.data.path.append(os.path.abspath(os.path.curdir))
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
class QAMapper:
def __init__(self, paragraph_file_name):
self.parser = Parser(paragraph_file_name)
def tokenize_questions(self):
word_tokenize_questions = [nltk.word_tokenize(q) for q in self.parser.questions]
self.token_questions = [nltk.pos_tag(q) for q in word_tokenize_questions]
def tokenize_sentences(self):
word_tokenize_sentences = [nltk.word_tokenize(s) for s in self.parser.answer_sentences]
self.token_sentences = [nltk.pos_tag(s) for s in word_tokenize_sentences]
def find_answers(self):
# Tokenize questions and sentences from paragraph to find similarity
self.tokenize_questions()
self.tokenize_sentences()
self.qa_mapper = {}
for i in range(len(self.token_questions)):
q = self.token_questions[i]
original_question = self.parser.questions[i]
max_similarity = 0
for j in range(len(self.token_sentences)):
s = self.token_sentences[j]
original_sentence = self.parser.answer_sentences[j]
similarity = 0
for w in q:
if w in s:
similarity = similarity + 1
            # The sentence with the highest word overlap is taken to contain the answer
if similarity >= max_similarity:
self.qa_mapper[original_question] = (original_sentence, self.parser.get_answer_from_sentence(original_sentence))
max_similarity = similarity
for q in self.qa_mapper:
s,a = self.qa_mapper[q]
print("Answer to Question %s is: %s" % (q,a))
if __name__ == '__main__':
paragraph_file_name = "para"
# paragraph_file_name = "sachin_tendulkar_test_para"
mapper = QAMapper(paragraph_file_name)
mapper.find_answers()
|
[
"uday.swami@druva.com"
] |
uday.swami@druva.com
|
fea402ed06f40785cacbf954f34865f10e62de55
|
76dba08689db40edf2d01a98856fa2a20d98d679
|
/甲鱼python/课程代码/第11讲/第11讲课后作业.py
|
f38d6087bebb08ecebe94960c7ce4388591454c7
|
[] |
no_license
|
pangfeiyo/PythonLearn
|
ce0747d75b53eb21acb6199acfe10934778420b2
|
b514b3d7baa62fa7b801d26ff49266f02cb9cbd2
|
refs/heads/master
| 2021-05-11T10:20:14.818774
| 2020-01-16T15:47:16
| 2020-01-16T15:47:16
| 118,096,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
# Take an element from the end of the list and insert it at the front
member = ['一','甲鱼','玩笑']
member.insert(0,member.pop())
print(member)
# Python supports negative indices; the last element of a list is at index -1
list2 = [1,3,2,9,7,8]
print(list2[-3:-1])
# The difference between slicing and assignment:
# slicing amounts to copying
sy1 = [1,3,2,9,7,8]
sy2 = sy1[:] # slicing copies the contents of sy1 into sy2
sy3 = sy1 # assigning sy1 to sy3 (both names refer to the same list)
sy1.sort() # sort sy1 in ascending order
print('sy1:',sy1)
print('sy2:',sy2)
print('sy3:',sy3)
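# Expected outcome: sy2 keeps the original order, while sy1 and sy3 both print the
# sorted list, because sy3 is just another name for the same list object as sy1.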
|
[
"35551631+pangfeiyo@users.noreply.github.com"
] |
35551631+pangfeiyo@users.noreply.github.com
|
0968ad217bcbd03cf908a4a1ff9df1b5fa2ba861
|
1c03f99fe47a528254a3b579f78714d914d6b893
|
/classes/entity/Size.py
|
1517182b4c82122b3d4530821a028338fc0aac92
|
[] |
no_license
|
yuryprokashev/dress-plot
|
8ae569d2c445008fafbde235bdb117cac0134b00
|
10f240dcc8678b3465d9baa4842bf6faf41ef613
|
refs/heads/master
| 2020-04-08T04:30:01.074591
| 2018-12-08T09:48:15
| 2018-12-08T09:48:15
| 159,019,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 524
|
py
|
class Size(object):
def __init__(self,
half_neck,
half_chest,
half_waist,
half_hip,
back_till_waist,
full_length,
full_shoulder):
self.half_neck = half_neck
self.half_chest = half_chest
self.half_waist = half_waist
self.half_hip = half_hip
self.back_till_waist = back_till_waist
self.full_length = full_length
self.full_shoulder = full_shoulder
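# Usage sketch (illustrative measurements; units are whatever the caller uses):
# Size(half_neck=19, half_chest=48, half_waist=38, half_hip=50,
#      back_till_waist=40, full_length=100, full_shoulder=38)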
|
[
"yury.prokashev@gmail.com"
] |
yury.prokashev@gmail.com
|
1950e1b780b8b85a152cd64094e8bcc0f574803a
|
04d3a5621916e0538fccf15d61b68b83566c6bdf
|
/Assignment_2/Rides/rideservice/models.py
|
aa96364c25b322a1da27128d1358fb6423cedfd6
|
[] |
no_license
|
rachana-dani/Rideshare-CC-S6-2020
|
03a1e6515944a1edea5ef10ec457e7210d442a56
|
c243b5eaacfc99c3c0a412e054121c8139ebaec4
|
refs/heads/master
| 2022-07-29T10:35:40.379545
| 2020-05-14T06:50:45
| 2020-05-14T06:50:45
| 263,821,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
from django.db import models
from django.utils import timezone
import datetime
class Ride(models.Model):
ride_id = models.IntegerField(primary_key=True)
created_by = models.CharField(max_length=50)
timestamp = models.DateTimeField(max_length=30)
source = models.IntegerField()
destination = models.IntegerField()
def __str__(self):
return str(self.ride_id)
class User_rides(models.Model):
username = models.CharField(max_length=50)
ride_id = models.ForeignKey(Ride, on_delete=models.CASCADE)
class Meta:
unique_together = ['username', 'ride_id']
def __str__(self):
return str(self.username)+str(self.ride_id)
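# Note: unique_together makes (username, ride_id) a composite uniqueness constraint,
# so adding the same user to the same ride a second time raises an IntegrityError.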
|
[
"rachnadani.rd@gmail.com"
] |
rachnadani.rd@gmail.com
|
88945a0a1f5a054699efb8b22abb6da368245cad
|
5576dffdcd0f6fcd3ca54041c7a7f82e5911f06f
|
/src/modules/linear.py
|
bd9f3009e0a7a787b38aa2da9c16e586f495252a
|
[
"MIT"
] |
permissive
|
bcaitech1/p4-mod-model_diet
|
4268091d829ba0667807a88a523682385aca8df7
|
36d8a747e12c375b07d132ed4d08f9fc77126a8b
|
refs/heads/main
| 2023-05-29T03:10:51.761647
| 2021-06-20T08:56:41
| 2021-06-20T08:56:41
| 370,229,481
| 1
| 0
|
MIT
| 2021-06-20T08:56:43
| 2021-05-24T04:43:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,480
|
py
|
"""Linear module, generator.
- Author: Jongkuk Lim
- Contact: lim.jeikei@gmail.com
"""
from typing import Union
import torch
from torch import nn as nn
from src.modules.base_generator import GeneratorAbstract
from src.utils.torch_utils import Activation
class Linear(nn.Module):
"""Linear module."""
def __init__(self, in_channel: int, out_channel: int, activation: Union[str, None]):
"""
Args:
in_channel: input channels.
out_channel: output channels.
activation: activation name. If None is given, nn.Identity is applied
which is no activation.
"""
super().__init__()
self.linear = nn.Linear(in_channel, out_channel)
self.activation = Activation(activation)()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward."""
return self.activation(self.linear(x))
class LinearGenerator(GeneratorAbstract):
"""Linear (fully connected) module generator for parsing."""
def __init__(self, *args, **kwargs):
"""Initailize."""
super().__init__(*args, **kwargs)
@property
def out_channel(self) -> int:
"""Get out channel size."""
return self.args[0]
def __call__(self, repeat: int = 1):
# TODO: Apply repeat
act = self.args[1] if len(self.args) > 1 else None
return self._get_module(
Linear(self.in_channel, self.out_channel, activation=act)
)
|
[
"smithfrancis313@gmail.com"
] |
smithfrancis313@gmail.com
|
c68f7274b878d6ce38cc6f4bf60a1f81062543ae
|
a6a046433cfccb68693f88fc37922d0696790002
|
/guessNumber.py
|
2c5b4effd3c82b8c205c1ab9739ff662d8ac9587
|
[] |
no_license
|
SrishtiC-008/guessingGame
|
60a936847a836d5f2bba2d58b1742c78826e5038
|
cb28cbfa85b871f04013de645efb0556ac48fcc7
|
refs/heads/main
| 2023-07-18T03:04:10.119025
| 2021-09-02T11:36:16
| 2021-09-02T11:36:16
| 402,397,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
import random
print("Number Guessing Game")
chances = 0
number = random.randint(1,9)
while chances < 5:
    guess = int(input("Guess a number between 1 and 9: "))
    if guess == number:
        print("Congratulations, you won")
        break
    elif guess < number:
        print("Your guess is too low; guess a higher number:", guess)
    else:
        print("Your guess is too high; guess a lower number:", guess)
    chances += 1
if not chances < 5:
    print("You lose!! The number was", number)
|
[
"noreply@github.com"
] |
SrishtiC-008.noreply@github.com
|
b66f70766f6fe3c97d830918ab3d7c33e5f9c1d4
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/161/49113/submittedfiles/testes.py
|
3ede0d9001c5c08b41881d224976a6c2ae167e4c
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
n=int(input('numero:'))
soma=0
for i in range(1,n+1,1):
    # partial sum of the Leibniz series for pi (assumed intent of the original)
    soma=soma+4*((-1)**(i+1))/(2*i-1)
print('%.4f' %soma)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
00df566357d11d45e4d51f2dfaeb0fec227b0d60
|
9e7e7125dff233fdd9b2722199f103b4df66e7f0
|
/ex4.py
|
f0b8c0ce790b38c5602b4cdcfc8617445dda7a17
|
[] |
no_license
|
natalijascekic/MidtermExample1
|
d43b3478607bf6e8cc8b8a5db1d15e347cf6a565
|
5894d4d2ce5768c8d49f0aef4e0294fcb3e0e5b3
|
refs/heads/master
| 2021-04-06T08:27:00.915557
| 2018-03-08T10:45:33
| 2018-03-08T10:45:33
| 124,368,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 356
|
py
|
# sum of the digits of a natural number
def suma_cifara(broj):
suma = 0
for i in str(broj):
cifra = int(i)
suma = suma + cifra
return suma
def suma_cifara_matematicara(broj):
    # digit-by-digit version: accumulate the digits instead of just printing them
    suma = 0
    while (broj > 0):
        cifra = broj % 10
        broj = broj // 10
        suma = suma + cifra
    return suma
print(suma_cifara(12345))
print(suma_cifara_matematicara(12345))
|
[
"natalija.scekic@udg.edu.me"
] |
natalija.scekic@udg.edu.me
|
2ec758ced961e2640d9cce5faf9debaa46c1e165
|
959ecb8444de4f5dfd8bd309d31ec23e9b09f972
|
/twitter/pipeline.py
|
b638b230247f58327c9d4e90362a259d9ee78ac5
|
[] |
no_license
|
themichaelusa/dealeye
|
c5072ac9cfadee43747789a6dcece31824bcd47d
|
5f49e1bedb8a6cd437839b86efaad3c16eb21bb3
|
refs/heads/master
| 2023-03-09T11:34:58.081187
| 2020-04-22T23:32:39
| 2020-04-22T23:32:39
| 253,937,917
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,986
|
py
|
### STDLIB IMPORTS
import sys
## INIT COMMANDS
sys.path.append('../')
### LOCAL IMPORTS
import export
from store import TwitterDataDB
from scrape import TwitterScraper
from extract import TwitterDataExtractor
from url_utils import RedirectURLResolver
from email_utils import HunterEmailResolver
### PACKAGE IMPORTS
pass
### CONSTANTS
from constants import STATUS
from constants import SCHOOLS
### MACROS/LAMBDAS/GLOBALS
TW_DB = TwitterDataDB('../dealeye.db')
TW_SCRAPER = TwitterScraper()
TW_CLEANER = TwitterDataExtractor()
REDIR_URL_RESOLVER = RedirectURLResolver()
CONTACT_EMAIL_SCRAPER = HunterEmailResolver()
### MAIN ###
def scrape_users(location, status):
TW_SCRAPER.gen_keyword_space(location, status)
users_dict = TW_SCRAPER.pull_users_by_keywords_space()
TW_CLEANER.set_target(users_dict)
data_dicts = TW_CLEANER.gen_default_data_dicts()
    for uid, data in data_dicts:
        TW_DB.add(str(uid), data)
def extract_user_contact_links():
users_dict = TW_DB.get_all_users_as_dict()
TW_CLEANER.set_target(users_dict)
### resolve profile urls
raw_profile_urls = TW_CLEANER.get_all_users_profile_urls()
profile_urls = REDIR_URL_RESOLVER(raw_profile_urls)
TW_CLEANER.filter_profile_urls(profile_urls)
### get valid twitter accounts from user acct descriptions
descs_data = TW_CLEANER.get_twitter_accounts_from_desc()
unames_dict, all_floating_urls = descs_data
urls_dict = TW_SCRAPER.get_twitter_urls_by_unames(unames_dict)
### prep all urls for redirect resolution + run resolver
urls_final = TW_CLEANER.get_all_desc_urls(*descs_data, urls_dict)
redir_urls = REDIR_URL_RESOLVER(urls_final)
TW_CLEANER.filter_desc_urls(redir_urls)
### after all data engineering --> store in DB
for id, data in TW_CLEANER.get_target().items():
TW_DB.write(id=id, data=data)
def extract_contact_emails_for_users():
users_dict = TW_DB.get_all_users_as_dict()
CONTACT_EMAIL_SCRAPER.set_target(users_dict)
## filter which emails to parse
ids_to_parse = []
for id, data in users_dict.items():
if len(data['description_urls']):
if not len(data['valid_contact_emails']):
ids_to_parse.append(id)
CONTACT_EMAIL_SCRAPER.scrape_contact_emails_for_users(
set(ids_to_parse))
for id, data in CONTACT_EMAIL_SCRAPER.get_target().items():
TW_DB.write(id=id, data=data)
def export_db_to_excel(ex_path):
users_dict = TW_DB.get_all_users_as_dict()
for id, data in users_dict.items():
data['valid_contact_emails'] = set(data['valid_contact_emails'])
try:
if not len(data['valid_contact_emails']):
data['valid_contact_emails'] = None
except Exception as e:
pass
data['checked_contact_domains'] = set(data['checked_contact_domains'])
try:
            if not len(data['checked_contact_domains']):
data['checked_contact_domains'] = None
except Exception as e:
pass
users_dict[id] = data
export.users_to_xlsx(ex_path, users_dict.values())
if __name__ == '__main__':
#export_db_to_excel('../broader_terms.xlsx')
extract_user_contact_links()
|
[
"meu2@illinois.edu"
] |
meu2@illinois.edu
|
6d3a407300be31a52b42c56933a379199611b440
|
28c82784b17dd8bb1cbeb133e81958c33e875684
|
/6_lab_resonance.py
|
922e2997ef2b5ccbfd8c1ea04a5d26cc392c69dd
|
[
"MIT"
] |
permissive
|
dwahme/physics_calc_enabler
|
7e7c673601a1fda720114a9fea0e2a9b7e52e311
|
f0fc4c9214cd024344c5f0d3684b29666339395e
|
refs/heads/master
| 2020-04-03T00:27:56.566201
| 2018-12-31T17:48:17
| 2018-12-31T17:48:17
| 154,900,221
| 0
| 0
|
MIT
| 2018-12-31T17:48:18
| 2018-10-26T22:16:38
|
Python
|
UTF-8
|
Python
| false
| false
| 435
|
py
|
import functions as f
##################
# BEGIN DATA INPUT
##################
mass_g = 0
mass_g_uncert = 0
k_const = 0
oscillation_periods = [
0, 0
]
################
# END DATA INPUT
################
# CALCULATIONS
# predict the period of oscillation using mass_g and k_const
# get average oscillation period
# get uncertainty of oscillation period using spread
# determine if consistent? can be done manually
# RESULTS
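# A hedged sketch of the steps above (assumptions: T = 2*pi*sqrt(m/k) with the mass
# converted from grams to kilograms, and half the spread as the uncertainty; the
# course helper module `functions` is not shown, so plain math is used instead):
import math
if k_const:  # the placeholders above are zero until lab data is entered
    predicted_period = 2 * math.pi * math.sqrt((mass_g / 1000.0) / k_const)
    average_period = sum(oscillation_periods) / len(oscillation_periods)
    period_uncert = (max(oscillation_periods) - min(oscillation_periods)) / 2.0
    print(predicted_period, average_period, period_uncert)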
|
[
"dwahme1@gmail.com"
] |
dwahme1@gmail.com
|
8b01c8a8e52c317ded383e0ea010e85e3d6512d6
|
1b9c3440db918240045ac9c271a8c4d5f376383c
|
/cogs/help.py
|
f9bd8227f94e6ed511ff09a7c356cfd922d0ab6f
|
[] |
no_license
|
wyz3r0/Sewayaki-bot-no-Aruya-chan
|
c535260901281f619278bde1e6c61d7dae6332c0
|
f8a8ffec0ac8cd5ca0dd9c0604639d986641f0ef
|
refs/heads/master
| 2023-05-08T03:36:39.692081
| 2021-05-25T19:53:25
| 2021-05-25T19:53:25
| 349,130,132
| 0
| 0
| null | 2021-05-25T19:53:25
| 2021-03-18T15:45:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,698
|
py
|
from discord import Embed
from discord.ext import menus
from discord.ext import commands
from discord.utils import get
from typing import Optional
from configparser import ConfigParser
cfg = ConfigParser()
cfg.read('/home/runner/Sewayaki-bot-no-Aruya-chan/cfg.ini')
def syntax(command):
cmd_and_aliases = "|".join([str(command), *command.aliases])
params = []
for key, value in command.params.items():
if key not in ("self", "ctx"):
params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>")
params = " ".join(params)
return f"`{default_cfg.bot_prefix}{cmd_and_aliases} {params}`"
"""
class help_menu(menus.ListPageSource):
def __init__(self, ctx, data):
self.ctx = ctx
super().__init__(data, per_page = 3)
    async def write_page(self, menu, fields = []):
return
async def format_page(self, menu, commands):
fields = []
for cmd in commands:
            fields.append()
"""
class help(commands.Cog):
def __init__(self, client):
self.client = client
self.client.remove_command("help")
async def cmd_help(self, ctx, command):
embed = Embed(title = f"`{command}`", description = syntax(command), colour = cfg.getint('general', 'bot_colour'))
embed.add_field(name = "Command description", value = command.help)
await ctx.send(embed = embed)
@commands.command(name = "help", aliases = ["h"])
async def h(self, ctx, cmd: Optional[str]):
"""shows this message"""
if cmd is None:
pass
else:
if (command := get(self.client.commands, name = cmd)):
await self.cmd_help(ctx, command)
else:
await ctx.send(f"> {cmd} dose not exist")
def setup(client):
client.add_cog(help(client))
|
[
""
] | |
9154c2615a4416e3641be86e6dd82525a25bdeda
|
2ecb310c0be7eb44bef347f69eb67ca28ba1fd69
|
/Ej_1.py
|
1f54b185b6b48e7cd50c40511a2d06bcb91f6022
|
[] |
no_license
|
DianaEstradaGarcia/PracticasPython
|
0a3a3c9a936b1eae4983fccc5e8dda84ca942973
|
b287cb381742c44f6543d27f2b81cea85c9b374e
|
refs/heads/master
| 2023-04-04T12:30:45.804064
| 2021-04-06T17:03:53
| 2021-04-06T17:03:53
| 341,021,508
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 912
|
py
|
numero1=int(input("enter first value "))
print(numero1)
# Using "print" to check that the input function works
numero2=int(input("enter second value "))
print(numero2)
print(type(numero1))
# Using the "type" function to see what type of variable numero1 is. In this case it is an integer (int)
mayor_que=numero1>numero2
print(mayor_que)
if mayor_que:
    print("Number 1 is bigger than Number 2")
    # test 1: printing with a plain text string only
    print(str(numero1) + " is bigger than " + str(numero2))
    # test 2: printing using the variables, converted back to text strings and
    # concatenated to obtain a single string
elif numero1==numero2:
    print(str(numero1) + " is equal to " + str(numero2))
else:
    print(str(numero2) + " is bigger than " + str(numero1))
# if not mayor_que: # equivalent to "else"
#     print(str(numero2) + " is much bigger than " + str(numero1))
|
[
"sima123939@gmail.com"
] |
sima123939@gmail.com
|
3e30025d633cb0e086cacf1445281398ac862d4f
|
9c470198881812e20b8047529bd52cac5dcd5cb1
|
/quickread.py
|
4b4be7a7db20db45d7ac026bcfd2e5d80324ef79
|
[] |
no_license
|
briochh/ptproc
|
43d679637b9fd9b574def7357623224a25fc060c
|
cfda2fb4784fb8670313e3c96427037137be6b26
|
refs/heads/master
| 2021-01-24T17:39:08.844093
| 2015-11-23T11:30:46
| 2015-11-23T11:30:46
| 30,016,083
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This temporary script file is located here:
/Users/briochh/.spyder2/.temp.py
"""
from t2data import *
from t2grids import *
from t2listing import *
import os
import pytoughgrav as ptg
os.chdir('C:/Users/glbjch/Local Documents/Work/Modelling/Cotapaxi')
mod='Cota20150604_1' # define model name
os.chdir(mod)
if not os.path.exists('results'):
os.makedirs('results')
dat=t2data('flow2.inp')
grid=dat.grid
geo=mulgrid('grd.dat')
width=geo.bounds[1][1]-geo.bounds[0][1] #10.0
ptg.makeradial(geo,None,width)
results=t2listing('flow2.out')
os.chdir('results')
results.write_vtk(geo,'output.vtk',grid=grid,flows=True)
|
[
"brioch.hemmings@bristol.ac.uk"
] |
brioch.hemmings@bristol.ac.uk
|
7a0acf73291ae56248b13eac72392da0f2c746b1
|
fb843a509e03efd4220715582766fda5e763879d
|
/business/Store.py
|
32cf230804943fb10a40d15be48b4ec7ac2aeba8
|
[] |
no_license
|
santybm/mom
|
0c37bd579064f91b68b365ed63132b69d26b2651
|
e44ee6383eb6d3b09687c32d2709d22794046895
|
refs/heads/master
| 2021-01-10T04:00:21.506275
| 2015-11-01T18:56:55
| 2015-11-01T18:56:55
| 45,311,004
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
__author__ = 'Herb'
from parse_rest.datatypes import Object
class Store(Object):
pass
|
[
"Johanos@bu.edu"
] |
Johanos@bu.edu
|
d969b067a8794040e593162a61272a1202f041c6
|
4beabfbff92f922269ac96cc5a2b5ae04781cb13
|
/arduino_flattener.py
|
38b17dabd44be78587811b5d8b36229f380e6f48
|
[
"Apache-2.0"
] |
permissive
|
rumpeltux/sd-spi-communications-library
|
895520b50b6406dd861d46cc2928fa8ce13c2424
|
72eb81c41d31a3ea614af75e7f9a01e2776d74f1
|
refs/heads/master
| 2020-09-27T12:28:58.567943
| 2019-12-07T10:20:33
| 2019-12-07T10:20:33
| 226,516,743
| 1
| 0
|
Apache-2.0
| 2019-12-07T13:20:24
| 2019-12-07T13:20:24
| null |
UTF-8
|
Python
| false
| false
| 3,087
|
py
|
##
# @file arduino_flattener.py
# @author Wade Penson
# @date June, 2015
# @brief Flattens the folder structure making it easy to use the library
# with the Arduino IDE.
# @copyright Copyright 2015 Wade Penson
#
# @license Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import re
import sys
import shutil
core = (
"sd_spi_commands\.h",
"sd_spi_info\.h",
"sd_spi(\.c|\.h)",
"arduino_platform_dependencies\.cpp",
"sd_spi_platform_dependencies\.h",
)
unit_tests = (
"sd_spi_tests\.c",
"unit_tests\.ino",
"planckunit(\.c|\.h)",
"printf_redirect\.h",
"serial_c_iface(\.cpp|\.h)"
)
card_info = (
"card_info\.ino",
)
source_dir = "./"
default_dest_dir = "flatten"
source_paths = []
def copy_files(files, dest_dir):
for i in range(len(source_paths)):
if any(re.match(file_name_reg, source_paths[i][1]) for file_name_reg in files):
source_path = os.path.join(source_paths[i][0], source_paths[i][1])
dest_path = os.path.join(dest_dir, source_paths[i][1])
# print('Copied {} to {}'.format(source_path, dest_path))
shutil.copy(source_path, dest_path)
fix_includes(dest_path)
def fix_includes(file):
with open(file) as f:
out_fname = file + ".tmp"
out = open(out_fname, "w")
for line in f:
out.write(re.sub("#include\s*[\"](?:.*\/|)(.*\.h)\s*\"", "#include \"\g<1>\"", line))
out.close()
os.rename(out_fname, file)
for root, dirs, files in os.walk(source_dir):
for filename in files:
source_paths.append([root, filename])
# Create output folder
while True:
temp_dest_dir = raw_input("Output folder (default = flatten/): ")
if temp_dest_dir == "" or re.match("^\s+$", temp_dest_dir):
temp_dest_dir = default_dest_dir
if os.path.isdir(temp_dest_dir):
print "Folder " + temp_dest_dir + " already exists."
else:
try:
os.makedirs(temp_dest_dir)
os.makedirs(temp_dest_dir + "/core_files")
os.makedirs(temp_dest_dir + "/card_info")
os.makedirs(temp_dest_dir + "/unit_tests")
except OSError:
print "Error creating directory."
continue
break
# Copy files
copy_files(core, temp_dest_dir + "/core_files")
copy_files(core + card_info, temp_dest_dir + "/card_info")
copy_files(core + unit_tests, temp_dest_dir + "/unit_tests")
|
[
"wadepenson@gmail.com"
] |
wadepenson@gmail.com
|
376795b7290cf0947523c2c0f3d9bb3656459d5c
|
9157478eaf508276049f5fb80c2ec42f42b0d614
|
/oop/bus.py
|
ec44898aeacfa1d77877d105ded7ef2ef2a156c5
|
[] |
no_license
|
JorgeRobins/python-blockchain
|
f640a9c3fa74e9b2bcf107d08d10f10025a19111
|
7b60ca0178cccb893e6d21040fa1c4c7d3f10bac
|
refs/heads/master
| 2020-04-27T22:03:13.819268
| 2019-03-23T23:07:04
| 2019-03-23T23:07:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 447
|
py
|
from vehicle import Vehicle
class Bus(Vehicle):
    def __init__(self, starting_top_speed=100):
        # super() runs the base-class constructor so its setup is kept, not overwritten;
        # pass the top speed through (Vehicle is assumed to accept it, mirroring this signature)
        super().__init__(starting_top_speed)
        self.passengers = []
def add_group(self, passengers):
self.passengers.extend(passengers)
bus1 = Bus(150)
bus1.add_warning('Test')
bus1.add_group(['Max', 'Manuel', 'Anna'])
print(bus1.passengers)
bus1.drive()
|
[
"jrobins@mazepoint.com"
] |
jrobins@mazepoint.com
|
33a9ca4954a0be409500c41ace35ca2aa409ae49
|
524c0d4a584caef20abc6745037e100a2a0c0655
|
/cryptomath.py
|
d2e6fe2a83026fb2689ad4d4f51068c4f6c23d53
|
[] |
no_license
|
Anthem9/CTF_Crypto
|
d26d4fe8ee99893e460ad68e17e18e4b54fb2555
|
de55e27fc1b371f3462bd1c32920aab440ef1958
|
refs/heads/master
| 2021-05-01T06:04:19.556653
| 2018-02-11T15:09:40
| 2018-02-11T15:09:40
| 121,135,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
#Cryptomath Module
def gcd(a, b):
    # compute the greatest common divisor (Euclid's algorithm)
while a != 0:
a, b = b % a, a
return b
def findModInverse(a, m):
    # compute the modular inverse via the extended Euclidean algorithm
if gcd(a, m) != 1:
return None
u1, u2, u3 = 1, 0, a
v1, v2, v3 = 0, 1, m
while v3 != 0:
q = u3 // v3
v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2 , v3
return u1 % m
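# Quick check (illustrative): findModInverse(7, 26) returns 15, and
# 7 * 15 == 105 == 4 * 26 + 1, so 7 * 15 % 26 == 1.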
|
[
"guoxiaoxiaoyu@gmail.com"
] |
guoxiaoxiaoyu@gmail.com
|
1935e57dd7a332b65b5a71f3867409bf2495116c
|
a3783b1f5c075bb1ee3cb2d2d486710677204378
|
/lesson3_step6.py
|
eeeb095d79233f83dea0e89ddbfaaa2d99726a5e
|
[] |
no_license
|
Yuliya4325/Auto-tests-course
|
2c1a3240f3c8d8ececa63ea5cec03c4ab07f3d64
|
b70fcaf0f04c5374e7c3f8ac77c42b1aad67f1dc
|
refs/heads/master
| 2022-11-25T23:27:21.961714
| 2020-07-15T20:08:32
| 2020-07-19T15:48:00
| 278,929,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
from selenium import webdriver
import time
import math
try:
link = "http://suninjuly.github.io/redirect_accept.html"
browser = webdriver.Chrome()
browser.get(link)
button1 = browser.find_element_by_css_selector("button.trollface")
button1.click()
new_window = browser.window_handles[1]
browser.switch_to.window(new_window)
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
x_element = browser.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
input1 = browser.find_element_by_id("answer")
input1.send_keys(y)
button2 = browser.find_element_by_css_selector("button.btn")
button2.click()
finally:
time.sleep(10)
browser.quit()
|
[
"Yuliya1@iMac-Yuliya.local"
] |
Yuliya1@iMac-Yuliya.local
|
c5934f4b90758b7b0e1417135576b43e12408f78
|
8f941c27b2fef6ca001d4c961b9969fb5c3af7aa
|
/farming/settings.py
|
7d23e64948b9ff1a2aaa94e2bccbdc121df5f5bf
|
[] |
no_license
|
smithellis/farming
|
1671e51a42109aa43e0df138e6dbb5df68e572a2
|
5073a0880ce63e54718d58c6036e3c05dd302dbc
|
refs/heads/master
| 2021-01-15T11:18:45.301090
| 2014-11-04T19:23:42
| 2014-11-04T19:23:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,045
|
py
|
"""
Django settings for farming project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g7u73@1(*r3ht(o57q^z=j5k00)e1dts(kg!j@b_z(w=&4e2n='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'farming.urls'
WSGI_APPLICATION = 'farming.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
|
[
"smithe@gmail.com"
] |
smithe@gmail.com
|
2ca40c9745cafec57f504ad00865b8a15eb016d0
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_15419.py
|
b16cd84c9dd57f33adebb10f84dbec1286edafa9
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 79
|
py
|
# Python comparing individual lists of lists elements
if x[i][0] > y[i][0]:
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
10a42a6cd0ea1f64e2bcbb93e47165bbf0af7d3a
|
4b6add2be2207c64f9bab6d2611dff45ba655613
|
/02_ProgramFlow/21_augmented assignment loop_ CHALLENGE.py
|
5cee4ab942734bfa75165ae646111589d6b832b8
|
[] |
no_license
|
bidhutdhakal/python
|
65a69d4c31239dc087dda41a6aba8aab021b351c
|
7174a33684df020b1afa339fa2d783e7614adb79
|
refs/heads/master
| 2022-11-15T02:10:13.250935
| 2020-07-19T10:23:17
| 2020-07-19T10:23:17
| 275,973,717
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# Early computers could only perform addition. In order to multiply one number by another, they performed repeated addition.
# For example, 5 * 8 was performed by adding 5 eight times.
# 5 + 5 + 5 + 5 + 5 +5 + 5 + 5 = 40
# Use a for loop, and augmented assignment, to give answer the result of multiplying number by multiplier.
number = 5
multiplier = 8
answer = 0
# add your loop after this comment
for i in range(multiplier):
answer += number
print(answer)
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~2~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
|
[
"bidhut1996@gmail.com"
] |
bidhut1996@gmail.com
|
a6a8afa4e3ca2f14f48f0c9ebd11520a7b0e9b34
|
a65622c6b3d8c570f113af3ee49b4c4f2ec4995c
|
/PY_files/username.py
|
0cf47754330637e87f5921af0ad86947059ec343
|
[] |
no_license
|
muthu255/python
|
62a88d32450d71a9297b65ae1865c6125a716a8d
|
4c1135388b2ba36ac4c23e952e064496db9cab82
|
refs/heads/master
| 2022-07-03T14:49:35.008669
| 2020-05-13T19:22:40
| 2020-05-13T19:22:40
| 263,722,427
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
import tkinter
from tkinter import filedialog
from tkinter import *
root=tkinter.Tk()
root.title('Enter your database credentials')
e1=Entry(root,width=20,borderwidth=5,)
e1.grid(row=2,column=4,columnspan=3,padx=10,pady=10)
e2=Entry(root,width=20,borderwidth=5,show='*')
e2.grid(row=3,column=4,columnspan=5,padx=10,pady=10)
lb1=Label(root,text='USER NAME :').grid(row=2,column=3)
lb2=Label(root,text='PASSWORD :').grid(row=3,column=3)
def check():
user_name=e1.get()
Password=e2.get()
print('USER NAME :',user_name)
print('PASSWORD :',Password)
but=Button(root,text='OK',command=check)
but.grid(row=4,column=5)
root.mainloop()
|
[
"noreply@github.com"
] |
muthu255.noreply@github.com
|
46a2e88f482b70548c82568f1d10bf2234d6b0e0
|
7bededcada9271d92f34da6dae7088f3faf61c02
|
/pypureclient/flasharray/FA_2_20/models/array.py
|
b454053102b55f917520181b04db56e7ba183f91
|
[
"BSD-2-Clause"
] |
permissive
|
PureStorage-OpenConnect/py-pure-client
|
a5348c6a153f8c809d6e3cf734d95d6946c5f659
|
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
|
refs/heads/master
| 2023-09-04T10:59:03.009972
| 2023-08-25T07:40:41
| 2023-08-25T07:40:41
| 160,391,444
| 18
| 29
|
BSD-2-Clause
| 2023-09-08T09:08:30
| 2018-12-04T17:02:51
|
Python
|
UTF-8
|
Python
| false
| false
| 7,115
|
py
|
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class Array(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'banner': 'str',
'capacity': 'int',
'console_lock_enabled': 'bool',
'encryption': 'ArrayEncryption',
'eradication_config': 'EradicationConfig',
'idle_timeout': 'int',
'ntp_servers': 'list[str]',
'os': 'str',
'parity': 'float',
'scsi_timeout': 'int',
'space': 'Space',
'version': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'banner': 'banner',
'capacity': 'capacity',
'console_lock_enabled': 'console_lock_enabled',
'encryption': 'encryption',
'eradication_config': 'eradication_config',
'idle_timeout': 'idle_timeout',
'ntp_servers': 'ntp_servers',
'os': 'os',
'parity': 'parity',
'scsi_timeout': 'scsi_timeout',
'space': 'space',
'version': 'version'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
banner=None, # type: str
capacity=None, # type: int
console_lock_enabled=None, # type: bool
encryption=None, # type: models.ArrayEncryption
eradication_config=None, # type: models.EradicationConfig
idle_timeout=None, # type: int
ntp_servers=None, # type: List[str]
os=None, # type: str
parity=None, # type: float
scsi_timeout=None, # type: int
space=None, # type: models.Space
version=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
banner (str)
capacity (int): The usable capacity in bytes.
console_lock_enabled (bool)
encryption (ArrayEncryption)
eradication_config (EradicationConfig)
idle_timeout (int): The idle timeout in milliseconds. Valid values include `0` and any multiple of `60000` in the range of `300000` and `10800000`. Any other values are rounded down to the nearest multiple of `60000`.
ntp_servers (list[str])
os (str): Specifies the operating system. Valid values are `Purity`, `Purity//FA`, and `Purity//FB`.
parity (float): A representation of data redundancy on the array. Data redundancy is rebuilt automatically by the system whenever parity is less than `1.0`.
scsi_timeout (int): The SCSI timeout. If not specified, defaults to `60s`.
space (Space)
version (str)
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if banner is not None:
self.banner = banner
if capacity is not None:
self.capacity = capacity
if console_lock_enabled is not None:
self.console_lock_enabled = console_lock_enabled
if encryption is not None:
self.encryption = encryption
if eradication_config is not None:
self.eradication_config = eradication_config
if idle_timeout is not None:
self.idle_timeout = idle_timeout
if ntp_servers is not None:
self.ntp_servers = ntp_servers
if os is not None:
self.os = os
if parity is not None:
self.parity = parity
if scsi_timeout is not None:
self.scsi_timeout = scsi_timeout
if space is not None:
self.space = space
if version is not None:
self.version = version
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Array, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Array):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
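# Usage sketch (illustrative values): only keys listed in `attribute_map` are
# accepted, so Array(name="array-01", capacity=1024).to_dict() returns
# {'name': 'array-01', 'capacity': 1024}, while arr['bogus'] raises KeyError.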
|
[
"noreply@github.com"
] |
PureStorage-OpenConnect.noreply@github.com
|
d0118703cd1be666f42813c975c6d146b4aba794
|
f72a82c5d8bc76078b9969ddd45d2cb94b65653d
|
/ccc/py/ccc15j3_oneline.py
|
a0e5b0f32ce89b79a0d8630a467725a6f845a374
|
[
"MIT"
] |
permissive
|
tylertian123/CompSciSolutions
|
1f76d6d7b60358ea24812b96b9f3d7d54c5ffdc8
|
33769a20ea613439f92055b40deeac4927cb0a91
|
refs/heads/master
| 2021-06-17T20:55:49.812172
| 2021-02-21T05:42:24
| 2021-02-21T05:42:24
| 179,722,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 194
|
py
|
for c in input():print(c,(min((abs(ord(c)-ord(v)),v)for v in'aeiou')[1]+((chr(ord(c)+1)if chr(ord(c)+1)not in'aeiou'else chr(ord(c)+2))if c!='z'else'z'))if c not in('aeiou')else'',sep='',end='')
|
[
"tylertian123@gmail.com"
] |
tylertian123@gmail.com
|
512035201e3a6c4d9414e34ad579f8b05cf82917
|
ff55d72d5265d42ce57f0e95347421c23c0e32f5
|
/python/p3
|
6b727111864558ab2950eca30cd757cb4a929756
|
[] |
no_license
|
tolgaman/testrepo
|
927f563cdd8eb00557b231766cd20260b63f9d11
|
39fa5d1e82ccfb3931bc4a73bd41e187a005fe1f
|
refs/heads/master
| 2020-04-05T22:44:04.441075
| 2019-07-24T22:12:17
| 2019-07-24T22:12:17
| 32,182,896
| 0
| 0
| null | 2015-03-13T21:58:09
| 2015-03-13T21:58:09
| null |
UTF-8
|
Python
| false
| false
| 1,885
|
#!/bin/python
pwd="lnzxklcvgonolxoy"
import email, getpass, imaplib, os
detach_dir = '.' # directory where to save attachments (default: current)
user = raw_input("Enter your GMail username:")
#pwd = getpass.getpass("Enter your password: ")
# connecting to the gmail imap server
m = imaplib.IMAP4_SSL("imap.gmail.com")
m.login(user,pwd)
m.select("cs2043") # here you a can choose a mail box like INBOX instead
# use m.list() to get all the mailboxes
resp, items = m.search(None, "ALL") # you could filter using the IMAP rules here (check http://www.example-code.com/csharp/imap-search-critera.asp)
items = items[0].split() # getting the mails id
for emailid in items:
resp, data = m.fetch(emailid, "(RFC822)") # fetching the mail, "`(RFC822)`" means "get the whole stuff", but you can ask for headers only, etc
email_body = data[0][1] # getting the mail content
mail = email.message_from_string(email_body) # parsing the mail content to get a mail object
#Check if any attachments at all
if mail.get_content_maintype() != 'multipart':
continue
print "["+mail["From"]+"] :" + mail["Subject"]
	# we use walk to create a generator so we can iterate over the parts and forget about the recursive headache
for part in mail.walk():
# multipart are just containers, so we skip them
if part.get_content_maintype() == 'multipart':
continue
# is this part an attachment ?
if part.get('Content-Disposition') is None:
continue
#filename = part.get_filename()
filename = mail["From"] + "_hw1answer"
att_path = os.path.join(detach_dir, filename)
#Check if its already there
if not os.path.isfile(att_path) :
# finally write the stuff
fp = open(att_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
|
[
"harun@eagle.crest.com"
] |
harun@eagle.crest.com
|
|
400773e1388d41b266ef404a2c92ffa8990ba3f4
|
f5b4ff16138994d97bbdd7bc6bc9f3d6bbba9a1e
|
/Otp.spec
|
583003b8b3484a4d402bb0b891fb09e75127aa0b
|
[] |
no_license
|
Sudhir7832/Otp.pyw
|
903b31d7b21a79e3f1e5d5fa2718a1f1cdc45863
|
2365721fc1b684ce74652c3ee870ef1d5573a5f2
|
refs/heads/master
| 2023-07-11T16:18:26.864103
| 2021-08-12T15:31:28
| 2021-08-12T15:31:28
| 395,364,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 974
|
spec
|
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['Otp.pyw'],
pathex=['C:\\Users\\DELL\\Desktop\\otp'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
[],
exclude_binaries=True,
name='Otp',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=False )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='Otp')
|
[
"Skm02862@gmail.com"
] |
Skm02862@gmail.com
|
a02d20ef716ffb5e171da88ab492aba679f903ae
|
c250f7e1f1cbbc45bf7a6f28f8169cf6e7c74b38
|
/venv/Scripts/pip3-script.py
|
91405bcc9a5225247189fbc367eae4dcef36f1aa
|
[] |
no_license
|
Binetou1996/FirstProject1
|
40d74e4d1154a842ca2edbee0431ffcf17521c82
|
4af9b0269ac1c4e4ad2557e48b54934b4af5ca5f
|
refs/heads/master
| 2020-04-10T23:01:25.721331
| 2018-12-20T18:49:56
| 2018-12-20T18:49:56
| 161,339,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
#!C:\Users\HP\PycharmProjects\DjangoProject\FirstProject\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
|
[
"fatoubinetou196@gmail.com"
] |
fatoubinetou196@gmail.com
|
bf43644aded3dbfbc4cbf8ae8a0f2aa704ba25ce
|
d416d916492cd48b75d9f21bbf01625072df4820
|
/DMeans.py
|
1e889061d9f6adf6fa81f3c7c017a52a9224cc51
|
[] |
no_license
|
ZongweiZhou1/Multitude
|
da4392940d60e1cc827e31bdf75ce64e6d9f688d
|
db2a9df7ac00f13635b8a1408d9a01bbed1f846c
|
refs/heads/master
| 2020-06-26T00:09:55.397847
| 2020-01-13T03:33:35
| 2020-01-13T03:33:35
| 199,463,718
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,131
|
py
|
import numpy as np
import random
from time import time

from dynmeans_py.dynmeans import DynMeans
from utils.matching import matching


class DMeans():
    def __init__(self, v_lambda, T_Q, K_tau, nRestarts=10, match_func=matching):
        Q = v_lambda / T_Q
        tau = (T_Q * (K_tau - 1.0) + 1.0) / (T_Q - 1.0)
        self.dynmeans = DynMeans(v_lambda, Q, tau)
        if nRestarts <= 0:
            raise ValueError('libdynmeans: ERROR: Cannot have nRestarts <= 0')
        self.nRestarts = nRestarts
        self.match_func = match_func

    def cluster(self, newobs, ref_obs, verbose=False):
        """
        :param newobs: N x 2, double
        :param ref_obs: M x 2, double
        :param verbose: print per-restart progress when True
        :return: (finalLabels, finalObj, tTaken)
        """
        tStart = time()
        if len(newobs) == 0:
            raise ValueError('libdynmeans: ERROR: newobservations is empty')
        newobservations = newobs.flatten().tolist()
        ref_observations = ref_obs.flatten().tolist()
        self.dynmeans.set_data(newobservations, ref_observations)
        observations_num = len(newobs)
        randOrderings = list(range(observations_num))
        finalObj = np.inf
        if verbose:
            print("libdynmeans: Clustering {} datapoints with {}"
                  " restarts.".format(observations_num, self.nRestarts))
        for i in range(self.nRestarts):
            self.dynmeans.set_tmpVariables(observations_num)
            obj, prevobj = np.inf, np.inf
            random.shuffle(randOrderings)
            for j in range(100):
                prevobj = obj
                self.dynmeans.assignObservations(randOrderings)
                prms = np.array(self.dynmeans.first_updateParams()).reshape(-1, 2)
                mars, mrds = self.match_func(prms, ref_obs)  # input args are all 2d
                obj = self.dynmeans.setParameters(mars, mrds)
                if obj > prevobj:
                    print("Error: obj > prevobj - monotonicity violated! "
                          "Check your distance/set parameter functions...")
                    print("obj: {}, prevobj: {}".format(obj, prevobj))
                    break
                if verbose:
                    print("libdynmeans: Trial: [{}/{}] objective: {}".format(i + 1, self.nRestarts, obj))
                    self.dynmeans.pin_debug(1)
                if obj == prevobj:
                    break
            if obj < finalObj:
                finalObj = obj
                self.dynmeans.set_finalPrms()
        if verbose:
            print('libdynmeans: Done clustering. Min Objective: {}'.format(finalObj))
            self.dynmeans.pin_debug(2)
        finalLabels = self.dynmeans.updateState()
        tTaken = time() - tStart
        return finalLabels, finalObj, tTaken


if __name__ == '__main__':
    v_lambda = 0.05
    T_Q = 6.8
    K_tau = 1.01
    dmeans = DMeans(v_lambda, T_Q, K_tau)
    for i in range(100):
        newobs = np.random.rand(50, 2)
        ref_obs = np.random.rand(8, 2)
        finalLabels, finalObj, tTaken = dmeans.cluster(newobs, ref_obs, verbose=True)
        print(finalLabels)
        print(finalObj)
        print(tTaken)
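
# Note (added sketch, not part of the original file): the constructor maps the
# user-facing knobs onto the Dynamic Means parameters -- `v_lambda` is the
# cluster creation cost, `Q = v_lambda / T_Q` a transition penalty, and `tau`
# an age-decay factor. The exact semantics of DynMeans and of the `matching`
# helper live in the compiled dynmeans_py / utils modules, which are assumed
# here and not shown.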
|
[
"zhouzongwei2016@ia.ac.cn"
] |
zhouzongwei2016@ia.ac.cn
|
044ef7733d33340e7cf093fa5b1b04a826c31548
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_summary.py
|
18d09be192ac1b4023f64ab173806411d3dcea87
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# class header
class _SUMMARY():
    def __init__(self):
        self.name = "SUMMARY"
        self.definitions = [u'done suddenly, without discussion or legal arrangements: ']

        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.specie = 'adjectives'

    def run(self, obj1, obj2):
        self.jsondata[obj2] = {}
        self.jsondata[obj2]['properties'] = self.name.lower()
        return self.jsondata
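
# Usage sketch (added for illustration; not part of the original file):
#
#   word = _SUMMARY()
#   word.run('dismissal', 'summary')
#   # -> {'summary': {'properties': 'summary'}}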
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
b6f61d9cae0e2d0f9dc640ed398d6d16ccf52c85
|
2e80c9630036d493400a5d568f2bae952ddd438a
|
/contents/1_Demo/05장 클래스/상속03_재정의.py
|
8eec3a91f2b0fe05c6a0375d0847b8878f3d9b58
|
[] |
no_license
|
gregor77/start-python
|
2a18847f93c3630d4106d342b63632b5a1ec119d
|
26481941f61eddd1f34aa53aa4c6f4afd0c03580
|
refs/heads/master
| 2021-01-10T10:56:43.225164
| 2015-10-29T08:20:26
| 2015-10-29T08:20:26
| 44,936,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
class Person:
    " Super Class "
    def __init__(self, name, phoneNumber):
        self.Name = name
        self.PhoneNumber = phoneNumber

    def PrintInfo(self):
        print("Info(Name: {0}, Phone Number: {1})".format(self.Name, self.PhoneNumber))

    def PrintPersonData(self):
        print("Person(Name: {0}, Phone Number: {1})".format(self.Name, self.PhoneNumber))


class Student(Person):
    " Sub Class "
    def __init__(self, name, phoneNumber, subject, studentID):
        # explicitly call the Person constructor
        Person.__init__(self, name, phoneNumber)
        self.Subject = subject
        self.StudentID = studentID

    def PrintStudentData(self):
        print("Student(Subject: {0}, Student ID: {1})".format(self.Subject, self.StudentID))

    def PrintInfo(self):  # override Person's PrintInfo() method
        print("Info(Name: {0}, Phone Number: {1})".format(self.Name, self.PhoneNumber))
        print("Info(Subject: {0}, Student ID: {1})".format(self.Subject, self.StudentID))


p = Person("Derick", "010-222-3333")
s = Student("Marry", "010-333-4444", "Computer Science", "990000")
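
# Demonstration (added sketch; these calls are not in the original source):
# the overridden PrintInfo() is resolved on the Student instance.
p.PrintInfo()  # Person.PrintInfo -> name and phone number only
s.PrintInfo()  # Student.PrintInfo (override) -> also prints subject and ID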
|
[
"gregor77@naver.com"
] |
gregor77@naver.com
|