# Generated by Django 2.2 on 2020-10-04 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instagram', '0002_api_error_instagram_account'),
]
operations = [
migrations.AddField(
model_name='instagram_accounts',
name='current_proxy',
            field=models.CharField(default='188.166.83.102', max_length=200, verbose_name='Proxy IP Address'),
),
]
|
from . import legacy
from .keras_pipeline import KerasPipeline
from .utils import copytree, save_parameter_dict
|
# Given a number n, count the strings of length n over the ternary alphabet
# {'a', 'b', 'c'} in which #a <= #b <= #c, in O(n * S(n)) time.
#
# Example:
# n | count
# --|----------
# 1 | 1 ('c')
# 2 | 3 ('cc', 'cb', 'bc')
# 3 | 10 ('cba' and its permutations, 'ccc', 'ccb' and its arrangements)
# 4 | 23
def ternario(n):
'''
    Return the number of strings over the ternary alphabet {'a', 'b', 'c'}
    such that #a <= #b <= #c.
'''
def genera(i = 0, a = 0, b = 0, c = 0):
        if i == n: # leaf
return 1
        else: # internal node
total = 0
            # Add an 'a'.
            b1 = max(0, a + 1 - b) # 'b's needed to cover the added 'a'
            c1 = max(0, b + b1 - c) # 'c's needed in turn to cover the 'b's.
if b1 + c1 <= n - i - 1:
total += genera(i + 1, a + 1, b, c)
            # Add a 'b'.
b1 = max(0, a - (b + 1))
c1 = max(0, b + 1 + b1 - c)
if b1 + c1 <= n - i - 1:
total += genera(i + 1, a, b + 1, c)
            # Add a 'c'.
b1 = max(0, a - b)
c1 = max(0, b + b1 - (c + 1))
if b1 + c1 <= n - i - 1:
total += genera(i + 1, a, b, c + 1)
return total
return genera()
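# Minimal demo (an addition, not part of the original module): print the counts
# for the first few values of n and compare with the table above.
if __name__ == '__main__':
    for n in range(1, 5):
        print(n, ternario(n))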
|
import classes as c
x = c.Datum(-1.1, 0.08)
print(x)
paolo = c.Person("Paolo")
paolo.display()
print(paolo)
|
import socket
IP = '10.2.4.64'  # change to the peer's IP and port
port = 8812
address = (IP, port)
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cli.connect(address)
while True:
    msg = input('type your msg: ')
    msg = '马塞洛:{}'.format(msg)
    cli.send(msg.encode('utf8'))
    remsg = cli.recv(1024)
    if not remsg:  # an empty reply means the peer closed the connection
        cli.close()
        break
    print(remsg.decode('utf8'))
|
from django.db import models
from django.utils.translation import gettext_lazy as _
from users.models import User
class SongGroup(models.Model):
name = models.CharField(verbose_name="分组名称", max_length=50)
user = models.ForeignKey(User, on_delete=models.CASCADE)
class Meta:
db_table="singer_song_group"
verbose_name_plural = verbose_name = "歌曲分组"
def __str__(self):
return self.name
# Songs available for requests
class Song(models.Model):
name = models.CharField(verbose_name="歌曲名称", max_length=50)
create_time = models.DateTimeField(verbose_name="创建时间", auto_now=True)
update_time = models.DateTimeField(verbose_name="更新时间", auto_now=True)
singer = models.CharField(_("原唱歌手"), max_length=50)
user = models.ForeignKey(User, on_delete=models.CASCADE)
group = models.ForeignKey(SongGroup, verbose_name=_("分组名称"), on_delete=models.CASCADE, blank=True, null=True, related_name="songs")
is_pub = models.BooleanField(verbose_name="是否发布", default=True)
class Meta:
db_table="singer_song"
verbose_name_plural = verbose_name = "歌曲"
def __str__(self):
return self.name
# Performance (song-request) list
class SongList(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE, verbose_name="用户")
song = models.CharField(verbose_name="歌名", max_length=50, blank=True)
create_time = models.DateTimeField(verbose_name="点歌时间", auto_now=True)
sang_time = models.DateTimeField(verbose_name="唱歌时间", auto_now=True)
sponsor = models.CharField(verbose_name="打赏人",max_length=50)
money = models.DecimalField(verbose_name="打赏金额",max_digits=6,decimal_places=2,default=0)
is_sang = models.BooleanField(verbose_name="是否已唱", default=False)
class Meta:
db_table="singer_song_list"
verbose_name_plural = verbose_name = "点歌列表"
|
import os
import json
from decouple import config, Csv
from django import template
from django.db.models import Count
from accounts.models import User
from ..models import Category, Post, BibleStudies, Devotion
register = template.Library()
@register.filter
def human_format(num):  # format a long number, e.g. 1000 -> "1k"
num = float("{:.3g}".format(num))
# num = '{:.3}'.format(float(num))
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
return '{}{}'.format('{:f}'.format(num).rstrip('0').rstrip('.'), ['', 'k', 'm', 'b', 't', 'p'][magnitude])
# register.filter('human_format', human_format)
# print(human_format(999999))
@register.simple_tag(takes_context=True)
def user_categories_list(context):
    # retrieve the user's topics / categories
request = context['request']
return Category.objects.filter(user=request.user.id, is_active=True)
@register.simple_tag
def popular_post(count=4):
    # show the most-liked (popular) posts
return Post.objects.annotate(like_count=Count('likes'), total_post_comments=Count('comment')).order_by('-like_count')[:count]
@register.simple_tag(takes_context=True)
def who_to_follow(context, count=1):
    # retrieve random users for a logged-in user to follow
request = context['request']
return User.objects.filter(is_active=True).order_by('?').exclude(id=request.user.id).distinct()[:count]
@register.simple_tag
def google_analytics_id():
return config("GOOGLE_ANALYTICS_ID")
|
from django.conf.urls import url
from registration import views

urlpatterns = [
    url(r'^registration/$', views.registration, name='registration'),
    url(r'^register-complete/$', views.register_complete, name='register_complete'),
]
|
# chat/consumers.py
import json
from asgiref.sync import async_to_sync,sync_to_async
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from chat.models import Message
from django.conf import settings
from .views import get_last_10_messages,get_curent_chat
from channels.db import database_sync_to_async
# from user.models import Message
from accounts.models import User
#User=settings.AUTH_USER_MODEL
class ChatConsumer(AsyncJsonWebsocketConsumer):
async def fetch_messages(self,data):
print('fetching')
messages=await database_sync_to_async(get_last_10_messages)(int(self.room_name))
message_json = await self.messages_to_json(messages,self.room_name)
context ={
'command': 'messages',
'messages' : message_json
}
await self.send_message(context)
# def typing(self,data) :
# person =await database_sync_to_async(User.objects.get)(username=data['username'])
# context ={
# 'command':'typing',
# 'type':data['type'],
# 'message':{
# 'name':person.username
# }
# }
# await self.send_chat_message(context)
""" def online(self,data) :
person= User.objects.get(username=data['username'])
context ={
'command':'online',
'message':{
'name':person.username
}
}
self.send_chat_message(context)"""
async def new_messages(self,data) :
print("new message")
user = await database_sync_to_async(User.objects.get)(username =data["from"])
# author_user=User.objects.filter(username=contact)[0]
message = await database_sync_to_async(Message.objects.create)(user=user,content=data['message'])
message_json = await self.message_to_json(message,self.room_name)
content={
'command':'new_message',
'message': message_json
}
current_chat = await database_sync_to_async(get_curent_chat)(self.room_name)
await database_sync_to_async(current_chat.messages.add)(message)
await database_sync_to_async(current_chat.save)()
# print(data['message'])
return await self.send_chat_message(content)
async def send_media(self,event) :
item = event["item"]
content = {
"url" : item["url"],
"media_type":item["media_type"],
"caption" : item["caption"],
"author" : item["user"],
"command" : "media"
}
await self.send_message(content)
@database_sync_to_async
def messages_to_json(self,messages,id) :
result = []
for message in messages:
if message.content_type :
media_type = message.content_type.model
result.append({
'id':message.id,
'author':message.user.username,
'url':message.item.file.url,
'title':message.item.title,
'timestamp':str(message.timestamp),
'chatId':id,
"media_type":media_type
})
else :
result.append({
'id':message.id,
'author':message.user.username,
'content' : message.content,
'timestamp':str(message.timestamp),
'chatId':id
})
return result
@database_sync_to_async
def message_to_json(self,message,id):
return {
'id':message.id,
'author':message.user.username,
'content':message.content,
'timestamp':str(message.timestamp),
'chatId':id
}
commands ={
'fetch_messages': fetch_messages,
'new_message' : new_messages,
# 'online':online,
#'typing':typing,
'media':send_media
}
async def connect(self):
print("connecting")
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % str(self.room_name)
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
    # Receive message from WebSocket (receive_json gets already-decoded JSON content)
    async def receive_json(self, content, **kwargs):
        data = content
        await self.commands[data['command']](self, data)
async def send_chat_message(self,message) :
#message =data_json['message']
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
print(self.room_group_name)
async def send_message(self,context) :
await self.send_json(content=context
)
# Receive message from room group
async def chat_message(self, event):
# print('on chat.message worked')
message = event['message']
# Send message to WebSocket
await self.send_json(content={
'message':message
}
)
|
#!/usr/bin/python3
"""
Prints all City objects from the database hbtn_0e_14_usa
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sys import argv
from model_state import Base, State
from model_city import City
if __name__ == '__main__':
try:
engine = create_engine(
'mysql+mysqldb://{}:{}@localhost:3306/{}'.format(
argv[1], argv[2], argv[3]), pool_pre_ping=True
)
Session = sessionmaker(bind=engine)
session = Session()
results = session.query(City, State).filter(City.state_id == State.id)
for city, state in results.order_by(City.id).all():
print('{}: ({}) {}'.format(state.name, city.id, city.name))
session.close()
except Exception as e:
print("Error: {}".format(e))
|
import random
import pickle
from .wiki_category import WikiCategory
class WikiDataLoader:
"""Handle data loading and saving of articles from multiple wikipedia category.
Randomly select `N` wikipedia categories among `CATEGORIES` and load articles text extract for
each of them using the `WikiCategory` class.
Note:
        When loading a category, it first looks for a corresponding pickle. If none is found,
        it loads the data directly from the Wikipedia API.
Attributes:
        data (List[Dict]): Stores, for each category, its name and all the retrieved texts.
"""
# Constant
CATEGORIES = [
"Category:Physics",
"Category:Arts",
"Category:Biology",
"Category:Electronics",
"Category:Earth sciences",
"Category:Diseases and disorders",
"Category:Chemistry",
"Category:Astronomy",
"Category:Sports",
"Category:Nutrition"
]
def __init__(self, N):
"""Initialize attributes and load the Categories.
Args:
N (int): Number of categories to load (must be between 1 and 10).
categories (List[str]): Store the selected categories.
"""
self.data = []
self.categories = []
try:
# category_indices = random.sample(range(10), N)
category_indices = range(N)
except ValueError:
print('The number of category N must be between 1 and 10')
for category_index in category_indices:
category_str = WikiDataLoader.CATEGORIES[category_index]
self.categories.append(category_str)
self.load(category_str)
def load(self, category_str):
"""Load the corresponding category either from pickle or `WikiCategory`.
Args:
category_str (str): Query string representing the category.
"""
filename = category_str.replace('Category:', '')
try:
texts = pickle.load(open('./saved_states/pickles/' + filename + '.pickle', "rb"))
print(category_str + " retrieved from file!")
except (OSError, IOError):
category = WikiCategory(category_str)
category.fetch_all_pageids()
category.fetch_all_text()
category.save_to_file()
texts = category.texts
self.data.append({
'category': self.categories.index(category_str),
'texts': texts
})
    def getFullCorpus(self):
        """Yield (category index, text) pairs over the whole corpus."""
for data in self.data:
for text in data['texts']:
yield (data['category'], text)
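# A minimal usage sketch (an assumption, not part of the original module): run it
# from within the package so the relative import of `wiki_category` resolves, with
# network access or the ./saved_states/pickles/ directory already populated.
#
#     loader = WikiDataLoader(3)
#     for category_id, text in loader.getFullCorpus():
#         print(category_id, text[:80])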
|
from django import forms
from datetime import datetime
class Register(forms.Form):
years_to_display = range(datetime.now().year - 100, datetime.now().year + 1)
first_name = forms.CharField(
label = "First Name",
max_length = 45,
min_length = 2,
widget = forms.TextInput(
attrs = {
"class": "form-control",
"placeholder": "Your first name",
}
)
)
last_name = forms.CharField(
label="Last Name",
max_length=45,
min_length = 2,
widget=forms.TextInput(
attrs={
"class": "form-control",
"placeholder": "Your last name",
}
)
)
email = forms.EmailField(
label = "Email",
max_length = 45,
widget = forms.TextInput(
attrs = {
"class": "form-control",
"placeholder": "Your email",
}
)
)
password = forms.CharField(
label = "Password",
max_length = 45,
min_length = 8,
widget = forms.PasswordInput(
attrs = {
"class": "form-control",
"placeholder": "Your password",
}
)
)
cpassword = forms.CharField(
label = "Confirm Password",
max_length = 45,
widget = forms.PasswordInput(
attrs = {
"class": "form-control",
"placeholder": "Your password",
}
)
)
birthday = forms.DateField(
        widget = forms.SelectDateWidget(
years = years_to_display,
attrs = {
"class": "form-control",
"placeholder": "Your birthdate",
}
)
)
class Login(forms.Form):
email = forms.EmailField(
label = "Email",
max_length = 45,
widget = forms.TextInput(
attrs = {
"class": "form-control",
"placeholder": "Your email",
}
)
)
password = forms.CharField(
label = "Password",
max_length = 45,
min_length = 8,
widget = forms.PasswordInput(
attrs = {
"class": "form-control",
"placeholder": "Your password",
}
)
)
|
def bouncing_ball(initial, proportion):
output = 0
while initial > 1:
initial *= proportion
output +=1
return output
'''
You drop a ball from a given height. After each bounce,
the ball returns to some fixed proportion of its previous height.
If the ball bounces to height 1 or less, we consider it to have stopped bouncing.
Return the number of bounces it takes for the ball to stop moving.
bouncingBall(initialHeight, bouncingProportion)
bouncingBall(4, 0.5)
After first bounce, ball bounces to height 2
After second bounce, ball bounces to height 1
Therefore answer is 2 bounces
bouncingBall(30, 0.3)
After first bounce, ball bounces to height 9
After second bounce, ball bounces to height 2.7
After third bounce, ball bounces to height 0.81
Therefore answer is 3 bounces
Initial height is an integer in range [2,1000]
Bouncing Proportion is a decimal in range [0, 1)
'''
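# A quick check of bouncing_ball() against the two worked examples above
# (a minimal sketch; runs only when the file is executed directly).
if __name__ == '__main__':
    assert bouncing_ball(4, 0.5) == 2
    assert bouncing_ball(30, 0.3) == 3
    print('bouncing_ball: checks passed')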
|
import argparse
import pandas as pd
import numpy as np
import os
import ntpath
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
"""
Preprocessing functions
"""
def normalize(x, scalerType):
"""
    Normalize the columns of the given array.
Parameters
==========
x : pandas.DataFrame
The input data array (NxM).
scalerType : sklearn.base.BaseEstimator
Type of scaler to use (StandardScaler or MinMaxScaler).
Returns
=======
: pandas.DataFrame
The array normalized.
"""
if len(x) > 1:
result = np.zeros(x.shape)
# Normalize data
for i in range(x.shape[1]):
# Select the column
col = x.to_numpy()[:, i]
# Normalize the column
scaler = scalerType()
result[:, i] = scaler.fit_transform(col.reshape(-1, 1)).squeeze()
return pd.DataFrame(result, columns=x.columns, index=x.index)
    else:
        raise ValueError("Input must contain more than one row")
def remove_outliers(x):
"""
Remove outliers (data outside of [1%, 99%]).
Parameters
==========
x : pandas.DataFrame
The input data array (NxM).
Returns
=======
: pandas.DataFrame
The array without outliers.
"""
result = x.copy()
    # Remove outliers
for i in range(result.shape[1]):
# Select the column
col = result.iloc[:, i]
# Find data between 1%-99%
inLimits = col.between(col.quantile(0.01), col.quantile(0.99))
# Remove the others
result.drop(np.where(inLimits == False)[0], inplace=True)
result.reset_index(drop=True, inplace=True)
return result
def features_selection(dataset, n_components=5):
"""
Select the most important features of the data.
Parameters
==========
dataset : pandas.DataFrame
The input data array (NxM).
n_components : int
The number of features to keep.
Returns
=======
: pandas.DataFrame
A new array containing only the *n_components* most important features.
"""
# PCA
pca = PCA(n_components=n_components)
pca.fit(dataset)
# Project axes in reduced space
res = pca.transform(np.eye(dataset.shape[1]))
# Compute contribution
contrib = np.sum(abs(res), axis=1)
# Sort features
principal_features = np.argsort(contrib)
return principal_features[-1 : -n_components - 1 : -1]
def preprocess(filepath, norm, rm_outliers, scalerType="StandardScaler", max_comp=None):
"""
    Preprocess the wine data file located at *filepath*.
Parameters
==========
    filepath : str
        Path of the CSV file to preprocess (';'-separated).
    norm : boolean
        Defines whether the data has to be normalized.
    rm_outliers : boolean
        Defines whether the outliers have to be removed.
    scalerType : str
        Name of the scaler to use ("StandardScaler" or "MinMaxScaler").
    max_comp : int
        The number of features to keep (None keeps them all).
"""
options = ""
# Paths
path_dir = ntpath.dirname(filepath)
filename = ntpath.basename(filepath)
# Load data
data = pd.read_csv(path_dir+'/'+filename, sep=";")
# Drop NaN values
data = data.dropna(axis="index")
# Remove outliers
if rm_outliers:
data = remove_outliers(data)
options += "ro_"
# Normalize
if norm:
scalerType = StandardScaler if scalerType == "StandardScaler" else MinMaxScaler
data = normalize(data, scalerType)
options += "n_"
if max_comp is not None:
# Search for the X most contributing features
princ_comp = features_selection(data.iloc[:, :-1], max_comp)
# Add quality column
princ_comp = np.append(princ_comp, -1)
# Keep X principal components
data = data.iloc[:, princ_comp]
# Save
data.to_csv(path_dir + "/preprocessed_" + options + filename, index=False)
print("Preprocessing done !")
|
from src.cached_card_lookup import CachedCardLookup
from src.mongo import EXTRACTED_CARDS, AGGREGATED_CARD_SYNERGIES, AGGREGATED_CARDS, AGGREGATED_CARD_DECK_OCCURRENCES
from src.redis import CACHED_CARDS
class CardLoader:
def __init__(self, mongo, redis):
self.mongo = mongo
self.redis = redis
def run(self):
aggregated_card_synergies = self.mongo.get_all(AGGREGATED_CARD_SYNERGIES)
aggregated_card_deck_occurrences = self.mongo.get_all(AGGREGATED_CARD_DECK_OCCURRENCES)
extracted_cards = self.mongo.get_all(EXTRACTED_CARDS)
aggregated_card_synergies_by_card_key = {acs['key']: acs for acs in aggregated_card_synergies}
aggregated_card_deck_occurrences_by_card_key = {acdo['key']: acdo for acdo in aggregated_card_deck_occurrences}
joined_aggregated_card_synergies_and_deck_occurrences = [
dict(
**self._get_dict_projection(
aggregated_card_synergies_by_card_key.get(card_key, {}),
['key', 'name', 'names', 'synergies']
),
**self._get_dict_projection(
aggregated_card_deck_occurrences_by_card_key.get(card_key, {}),
['deckOccurrences']
)
)
for card_key in set(
list(aggregated_card_synergies_by_card_key.keys()) +
list(aggregated_card_deck_occurrences_by_card_key.keys())
)
]
aggregated_cards = []
cached_card_lookup = CachedCardLookup(extracted_cards)
for card_synergies_and_deck_occurrences in joined_aggregated_card_synergies_and_deck_occurrences:
card = cached_card_lookup.find(card_synergies_and_deck_occurrences['names'])
if not card:
continue
aggregated_cards.append(dict(
**card_synergies_and_deck_occurrences,
**self._get_dict_without_fields(card, ['name', 'names', 'key'])
))
self.mongo.replace_all(AGGREGATED_CARDS, aggregated_cards, 'key')
self.redis.cache_data(CACHED_CARDS, aggregated_cards)
@staticmethod
def _get_dict_projection(a_dict, fields_to_get):
return {k: v for k, v in a_dict.items() if k in fields_to_get}
@staticmethod
def _get_dict_without_fields(a_dict, fields_to_exclude):
return {k: v for k, v in a_dict.items() if k not in fields_to_exclude}
|
'''
The partially defined functions and classes of this module
will be called by a marker script.
You should complete the functions and classes according to their specified interfaces.
'''
import search
import sokoban
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def my_team():
'''
Return the list of the team members of this assignment submission as a list
of triplet of the form (student_number, first_name, last_name)
'''
return [(7521022, 'Jordan', 'Hawkes'),(7561555, 'Stewart','Whitehead')]
# return [ (1234567, 'Ada', 'Lovelace'), (1234568, 'Grace', 'Hopper'), (1234569, 'Eva', 'Tardos') ]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#Function relies on rule 1 being implemented perfectly (corner that isn't target is taboo)
def foundOtherTabooCorner(cellx, celly, startCellX, startCellY, wallDirection, taboos):
    #Found another taboo corner, cells lining walls are all taboo
    if (cellx + wallDirection[0], celly + wallDirection[1]) in taboos:
        # For walls travelling horizontally
        #if celly == startCellY:
        if wallDirection in ((1, 0), (-1, 0)):
            for cellsTravelled in range(abs(cellx - startCellX)):
                if startCellX > cellx:
                    taboos.append((startCellX - cellsTravelled - 1, startCellY))
                elif startCellX < cellx:
                    taboos.append((startCellX + cellsTravelled + 1, startCellY))
        # For walls travelling vertically
        #if cellx == startCellX:
        if wallDirection in ((0, 1), (0, -1)):
            for cellsTravelled in range(abs(celly - startCellY)):
                if startCellY > celly:
                    taboos.append((startCellX, startCellY - cellsTravelled - 1))
                elif startCellY < celly:
                    taboos.append((startCellX, startCellY + cellsTravelled + 1))
def taboo_cells(warehouse):
'''
Identify the taboo cells of a warehouse. A cell is called 'taboo'
    if, whenever a box gets pushed onto such a cell, the puzzle becomes unsolvable.
When determining the taboo cells, you must ignore all the existing boxes,
simply consider the walls and the target cells.
Use only the following two rules to determine the taboo cells;
Rule 1: if a cell is a corner and not a target, then it is a taboo cell.
Rule 2: all the cells between two corners along a wall are taboo if none of
these cells is a target.
@param warehouse: a Warehouse object
@return
A string representing the puzzle with only the wall cells marked with
an '#' and the taboo cells marked with an 'X'.
The returned string should NOT have marks for the worker, the targets,
and the boxes.
'''
## "INSERT YOUR CODE HERE"
tabooCells=[]
for x,y in warehouse.walls:
if (x-1,y+1) in warehouse.walls:
if(((x,y+1) not in warehouse.targets) & ((x,y+1) not in warehouse.walls) & ((x,y+1) not in tabooCells)):
tabooCells.append((x, y+1))
if(((x-1,y) not in warehouse.targets) & ((x-1,y) not in warehouse.walls) & ((x-1,y) not in tabooCells)):
tabooCells.append((x-1,y))
elif (x+1,y+1) in warehouse.walls:
if(((x+1,y) not in warehouse.targets) & ((x+1,y) not in warehouse.walls) & ((x+1,y) not in tabooCells)):
tabooCells.append((x+1, y))
if(((x,y+1) not in warehouse.targets) & ((x,y+1) not in warehouse.walls) & ((x,y+1) not in tabooCells)):
tabooCells.append((x,y+1))
elif (x+1,y-1) in warehouse.walls:
if(((x,y-1) not in warehouse.targets) & ((x,y-1) not in warehouse.walls) & ((x,y-1) not in tabooCells)):
tabooCells.append((x, y-1))
if(((x+1,y) not in warehouse.targets) & ((x+1,y) not in warehouse.walls) & ((x+1,y) not in tabooCells)):
tabooCells.append((x+1,y))
elif (x-1,y-1) in warehouse.walls:
if(((x,y-1) not in warehouse.targets) & ((x,y-1) not in warehouse.walls) & ((x,y-1) not in tabooCells)):
tabooCells.append((x,y-1))
if(((x-1,y) not in warehouse.targets) & ((x-1,y) not in warehouse.walls) & ((x-1,y) not in tabooCells)):
tabooCells.append((x-1,y))
    originalTaboos = list(tabooCells)  # copy, since tabooCells is extended below
north = (0, -1)
east = (1, 0)
south = (0, 1)
west = (-1, 0)
for cells in originalTaboos:
cellx = cells[0]
celly = cells[1]
startCellX = cellx
startCellY = celly
counter=0
cornerFound= False
while (cornerFound == False):
if (((cellx + 1, celly) not in warehouse.walls) & ((cellx + 1, celly) not in warehouse.targets) & ((cellx + 1, celly-1) in warehouse.walls)):
cellx=cellx+1
counter=counter+1
elif (cellx + 1, celly-1) not in warehouse.walls:
cornerFound=True
elif(cellx +1, celly) in warehouse.targets:
cornerFound=True
elif (cellx +1,celly) in warehouse.walls:
index = 0
while index < counter:
                    tabooCells.append((startCellX + index, startCellY))
index += 1
cornerFound=True
for cells in originalTaboos:
cellx = cells[0]
celly = cells[1]
startCellX = cellx
startCellY = celly
counter=0
cornerFound= False
while (cornerFound == False):
if (((cellx + 1, celly) not in warehouse.walls) & ((cellx + 1, celly) not in warehouse.targets) & ((cellx + 1, celly+1) in warehouse.walls)):
cellx=cellx+1
counter=counter+1
elif (cellx + 1, celly+1) not in warehouse.walls:
cornerFound=True
elif(cellx +1, celly) in warehouse.targets:
cornerFound=True
elif (cellx +1,celly) in warehouse.walls:
index = 0
while index < counter:
                    tabooCells.append((startCellX + index, startCellY))
index += 1
cornerFound=True
for cells in originalTaboos:
cellx = cells[0]
celly = cells[1]
startCellX = cellx
startCellY = celly
counter=0
cornerFound= False
while (cornerFound == False):
if (((cellx, celly+1) not in warehouse.walls) & ((cellx, celly+1) not in warehouse.targets) & ((cellx - 1, celly+1) in warehouse.walls)):
celly=celly+1
counter=counter+1
elif (cellx - 1, celly+1) not in warehouse.walls:
cornerFound=True
elif(cellx, celly +1) in warehouse.targets:
cornerFound=True
elif (cellx,celly+1) in warehouse.walls:
index = 0
while index < counter:
                    tabooCells.append((startCellX, startCellY + index))
index += 1
cornerFound=True
for cells in originalTaboos:
cellx = cells[0]
celly = cells[1]
startCellX = cellx
startCellY = celly
counter=0
cornerFound= False
while (cornerFound == False):
if (((cellx, celly+1) not in warehouse.walls) & ((cellx, celly+1) not in warehouse.targets) & ((cellx + 1, celly+1) in warehouse.walls)):
celly=celly+1
counter=counter+1
elif (cellx + 1, celly+1) not in warehouse.walls:
cornerFound=True
elif(cellx, celly +1) in warehouse.targets:
cornerFound=True
elif (cellx,celly+1) in warehouse.walls:
index = 0
while index < counter:
                    tabooCells.append((startCellX, startCellY + index))
index += 1
cornerFound=True
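    # A minimal rendering sketch so the function returns the string described in
    # the docstring (assumes warehouse.walls is a list of (x, y) tuples, as used above).
    X, Y = zip(*warehouse.walls)
    rows = []
    for y in range(max(Y) + 1):
        row = ''
        for x in range(max(X) + 1):
            if (x, y) in warehouse.walls:
                row += '#'
            elif (x, y) in tabooCells:
                row += 'X'
            else:
                row += ' '
        rows.append(row)
    return '\n'.join(rows)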
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class SokobanPuzzle(search.Problem):
'''
Class to represent a Sokoban puzzle.
Your implementation should be compatible with the
search functions of the provided module 'search.py'.
Use the sliding puzzle and the pancake puzzle for inspiration!
'''
## "INSERT YOUR CODE HERE"
def __init__(self, warehouse):
self.initial=warehouse
self.initial.read_warehouse_file(self,warehouse)
def actions(self, state):
"""
Return the list of actions that can be executed in the given state
if these actions do not push a box in a taboo cell.
The actions must belong to the list ['Left', 'Down', 'Right', 'Up']
"""
raise NotImplementedError
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def check_action_seq(warehouse, action_seq):
'''
Determine if the sequence of actions listed in 'action_seq' is legal or not.
Important notes:
- a legal sequence of actions does not necessarily solve the puzzle.
- an action is legal even if it pushes a box onto a taboo cell.
@param warehouse: a valid Warehouse object
@param action_seq: a sequence of legal actions.
    For example, ['Left', 'Down', 'Down', 'Right', 'Up', 'Down']
@return
    The string 'Failure', if one of the actions was not successful.
For example, if the agent tries to push two boxes at the same time,
or push one box into a wall.
Otherwise, if all actions were successful, return
A string representing the state of the puzzle after applying
the sequence of actions. This must be the same string as the
string returned by the method Warehouse.__str__()
'''
## "INSERT YOUR CODE HERE"
raise NotImplementedError()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def solve_sokoban_elem(warehouse):
'''
This function should solve using elementary actions
the puzzle defined in a file.
@param warehouse: a valid Warehouse object
@return
A list of strings.
If puzzle cannot be solved return ['Impossible']
If a solution was found, return a list of elementary actions that solves
the given puzzle coded with 'Left', 'Right', 'Up', 'Down'
    For example, ['Left', 'Down', 'Down', 'Right', 'Up', 'Down']
If the puzzle is already in a goal state, simply return []
'''
## "INSERT YOUR CODE HERE"
raise NotImplementedError()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def can_go_there(warehouse, dst):
'''
Determine whether the worker can walk to the cell dst=(row,col)
without pushing any box.
@param warehouse: a valid Warehouse object
@return
True if the worker can walk to cell dst=(row,col) without pushing any box
False otherwise
'''
## "INSERT YOUR CODE HERE"
raise NotImplementedError()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def solve_sokoban_macro(warehouse):
'''
Solve using macro actions the puzzle defined in the warehouse passed as
a parameter. A sequence of macro actions should be
represented by a list M of the form
[ ((r1,c1), a1), ((r2,c2), a2), ..., ((rn,cn), an) ]
For example M = [ ((3,4),'Left') , ((5,2),'Up'), ((12,4),'Down') ]
    means that the worker first goes to the box at row 3 and column 4 and pushes it left,
    then goes to the box at row 5 and column 2 and pushes it up, and finally
    goes to the box at row 12 and column 4 and pushes it down.
@param warehouse: a valid Warehouse object
@return
If puzzle cannot be solved return ['Impossible']
Otherwise return M a sequence of macro actions that solves the puzzle.
If the puzzle is already in a goal state, simply return []
'''
## "INSERT YOUR CODE HERE"
raise NotImplementedError()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
wh = sokoban.Warehouse()
wh.read_warehouse_file("./warehouses/warehouse_03.txt")
# field.write_warehouse_file("./F_01.txt")
|
import openpyxl
from WhatsAppUIAutomation.automation import WhatsAppUi
def load_excel(r, input_message):
workbook = openpyxl.load_workbook('./whatsapp_ui.xlsx')
print(str(workbook) + " Workbook Opened...")
sheet = workbook['whatsapp']
print(str(sheet) + " Reading...")
sl_no = sheet.cell(row=r, column=1).value
number = sheet.cell(row=r, column=2).value
whatsapp = WhatsAppUi()
whatsapp.search_number(number)
whatsapp.send_message(input_message)
# OUTPUT WRITE TO EXCEL FILE
sheet.cell(row=r, column=3).value = whatsapp.message
sheet.cell(row=r, column=4).value = whatsapp.sent_status
sheet.cell(row=r, column=5).value = whatsapp.read_status
sheet.cell(row=r, column=6).value = whatsapp.login_status
sheet.cell(row=r, column=7).value = whatsapp.logout_status
workbook.save('./whatsapp_ui.xlsx')
print("DATA INSERTED SUCCESSFULLY IN ROW " + str(r))
# print(f'SL No : {sl_no} \n Number : {number} \n '
# f'Message : {message} \n Sent Status : {whatsapp.sent_status} \n '
# f'Checked : {whatsapp.read_status} \n '
# f'Login : {whatsapp.login_status} \n Logout : {whatsapp.logout_status}')
if __name__ == "__main__":
while True:
row = int(input('Enter Excel Row Number : '))
message = input('Enter Message to Send : ')
load_excel(row, message)
# whatsapp = WhatsAppUi()
# whatsapp.search_number("+8801402004389")
# whatsapp.send_message("Message sent, Status Checked")
|
from .development import Dev
from .production import Pro
import pymysql
pymysql.install_as_MySQLdb()
|
from os.path import splitext, join, basename
import numpy as np
from torch import from_numpy
import torch
class WriteTensorToDisc(object):
def __init__(self, write_loc, path_annotations):
self.write_loc = write_loc
self.annotations = path_annotations
def __call__(self, sample):
name, _ = splitext(basename(sample["name"]))
base = name + ".pt"
save_file = join("data", sample["label"], base)
filesystem_name = join(self.write_loc, sample["label"], base)
torch.save(sample["data"], filesystem_name)
with open(self.annotations, "a") as f:
f.write("{},{},{}\n".format(save_file, sample["label"], "({},{},{},{})".format(
sample["data"].shape[0],
sample["data"].shape[1],
sample["data"].shape[2],
sample["data"].shape[3]
)))
return sample
|
import sys
s = "I_W1sh_I_H4d_IDA_inst4lled_But_Wh0_C4n_4ff0rd_Th4t"
xor = 0xd1
result = 0xd1
print "int xor_bytes[%d] = {" % len(s)
for i, c in enumerate(s):
xor = result ^ ord(c)
result ^= xor
sys.stdout.write("0x{:02x}".format(xor) + ", ")
if (i + 1) % 16 == 0:
print ""
print ""
print "}"
|
# simple network sniffer with raw sockets on windows.
# requires administrator privileges to modify the interface.
import socket
# the public network interface
HOST = socket.gethostbyname(socket.gethostname())
# create a raw socket and bind it to the public interface
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_IP)
s.bind((HOST, 0))
# include IP headers
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# receive all packets (enable promiscuous mode)
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
# receive packets until interrupted (Ctrl+C), so the cleanup below is reachable
try:
    while True:
        print(s.recvfrom(65565))
except KeyboardInterrupt:
    pass
# disable promiscuous mode
s.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
|
""" Log output class
Created by mahiro hoshino
How to use:
logger = Logger().get_logger()
logger.error("error msg")
logger.debug("debug msg") etc...
@see https://docs.python.jp/3/howto/logging.html
Log output format:
    time(year-month-day hour:minute:second,millisecond): function name: line number: log level: message
"""
import logging
class Logger:
def __init__(self):
self._logger = logging.getLogger(__name__)
        self._logger.setLevel(logging.DEBUG)
# output file log.txt
file_handler = logging.FileHandler('log.txt')
self._logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
self._logger.addHandler(stream_handler)
        # time(year-month-day hour:minute:second,millisecond): function name: line number: log level: message
formatter = logging.Formatter('%(asctime)s:\t%(funcName)s:\t%(lineno)d:\t%(levelname)s:\t%(message)s')
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
def get_logger(self):
return self._logger
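# Minimal usage following the "How to use" note in the module docstring
# (writes to log.txt and the console).
if __name__ == '__main__':
    logger = Logger().get_logger()
    logger.debug("debug msg")
    logger.error("error msg")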
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-25 14:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_operation', '0009_auto_20180825_0255'),
]
operations = [
migrations.AlterField(
model_name='usermessages',
name='file',
field=models.FileField(blank=True, help_text='Files to Upload', null=True, upload_to='file/usermsg/', verbose_name='Files to Upload'),
),
migrations.AlterField(
model_name='usermessages',
name='msg_type',
field=models.IntegerField(choices=[(1, 'Feedback About Products'), (2, 'Complain'), (3, 'Gift Card'), (4, 'Shipping & Handling'), (5, 'Return & Exchange'), (6, 'Product Inquiries'), (7, 'Payment')], default=1, help_text='Message Type: 1: Feedback About Products, 2: Complain, 3: Gift Cards, 4: Shipping & Handling, 5: Return & Exchange, 6: Product Inquiries, 7: Payment', verbose_name='Message Type'),
),
]
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Point of Sale [Mimoki]',
'version': '1.0.1',
'category': 'Point Of Sale',
'author': 'TrendAV',
'maintainer': 'TrendAV',
'website': 'http://www.trendav.com',
'sequence': 21,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process from PoS - TrendAV for Mimoki
=========================================================
""",
'depends': ['trend_point_of_sale'],
'data': [
'views/pos_view.xml',
'views/pos_static.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'application': True,
'qweb': ['static/src/xml/pos.xml',],
'auto_install': False,
}
|
"""
A zero-indexed array A consisting of N different integers is given. The array contains integers in the range [1..(N + 1)], which means that exactly one element is missing.
Your goal is to find that missing element.
Write a function:
def solution(A)
that, given a zero-indexed array A, returns the value of the missing element.
For example, given array A such that:
A[0] = 2
A[1] = 3
A[2] = 1
A[3] = 5
the function should return 4, as it is the missing element.
Assume that:
N is an integer within the range [0..100,000];
the elements of A are all distinct;
each element of array A is an integer within the range [1..(N + 1)].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(1), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
def solution(A):
# write your code in Python 2.7
N = len(A)
#sum of 1 to N+1
total_N = int(0.5 * (N + 1) * (N + 2))
#difference between total_N and sum of elements in A gives you the missing number
return total_N - sum(A)
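# A quick check with the example from the problem statement above.
if __name__ == '__main__':
    assert solution([2, 3, 1, 5]) == 4
    print('solution: check passed')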
|
from django import template
register = template.Library()
@register.filter
def str_to_float(value):
    try:
        return float(value)
    except (TypeError, ValueError):
        return None
|
import pandas as pd, numpy as np
oneDList = [10, 20, 30, 40]
oneDTable = pd.DataFrame(oneDList)
print("Default Column Name:\n", oneDTable)
oneDTableIndex = pd.DataFrame({"ColName" : oneDList})
print("With Column Name:\n", oneDTableIndex)
withColNameAndRow = pd.Series(oneDList, index=["r1", "r2", "r3", "r4"])
print("With Column and Row Name:\n", withColNameAndRow)
withNpAndPd = pd.DataFrame({'Row1' : pd.Series(np.arange(51, 91, 2)), 'Row2' : pd.Series(np.arange(0, 20)), 'Row3' : pd.Series(np.arange(90, 50, -2))})
print("Dimension of withColNameAndRow:", withNpAndPd.ndim)
print("Range:", withNpAndPd.axes)
print("With Numpy, Panda Series in DataFrame:\n", withNpAndPd)
dict_var = [{'col1' : 1, 'col2': 2}, {'col1' : 20, 'col2' : 26, 'col3' : 71}, {'col1' : 50, 'col3' : 51}]
dFrameDict = pd.DataFrame(dict_var, index=['row1', 'row2', 'row3'])
print(dFrameDict)
stud1 = pd.Series([90, 95, 99], index=['Physics', 'Chemistry', 'Mathematics'])
stud2 = pd.Series([95, 98, 100, 100], index=['Physics', 'Chemistry', 'Mathematics', 'Cse'])
marksTable = pd.DataFrame({
"Dee" : stud1,
"Pan" : stud2
})
marksTable['Dp'] = pd.Series([90, 89, 95], index=['Cse', 'Chemistry', 'Mathematics'])
print("\nPanda stud1 series:\n", stud1)
print("\nPanda stud2 series:\n", stud2)
print("\n---Print All Marks---\n", marksTable)
addRow = pd.DataFrame([[70, 71, 73]], columns=['Dee', 'Dp', 'Pan'])
print("Type of addRow:", type(addRow))
print("Type of stud2:", type(stud2))
print("\n---Add New Row---")
marksTable = pd.concat([marksTable, addRow])
print(marksTable)
print("\n---Print Mathematics Row---\n", marksTable.loc['Mathematics'])
print("\n---Print 0th Row---\n", marksTable.iloc[0])
# del and pop is for removing columns
# drop is for removing rows
del(marksTable['Dp'])
print("\n---After removing Dp---\n", marksTable)
delEntry = marksTable.pop('Pan')
print("\n---Popping Pan Entry---\n", delEntry)
print("\n---After removing Pan---\n", marksTable)
marksTable = marksTable.drop(0)
print("\n---After Drop 0---\n", marksTable)
marksTable = marksTable.drop('Chemistry')
print("\n---After Drop Chemistry---\n", marksTable)
|
import math
import random
import sys
lastAnswer = 0.0
memory = 0.0
print("-- tCalc V1.2 -- Programmed by Bailey Dawson --")
def numGet(token):  # get the number out of the string
    if token == "m":
        return memory
    if token == "r":
        return random.random()
    if token == "p":
        return math.pi
    try:
        return float(token)
    except ValueError:
        return "ERROR"
def getAnswer(inp):#Take in a string, convert it to a answer
if "+" in inp:
if "+" == inp[0]:
try:
num = numGet(inp[1:])
if num != "ERROR": return lastAnswer + float(num)
else:
print("Invalid value | Code 1.1")
return "ERROR"
except:
print("Invalid value | Code 1.2")
return "ERROR"
else:
vals = inp.split("+")
return float(numGet(vals[0])) + float(numGet(vals[1]))
elif "-" in inp:
if "-" == inp[0]:
try:
num = numGet(inp[1:])
if num != "ERROR": return lastAnswer - float(num)
else:
print("Invalid value | Code 2.1")
return "ERROR"
except:
print("Invalid value | Code 2.2")
return "ERROR"
else:
vals = inp.split("-")
return float(numGet(vals[0])) - float(numGet(vals[1]))
elif "*" in inp:
if "*" == inp[0]:
try:
num = numGet(inp[1:])
if num != "ERROR": return lastAnswer * float(num)
else:
print("Invalid value | Code 3.1")
return "ERROR"
except:
print("Invalid value | Code 3.2")
return "ERROR"
else:
vals = inp.split("*")
return float(numGet(vals[0])) * float(numGet(vals[1]))
elif "/" in inp:
if "/" == inp[0]:
try:
num = numGet(inp[1:])
if num != "ERROR": return lastAnswer / float(num)
else:
print("Invalid value | Code 4.1")
return "ERROR"
except:
print("Invalid value | Code 4.2")
return "ERROR"
else:
vals = inp.split("/")
return float(numGet(vals[0])) / float(numGet(vals[1]))
elif "cos(" in inp:
vals = inp.split("(")
if vals[1] != "":
vals[1] = vals[1].replace(")", "")
num = numGet(vals[1])
if num != "ERROR": return math.cos(float(num))
elif "tan(" in inp:
vals = inp.split("(")
if vals[1] != "":
vals[1] = vals[1].replace(")", "")
num = numGet(vals[1])
if num != "ERROR": return math.tan(float(num))
elif "sin(" in inp:
vals = inp.split("(")
if vals[1] != "":
vals[1] = vals[1].replace(")", "")
num = numGet(vals[1])
if num != "ERROR": return math.sin(float(num))
return "Invalid Input | Code 0.2"
if len(sys.argv) > 1:
del sys.argv[0]
for x in sys.argv:
lastAnswer = getAnswer(x)
print(lastAnswer)
sys.exit()
print("Type 'exit' to stop, 'help' for help")
while True:
inp = input(":> ").replace(" ", "")
if inp == "exit": #Stop
break
elif inp == "help": # Help
print("You can add onto the last answer given, by typing '<operator><Number or value>'\nSupported operators:\n\t+\n\t-\n\t*\n\t/\n\tcos(<val>)\n\tsin(<val>)\n\ttan(<val>)\nUsing pi or memory:\n\tto use pi, type p.\n\tTo set memory, type m after it gives the answer you want in memory. To acces memory type m in a calculation")
continue
elif inp == "m": #Put last into memory
memory = lastAnswer
continue
else: #Calculation
lastAnswer = getAnswer(inp)
if lastAnswer != "ERROR":
print(lastAnswer)
continue
print("ERROR")
continue
print("Invalid Input | Code 0.1")
print("ERROR")
|
# coding: utf-8
import argparse
import os.path
import numpy as np
import scipy as sp
import pandas as pd
import hail as hl
from hail.linalg import BlockMatrix
from hail.utils import new_temp_file
gnomad_latest_versions = {"GRCh37": "2.1.1", "GRCh38": "3.1.2"}
gnomad_pops = {"GRCh37": ["afr", "amr", "eas", "fin", "nfe"], "GRCh38": ["afr", "amr", "eas", "fin", "nfe", "sas"]}
gnomad_ld_variant_indices = {
"GRCh37": "gs://gcp-public-data--gnomad/release/2.1.1/ld/gnomad.genomes.r2.1.1.{pop}.common.adj.ld.variant_indices.ht",
"GRCh38": "gs://finucane-requester-pays/slalom/gnomad/release/2.1.1/ld/gnomad.genomes.r2.1.1.{pop}.common.adj.ld.variant_indices.b38.ht",
}
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, dict())
for value in values:
key, value = value.split("=")
if value.isnumeric():
value = float(value)
getattr(namespace, self.dest)[key] = value
# cf. https://github.com/armartin/prs_disparities/blob/master/run_prs_holdout.py
def flip_text(base):
"""
:param StringExpression base: Expression of a single base
:return: StringExpression of flipped base
:rtype: StringExpression
"""
return hl.switch(base).when("A", "T").when("T", "A").when("C", "G").when("G", "C").default(base)
def align_alleles(ht, ht_gnomad, flip_rows=None):
ht = ht.annotate(
**(
hl.case()
.when(
hl.is_defined(ht_gnomad[ht.locus, hl.array([ht.alleles[0], ht.alleles[1]])]),
hl.struct(alleles=[ht.alleles[0], ht.alleles[1]], flip_row=False),
)
.when(
hl.is_defined(ht_gnomad[ht.locus, hl.array([ht.alleles[1], ht.alleles[0]])]),
hl.struct(alleles=[ht.alleles[1], ht.alleles[0]], flip_row=True),
)
.when(
hl.is_defined(ht_gnomad[ht.locus, hl.array([flip_text(ht.alleles[0]), flip_text(ht.alleles[1])])]),
hl.struct(alleles=[flip_text(ht.alleles[0]), flip_text(ht.alleles[1])], flip_row=False),
)
.when(
hl.is_defined(ht_gnomad[ht.locus, hl.array([flip_text(ht.alleles[1]), flip_text(ht.alleles[0])])]),
hl.struct(alleles=[flip_text(ht.alleles[1]), flip_text(ht.alleles[0])], flip_row=True),
)
.default(hl.struct(alleles=[ht.alleles[0], ht.alleles[1]], flip_row=False))
)
)
if flip_rows is not None:
ht = ht.annotate(**{row: hl.if_else(ht.flip_row, -ht[row], ht[row]) for row in flip_rows})
ht = ht.drop("flip_row")
return ht
def get_diag_mat(diag_vec: BlockMatrix):
x = diag_vec.T.to_numpy()
diag_mat = np.identity(len(x)) * np.outer(np.ones(len(x)), x)
return BlockMatrix.from_numpy(diag_mat)
def abf(beta, se, W=0.04):
z = beta / se
V = se ** 2
r = W / (W + V)
lbf = 0.5 * (np.log(1 - r) + (r * z ** 2))
denom = sp.special.logsumexp(lbf)
prob = np.exp(lbf - denom)
return lbf, prob
def get_cs(variant, prob, coverage=0.95):
ordering = np.argsort(prob)[::-1]
idx = np.where(np.cumsum(prob[ordering]) > coverage)[0][0]
cs = variant[ordering][: (idx + 1)]
return cs
def main(args):
hl._set_flags(no_whole_stage_codegen="1")
reference_genome = args.reference_genome
gnomad_version = gnomad_latest_versions[reference_genome]
gnomad_ht_path = f"gs://finucane-requester-pays/slalom/gnomad/release/{gnomad_version}/ht/genomes/gnomad.genomes.r{gnomad_version}.sites.most_severe.ht"
    ht_snp = hl.import_table(args.snp, impute=True, types={"chromosome": hl.tstr}, delimiter=r"\s+")
ht_snp = ht_snp.annotate(
locus=hl.parse_locus(
hl.delimit([ht_snp.chromosome, hl.str(ht_snp.position)], delimiter=":"), reference_genome=reference_genome
),
alleles=[ht_snp.allele1, ht_snp.allele2],
)
if args.align_alleles:
ht_gnomad = hl.read_table(gnomad_ht_path)
ht_snp = align_alleles(ht_snp, ht_gnomad, flip_rows=["beta"])
ht_snp = ht_snp.annotate(variant=hl.variant_str(ht_snp.locus, ht_snp.alleles))
ht_snp = ht_snp.key_by("locus", "alleles")
ht_snp = ht_snp.add_index("idx_snp")
# annotate in novel CUPs and reject
cup = hl.read_table(f"gs://finucane-requester-pays/slalom/cup_files/FASTA_BED.ALL_{reference_genome}.novel_CUPs.ht")
reject = hl.read_table(
f"gs://finucane-requester-pays/slalom/cup_files/FASTA_BED.ALL_{reference_genome}.reject_2.ht"
)
ht_snp = ht_snp.annotate(in_cups=hl.is_defined(cup[ht_snp.locus]) | hl.is_defined(reject[ht_snp.locus]))
# annotate vep and freq
if args.annotate_consequence or args.annotate_gnomad_freq:
ht_gnomad = hl.read_table(gnomad_ht_path)
consequences = ["most_severe", "gene_most_severe", "consequence"] if args.annotate_consequence else []
freq_expr = (
{f"gnomad_v{gnomad_version[0]}_af_{pop}": ht_gnomad.freq[pop].AF for pop in gnomad_pops[reference_genome]}
if args.annotate_gnomad_freq
else {}
)
ht_gnomad = ht_gnomad.select(*consequences, **freq_expr)
ht_snp = ht_snp.join(ht_gnomad, how="left")
ht_snp = ht_snp.checkpoint(new_temp_file())
df = ht_snp.key_by().drop("locus", "alleles", "idx_snp").to_pandas()
if args.abf:
lbf, prob = abf(df.beta, df.se, W=args.abf_prior_variance)
cs = get_cs(df.variant, prob, coverage=0.95)
cs_99 = get_cs(df.variant, prob, coverage=0.99)
df["lbf"] = lbf
df["prob"] = prob
df["cs"] = df.variant.isin(cs)
df["cs_99"] = df.variant.isin(cs_99)
if args.lead_variant is None:
if args.lead_variant_choice == "p":
lead_idx_snp = df.p.idxmin()
elif args.lead_variant_choice == "prob":
lead_idx_snp = df.prob.idxmax()
elif args.lead_variant_choice in ["gamma", "gamma-p"]:
lead_idx_snp = df.index[df.gamma]
if len(lead_idx_snp) == 0:
if args.lead_variant_choice == "gamma-p":
lead_idx_snp = df.p.idxmin()
else:
raise ValueError("No lead variants found with gamma.")
elif len(lead_idx_snp) > 1:
raise ValueError("Multiple lead variants found with gamma.")
else:
lead_idx_snp = lead_idx_snp[0]
args.lead_variant = df.variant[lead_idx_snp]
else:
lead_idx_snp = df.index[df.variant == args.lead_variant]
df["lead_variant"] = False
df["lead_variant"].iloc[lead_idx_snp] = True
# annotate LD
r2_label = "r2" if not args.export_r else "r"
if args.ld_reference == "gnomad":
ld_matrices = [
f"gs://gcp-public-data--gnomad/release/2.1.1/ld/gnomad.genomes.r2.1.1.{pop}.common.ld.bm"
for pop in gnomad_pops["GRCh37"]
]
ld_variant_indices = [
gnomad_ld_variant_indices[reference_genome].format(pop=pop) for pop in gnomad_pops["GRCh37"]
]
ld_labels = [f"gnomad_lead_{r2_label}_{pop}" for pop in gnomad_pops["GRCh37"]]
else:
ld_matrices = [args.custom_ld_path]
ld_variant_indices = [args.custom_ld_variant_index_path]
ld_labels = [f"{args.custom_ld_label}_lead_{r2_label}"]
for ld_bm_path, ld_ht_path, col in zip(ld_matrices, ld_variant_indices, ld_labels):
ht = hl.read_table(ld_ht_path)
ht = ht_snp.join(ht, "inner")
ht = ht.checkpoint(new_temp_file())
lead_idx = ht.filter(hl.variant_str(ht.locus, ht.alleles) == args.lead_variant).head(1).idx.collect()
if len(lead_idx) == 0:
df[col] = np.nan
continue
idx = ht.idx.collect()
idx2 = sorted(list(set(idx)))
bm = BlockMatrix.read(ld_bm_path)
bm = bm.filter(idx2, idx2)
if not np.all(np.diff(idx) > 0):
order = np.argsort(idx)
rank = np.empty_like(order)
_, inv_idx = np.unique(np.sort(idx), return_inverse=True)
rank[order] = inv_idx
mat = bm.to_numpy()[np.ix_(rank, rank)]
bm = BlockMatrix.from_numpy(mat)
        # re-densify triangular matrix
bm = bm + bm.T - get_diag_mat(bm.diagonal())
bm = bm.filter_rows(np.where(np.array(idx) == lead_idx[0])[0].tolist())
idx_snp = ht.idx_snp.collect()
r2 = bm.to_numpy()[0]
if not args.export_r:
r2 = r2 ** 2
df[col] = np.nan
df[col].iloc[idx_snp] = r2
if args.weighted_average_r is not None:
n_samples = []
ld = []
for k, v in args.weighted_average_r.items():
if isinstance(v, str):
if v not in df.columns:
print(f"Column {v} not found.")
continue
n_samples.append(df[v].values)
else:
n_samples.append(np.tile(v, len(df.index)))
ld.append(df[f"gnomad_lead_r_{k}"].values)
if len(n_samples) == 1:
df["r"] = ld[0]
else:
n_samples = np.array(n_samples).T
ld = np.array(ld).T
df["r"] = np.nansum(n_samples * ld, axis=1) / np.nansum(n_samples * ~np.isnan(ld), axis=1)
elif args.ld_reference == "custom":
df["r"] = df[ld_labels[0]]
else:
df["r"] = df["gnomad_lead_r_nfe"]
if args.dentist_s:
lead_z = (df.beta / df.se).iloc[lead_idx_snp]
df["t_dentist_s"] = ((df.beta / df.se) - df.r * lead_z) ** 2 / (1 - df.r ** 2)
df["t_dentist_s"] = np.where(df["t_dentist_s"] < 0, np.inf, df["t_dentist_s"])
df["t_dentist_s"].iloc[lead_idx_snp] = np.nan
df["nlog10p_dentist_s"] = sp.stats.chi2.logsf(df["t_dentist_s"], df=1) / -np.log(10)
if args.out.startswith("gs://"):
fopen = hl.hadoop_open
else:
fopen = open
with fopen(args.out, "w") as f:
df.drop(columns=["variant"]).to_csv(f, sep="\t", na_rep="NA", index=False)
if args.summary:
df["r2"] = df.r ** 2
if args.case_control:
df["n_eff_samples"] = df.n_samples * (df.n_cases / df.n_samples) * (1 - df.n_cases / df.n_samples)
else:
df["n_eff_samples"] = df.n_samples
n_r2 = np.sum(df.r2 > args.r2_threshold)
n_dentist_s_outlier = np.sum(
(df.r2 > args.r2_threshold) & (df.nlog10p_dentist_s > args.nlog10p_dentist_s_threshold)
)
max_pip_idx = df.prob.idxmax()
nonsyn_idx = (df.r2 > args.r2_threshold) & df.consequence.isin(["pLoF", "Missense"])
variant = df.chromosome.str.cat([df.position.astype(str), df.allele1, df.allele2], sep=":")
n_eff_r2 = df.n_eff_samples.loc[df.r2 > args.r2_threshold]
df_summary = pd.DataFrame(
{
"lead_pip_variant": [variant.iloc[max_pip_idx]],
"n_total": [len(df.index)],
"n_r2": [n_r2],
"n_dentist_s_outlier": [n_dentist_s_outlier],
"fraction": [n_dentist_s_outlier / n_r2 if n_r2 > 0 else 0],
"n_nonsyn": [np.sum(nonsyn_idx)],
"max_pip": [np.max(df.prob)],
"max_pip_nonsyn": [np.max(df.prob.loc[nonsyn_idx])],
"cs_nonsyn": [np.any(df.cs.loc[nonsyn_idx])],
"cs_99_nonsyn": [np.any(df.cs_99.loc[nonsyn_idx])],
"nonsyn_variants": [",".join(variant.loc[nonsyn_idx].values)],
"min_neff_r2": [np.nanmin(n_eff_r2) if n_r2 > 0 else np.nan],
"max_neff_r2": [np.nanmax(n_eff_r2)] if n_r2 > 0 else np.nan,
}
)
with fopen(args.out_summary, "w") as f:
df_summary.to_csv(f, sep="\t", na_rep="NA", index=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--snp", type=str, required=True, help="Input snp file from fine-mapping")
parser.add_argument("--out", type=str, required=True, help="Output path")
parser.add_argument("--out-summary", type=str, help="Output summary path")
parser.add_argument("--delimiter", type=str, default=" ", help="Delimiter for output ld matrix")
parser.add_argument("--lead-variant", type=str, help="Lead variant to annotate gnomAD LD")
parser.add_argument(
"--lead-variant-choice",
type=str,
default="p",
choices=["p", "prob", "gamma", "gamma-p"],
help="Strategy for choosing a lead variant",
)
parser.add_argument("--align-alleles", action="store_true", help="Whether to align alleles with gnomAD")
parser.add_argument("--annotate-consequence", action="store_true", help="Whether to annotate VEP consequences")
parser.add_argument("--annotate-gnomad-freq", action="store_true", help="Whether to annotate gnomAD frequencies")
parser.add_argument(
"--ld-reference", type=str, default="gnomad", choices=["gnomad", "custom"], help="Choice of LD reference"
)
parser.add_argument("--custom-ld-path", type=str, help="Path of user-provided LD BlockMatrix")
parser.add_argument("--custom-ld-variant-index-path", type=str, help="Path of user-provided LD variant index table")
parser.add_argument("--custom-ld-label", type=str, help="Label of user-provided LD")
parser.add_argument("--export-r", action="store_true", help="Export signed r values instead of r2")
parser.add_argument("--weighted-average-r", type=str, nargs="+", action=ParseKwargs, help="")
parser.add_argument("--dentist-s", action="store_true", help="Annotate DENTIST-S statistics")
parser.add_argument("--abf", action="store_true", help="Run ABF")
parser.add_argument("--abf-prior-variance", type=float, default=0.04, help="Prior effect size variance for ABF")
parser.add_argument(
"--reference-genome",
type=str,
default="GRCh37",
choices=["GRCh37", "GRCh38"],
help="Reference genome of sumstats",
)
parser.add_argument("--summary", action="store_true", help="Whether to output a summary file")
parser.add_argument("--case-control", action="store_true", help="Whether the input is from a case-control study")
parser.add_argument(
"--r2-threshold", type=float, default=0.6, help="r2 threshold of DENTIST-S outlier variants for prediction"
)
parser.add_argument(
"--nlog10p-dentist-s-threshold",
type=float,
default=4,
help="-log10 DENTIST-S P value threshold of DENTIST-S outlier variants for prediction",
)
args = parser.parse_args()
if args.out_summary is None:
args.out_summary = f"{os.path.splitext(args.out)[0]}.summary.txt"
if args.ld_reference == "custom" and (
(args.custom_ld_path is None) or (args.custom_ld_variant_index_path is None) or (args.custom_ld_label is None)
):
        parser.error(
            "All of --custom-ld-path, --custom-ld-variant-index-path, and --custom-ld-label should be provided"
        )
main(args)
|
from drivers.driver import IDriver
from selenium import webdriver
class DriverChrome(IDriver):
def __init__(self):
        self.driver = None
def instanceDriver(self):
self.driver = webdriver.Chrome(executable_path=r'C:\Users\pc\Desktop\django\chromedriver.exe')
def freeDriver(self):
self.driver.quit()
def returnDriver(self):
return self.driver
|
from discord.ext import commands
from DaveBOT import checks
class Admin:
"""Admin-only commands."""
def __init__(self, bot):
self.client = bot
@commands.command(hidden=True)
@checks.adminonly()
async def load(self, *, module: str):
"""Load a module."""
try:
self.client.load_extension(module)
except Exception as e:
await self.client.say(f"{type(e).__name__}: {e}")
else:
await self.client.say("Module loaded.")
@commands.command(hidden=True)
@checks.adminonly()
async def unload(self, *, module: str):
"""Unload a module."""
try:
self.client.unload_extension(module)
except Exception as e:
await self.client.say(f"{type(e).__name__}: {e}")
else:
await self.client.say("Module unloaded.")
@commands.command(hidden=True)
@checks.adminonly()
async def reload(self, *, module: str):
"""Reload a module."""
try:
self.client.unload_extension(module)
self.client.load_extension(module)
except Exception as e:
await self.client.say(f"{type(e).__name__}: {e}")
else:
await self.client.say("Module reloaded.")
def setup(bot):
bot.add_cog(Admin(bot))
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
wiki = requests.get('https://en.wikipedia.org/wiki/List_of_mass_shootings_in_the_United_States')
soup = BeautifulSoup(wiki.content, 'html.parser')
tables = soup.find_all('table', class_='wikitable sortable')
alltables=pd.DataFrame()
for x in tables:
    df = pd.read_html(str(x))[0]  # read_html returns a list of DataFrames
    alltables = pd.concat([alltables, df], ignore_index=True)
print(alltables)
alltables.to_csv('data.csv')
|
import numpy as np
from copy import deepcopy
import lasagne
from braindecode.veganlasagne.layers import get_all_paths
from braindecode.veganlasagne.layer_util import set_to_new_input_layer
def get_longest_path(final_layer):
all_paths = get_all_paths(final_layer)
path_lens = [len(p) for p in all_paths]
i_longest = np.argmax(path_lens)
return all_paths[i_longest]
def create_adversarial_model(final_layer, i_split_layer):
final_adv = deepcopy(final_layer)
longest_path_seiz = get_longest_path(final_layer)
longest_path_adv = get_longest_path(final_adv)
longest_path_adv[i_split_layer+1].input_layer = longest_path_seiz[
i_split_layer]
# just in case there is a hanging input layer
# maybe this is not the full fix to the problem
# of multiple paths through the final layer network
# mostly just a hack for now
in_l_main = [l for l in lasagne.layers.get_all_layers(final_layer)
if l.__class__.__name__ == 'InputLayer']
assert len(in_l_main) == 1
set_to_new_input_layer(final_adv, in_l_main[0])
# check if everything is correct, layers up to i split layer
# are shared, later ones not
longest_path_adv = get_longest_path(final_adv)
for i_layer in range(i_split_layer+1):
assert longest_path_adv[i_layer] == longest_path_seiz[i_layer]
for i_layer in range(i_split_layer+1, len(longest_path_adv)):
assert longest_path_adv[i_layer] != longest_path_seiz[i_layer]
return final_adv
|
import urllib
import urllib2
import hashlib
url = 'http://219.223.254.66/cgi-bin/srun_portal'
val = {
'action' : 'logout'
}
data = urllib.urlencode(val)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req)
the_page = response.read()
print the_page
|
from django.urls import path
from reddituser import views
urlpatterns = [
path('<str:username>/', views.user_profile_view, name='user_profile'),
path('<str:username>/delete/', views.delete_profile_view, name='delete_profile'),
]
|
# Generated by Django 2.2.1 on 2019-05-09 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portalapp', '0011_auto_20190507_1806'),
]
operations = [
migrations.AddField(
model_name='student',
name='mentor',
field=models.CharField(default=' ', max_length=100),
),
]
|
import time
import datetime
def create_timestamp():
today_now = datetime.datetime.now()
# __secs = today_now.second
# __minutes = today_now.minute
# __hour = today_now.hour
# __days = today_now.day
# __month = today_now.month
# __year = today_now.year
return today_now.timestamp()
def coin_data_formatter(list__):
"""
    Take a list of coin data fields and return a formatted string
    that will be used for hashing.
"""
data_to_hash = " _ ".join(list__)
return data_to_hash
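# Minimal usage sketch (added for illustration; hashlib and the sample fields are assumptions,
# not part of the original module): hash the formatted coin data together with a timestamp.
if __name__ == "__main__":
    import hashlib
    fields = [str(create_timestamp()), "sender", "receiver", "10"]
    print(hashlib.sha256(coin_data_formatter(fields).encode("utf-8")).hexdigest())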
|
#!/usr/bin/env python
#coding=utf-8
import os
import logging
import re
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
def config_stats():
"""
config the all port of config.py in the marathon-lb
:None
"""
if not os.path.isfile('config.py'):
logging.debug("The file: config.py doesn't exist...")
return
try:
with open('config.py','r+') as fd:
flag_stats=False
for line in fd.readlines():
with open('config_tmp.py','a+') as ftmp:
stats_r=re.match("listen stats\n",line)
if stats_r or flag_stats:
logging.debug("Find the line:{}".format(line))
if stats_r:
flag_stats=True
ftmp.write(line)
continue
elif flag_stats:
stat_bind=re.match(" bind ",line)
if stat_bind:
logging.debug("Find the line:{}".format(line))
stat_port=os.getenv("STATS")
logging.debug("Get the envvariable:{}".format(stat_port))
sub_r=" bind 0.0.0.0:{}".format(stat_port)
line=re.sub(r" bind 0.0.0.0:10002",sub_r,line)
logging.debug("The dealed line :{}".format(sub_r))
ftmp.write(line)
flag_stats=False
else:
                                ftmp.write(line)
else:
ftmp.write(line)
except Exception:
logging.debug("Open or Read the config.py Failed!...")
os.system("rm -rf config.py")
os.system("cp config_tmp.py config.py")
os.system("rm -rf config_tmp.py")
def config_marathon_http_in():
"""
config the all port of config.py in the marathon-lb
:None
"""
if not os.path.isfile('config.py'):
logging.debug("The file: config.py doesn't exist...")
return
try:
with open('config.py','r+') as fd:
flag_marathon_http_in=False
for line in fd.readlines():
with open('config_tmp.py','a+') as ftmp:
marathon_http_in_r=re.match("frontend marathon_http_in\n",line)
if marathon_http_in_r or flag_marathon_http_in:
logging.debug("Find the line:{}".format(line))
if marathon_http_in_r:
flag_marathon_http_in=True
ftmp.write(line)
continue
elif flag_marathon_http_in:
stat_bind_marathon_http_in=re.match(" bind ",line)
if stat_bind_marathon_http_in:
logging.debug("Find the line:{}".format(line))
marathon_http_in_port=os.getenv("MARATHON_HTTP_IN")
logging.debug("Get the envvariable:{}".format(marathon_http_in_port))
marathon_http_in_sub_r=" bind *:{}".format(marathon_http_in_port)
marathon_http_in_line=re.sub(r" bind \*:8080",marathon_http_in_sub_r,line)
logging.debug("The dealed line :{}".format(marathon_http_in_line))
ftmp.write(marathon_http_in_line)
flag_marathon_http_in=False
else:
                                ftmp.write(line)
else:
ftmp.write(line)
except Exception:
logging.debug("Open or Read the config.py Failed!...")
os.system("rm -rf config.py")
os.system("cp config_tmp.py config.py")
os.system("rm -rf config_tmp.py")
def config_marathon_http_appid_in():
"""
config the marathon_http_appid_in's port of config.py in the marathon-lb
:None
"""
if not os.path.isfile('config.py'):
logging.debug("The file: config.py doesn't exist...")
return
try:
with open('config.py','r+') as fd:
flag_marathon_http_appid_in=False
for line in fd.readlines():
with open('config_tmp.py','a+') as ftmp:
marathon_http_appid_in_r=re.match("frontend marathon_http_appid_in\n",line)
if marathon_http_appid_in_r or flag_marathon_http_appid_in:
logging.debug("Find the line:{}".format(line))
if marathon_http_appid_in_r:
flag_marathon_http_appid_in=True
ftmp.write(line)
continue
elif flag_marathon_http_appid_in:
stat_bind_marathon_http_appid_in=re.match(" bind ",line)
if stat_bind_marathon_http_appid_in:
logging.debug("Find the line:{}".format(line))
marathon_http_appid_in_port=os.getenv("MARATHON_HTTP_APPID_IN")
logging.debug("Get the envvariable:{}".format(marathon_http_appid_in_port))
marathon_http_appid_in_sub_r=" bind *:{}".format(marathon_http_appid_in_port)
marathon_http_appid_in_line=re.sub(r" bind \*:9091",marathon_http_appid_in_sub_r,line)
logging.debug("The dealed line :{}".format(marathon_http_appid_in_line))
ftmp.write(marathon_http_appid_in_line)
flag_marathon_http_appid_in=False
else:
                                ftmp.write(line)
else:
ftmp.write(line)
except Exception:
logging.debug("Open or Read the config.py Failed!...")
os.system("rm -rf config.py")
os.system("cp config_tmp.py config.py")
os.system("rm -rf config_tmp.py")
def config_marathon_https_in():
"""
config the all port of config.py in the marathon-lb
:None
"""
if not os.path.isfile('config.py'):
logging.debug("The file: config.py doesn't exist...")
return
try:
with open('config.py','r+') as fd:
flag_marathon_https_in=False
for line in fd.readlines():
with open('config_tmp.py','a+') as ftmp:
marathon_https_in_r=re.match("frontend marathon_https_in\n",line)
if marathon_https_in_r or flag_marathon_https_in:
logging.debug("Find the line:{}".format(line))
if marathon_https_in_r:
flag_marathon_https_in=True
ftmp.write(line)
continue
elif flag_marathon_https_in:
stat_bind_marathon_https_in=re.match(" bind ",line)
if stat_bind_marathon_https_in:
logging.debug("Find the line:{}".format(line))
marathon_https_in_port=os.getenv("MARATHON_HTTPS_IN")
logging.debug("Get the envvariable:{}".format(marathon_https_in_port))
marathon_https_in_sub_r=" bind *:{} ssl ".format(marathon_https_in_port)+"{"+"sslCert"+"}"
logging.debug("The marathon_https_in_sub_r : {}".format(marathon_https_in_sub_r))
marathon_https_in_line=re.sub(r" bind \*:443 ssl \{sslCerts\}",marathon_https_in_sub_r,line)
logging.debug("The dealed line :{}".format(marathon_https_in_line))
ftmp.write(marathon_https_in_line)
flag_marathon_https_in=False
else:
                                ftmp.write(line)
else:
ftmp.write(line)
except Exception:
logging.debug("Open or Read the config.py Failed!...")
os.system("rm -rf config.py")
os.system("cp config_tmp.py config.py")
os.system("rm -rf config_tmp.py")
if __name__=='__main__':
config_stats()
config_marathon_http_in()
config_marathon_http_appid_in()
#config_marathon_https_in()
|
# import pytest, sys
from helpers_for_tests import reset_queries, run_args_on_parser as runargs
# sys.path.insert(1, './backup')
# from parser import create_parser
def test_no_args():
result = runargs([])
assert "No arguments were provided." in result.err
def test_check_if_enter_something_other_than_config_add_update_remove_run():
# with pytest.raises(SystemExit):
# parser.parse_args(['foo'])
# out, err = capfd.readouterr()
result = runargs(["foo"])
assert "invalid choice: 'foo'" in result.err
# assert "error: argument command: invalid choice: 'foo'" in out
|
def evaporator(content, evap_per_day, threshold):
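    """Return the number of whole days until the remaining content drops below
    `threshold` percent of the original amount, losing `evap_per_day` percent of
    the current content per day."""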
day = 0
evap_per_day /= 100.0
threshold = content * (threshold / 100.0)
while content >= threshold:
content -= content * evap_per_day
day += 1
return day
|
from src.ChessQueenWorld import ChessQueenWorld
cqw = ChessQueenWorld()
#cqw.solve_sample_board(0)
cqw.bulk_solve(10)
|
import requests
import re
from urllib.parse import urlparse
import sys
import keyboard
url = "https://ifunny.co/"
file = open("links.txt", "a+")
visited = []
iteration = 0
start_index = 0
recursive_depth = -1
def scrape(links):
global iteration
output = []
valid = True
for link in links:
if link not in visited or link == url:
try:
parsed = urlparse(link)
base = f"{parsed.scheme}://{parsed.netloc}"
html = requests.get(link)
output = re.findall('''<a\s+(?:[^>]*?\s+)?href="([^"]*)"''', str(html.content))
temp = []
for i in output:
if i.find('/picture/') != -1:
temp.append(i)
output = temp
for o in range(len(output)):
if not urlparse(output[o]).netloc:
link_with_base = base + output[o]
output[o] = link_with_base
file = open("links.txt", "a+")
file.write(str(output))
file.close()
except:
valid = False
pass
if valid == True:
print("Scraped "+str(link))
resp = requests.post('https://qxvxx2xw1g.execute-api.us-east-2.amazonaws.com/basic/submit', json={"body":output})
print(output)
visited.append(link)
iteration += 1
if iteration < recursive_depth:
print("Recursion Reset.")
iteration = 0
scrape([url])
scrape(output[start_index:])
else:
pass
scrape([url])
|
{
'targets': [{
'target_name': 'test',
'type': 'executable',
'dependencies': [
'testlib/testlib.gyp:proxy',
'proxy/proxy.gyp:testlib',
],
}],
}
|
from unittest import TestCase
from phi.field._field_math import data_bounds
from phi.field._point_cloud import distribute_points
from phi.flow import *
def step(particles: PointCloud, obstacles: list, dt: float, **grid_resolution):
# --- Grid Operations ---
velocity = prev_velocity = field.finite_fill(resample(particles, StaggeredGrid(0, 0, particles.bounds, **grid_resolution), outside_handling='clamp', scatter=True))
occupied = resample(field.mask(particles), CenteredGrid(0, velocity.extrapolation.spatial_gradient(), velocity.bounds, velocity.resolution), scatter=True)
velocity, pressure = fluid.make_incompressible(velocity + (0, -9.81 * dt), obstacles, active=occupied)
# --- Particle Operations ---
particles += resample(velocity - prev_velocity, particles) # FLIP update
# particles = velocity @ particles # PIC update
particles = advect.points(particles, velocity * field.mask(~union(obstacles)), dt, advect.finite_rk4)
particles = fluid.boundary_push(particles, obstacles + [~particles.bounds])
return particles
class FlipTest(TestCase):
def test_single_particles(self):
""" Tests if single particles at the boundaries and within the domain really fall down. """
particles = initial_particles = distribute_points(union(Box['x,y', 0:1, 10:11], Box['x,y', 31:32, 20:21], Box['x,y', 10:11, 10:11]), x=32, y=32, points_per_cell=1) * (0, 0)
self.assertEqual(3, particles.points.points.size)
for i in range(10):
particles = step(particles, [], x=32, y=32, dt=0.05)
assert math.all(particles.points.vector[1] < initial_particles.points.vector[1])
def test_pool(self):
""" Tests if a pool of liquid at the bottom stays constant over time. """
particles = initial_particles = distribute_points(Box['x,y', :, :10], x=32, y=32) * (0, 0)
for i in range(100):
particles = step(particles, [], x=32, y=32, dt=0.05)
occupied_start = initial_particles.with_values(1) @ CenteredGrid(0, 0, x=32, y=32)
occupied_end = particles.with_values(1) @ CenteredGrid(0, 0, x=32, y=32)
math.assert_close(occupied_start.values, occupied_end.values)
math.assert_close(initial_particles.points, particles.points, abs_tolerance=1e-3)
def test_falling_block_long(self):
""" Tests if a block of liquid has a constant shape during free fall. """
particles = initial_particles = distribute_points(Box['x,y', 12:20, 110:120], x=32, y=128) * (0, 0)
initial_bounds = data_bounds(particles)
for i in range(90):
particles = step(particles, [], x=32, y=128, dt=0.05)
math.assert_close(data_bounds(particles).size, initial_bounds.size) # shape of falling block stays the same
assert math.max(particles.points, dim='points').vector['y'] < math.max(initial_particles.points, dim='points').vector['y'] # block really falls
def test_block_and_pool(self):
""" Tests if the impact of a block on a pool has no side-effects (e.g. liquid explosion). """
particles = distribute_points(union(Box['x,y', :, :5], Box['x,y', 12:18, 15:20]), x=32, y=32) * (0, 0)
for i in range(100):
particles = step(particles, [], x=32, y=32, dt=0.05)
assert math.all(particles.points.vector[1] < 15)
def test_symmetry(self):
""" Tests the symmetry of a setup where a liquid block collides with 2 rotated obstacles. """
OBSTACLES = [Box['x,y', 20:30, 10:12].rotated(math.tensor(20)), Box['x,y', 34:44, 10:12].rotated(math.tensor(-20))]
x_low = 26
x_high = 38
y_low = 40
y_high = 50
particles = distribute_points(Box['x,y', x_low:x_high, y_low:y_high], x=64, y=64, center=True) * (0, 0)
x_num = int((x_high - x_low) / 2)
y_num = y_high - y_low
particles_per_cell = 8
total = x_num * y_num
for i in range(100):
print(i)
particles = step(particles, OBSTACLES, x=64, y=64, dt=0.05)
left = particles.points.points[particles.points.vector[0] < 32]
right = particles.points.points[particles.points.vector[0] > 32]
self.assertEqual(left.points.size, right.points.size)
mirrored = math.copy(right).numpy('points,vector')
mirrored[:, 0] = 64 - right[:, 0]
smirrored = np.zeros_like(mirrored)
# --- particle order of mirrored version differs from original one and must be fixed for MSE
# (caused by ordering in phi.physics._boundaries _distribute_points) ---
for p in range(particles_per_cell):
for b in range(x_num):
smirrored[p * total + b * y_num:p * total + (b + 1) * y_num] = mirrored[(p + 1) * total - (b + 1) * y_num:(p + 1) * total - b * y_num]
mse = np.square(smirrored - left.numpy('points,vector')).mean()
if i < 45:
assert mse == 0 # block is still falling, hits obstacles at step 46
else:
# ToDo this currently fails
assert mse <= 1e-3 # error increases gradually after block and obstacles collide
|
the first line we wrote to this file.
|
# coding=utf-8
import math
import os
from src.util import common
def split_by_proportion(src_file_path, target_dir_path, split_file_cnt):
"""按照相等的概率(拟合频率)将文件 src_file 划分为 split_file_cnt 个文件, 并保存在 target_dir_path 目录下.
不保证每个文件的行数严格相等, 只保证将 1 行分配到各文件的概率相等.
"""
with open(src_file_path) as file:
split_files = [open(os.path.join(target_dir_path, 'part-%d.split' % file_no), 'w')
for file_no in range(split_file_cnt)]
for line in file:
file_no = int(math.floor(common.new_proportion() * split_file_cnt))
split_files[file_no].write(line)
[split_file.close() for split_file in split_files]
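# Minimal usage sketch (illustration only; the paths below are placeholders, not part of the module):
# split a corpus file into 4 parts with equal per-line probability.
if __name__ == '__main__':
    split_by_proportion('data/corpus.txt', 'data/splits', 4)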
|
# coding:utf-8
# Import NumPy (numerical operations) and Matplotlib's pyplot module
# matplotlib.pyplot.plot(x, y, label="label text", color="line color", linestyle="line style", linewidth="line width",
#                 marker="marker symbol", markersize="marker size")
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Configure the font
plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese labels
plt.rcParams['axes.unicode_minus'] = False
# 1. Use plot() to draw a line chart
# Set the label text and line width
plt.title("square number", fontsize=24)
plt.xlabel("Value", fontsize=14)
plt.ylabel("square of value", fontsize=14)
plt.tick_params(axis='both', labelsize=14)
x_values = [1, 2, 3, 4, 5]
y_values = [1, 4, 9, 16, 25]
plt.plot(x_values, y_values, linewidth=5)
print("===================折线图1=========================")
plt.show()
# 2. Use Matplotlib to draw sine and cosine curves
# Create the x-axis data: 256 evenly spaced values from -PI to PI
x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
# Create C and S by applying cos and sin to x
C, S = np.cos(x), np.sin(x)
zhfont1 = matplotlib.font_manager.FontProperties(fname="../font/SimHei.ttf")
# Set the label text and line width
plt.title("正弦与余弦曲线", fontsize=24)
# Use plot() to draw the sine and cosine curves separately
plt.plot(x, C)
plt.plot(x, S)
print("===================正弦与余弦曲线===================")
plt.show()
# 3. Draw a basic line chart
# Create an 8 x 6 inch figure with a resolution of 80 pixels/inch
plt.figure(figsize=(8, 6), dpi=80)
# Create the x-axis data: 5 evenly spaced values from -2 to 6, i.e. -2, 0, 2, 4, 6
x = np.linspace(-2, 6, 5)
# Line 1
y1 = x + 3
# Line 2
y2 = 3 - x
# Draw a green solid line 1 pixel wide
plt.plot(x, y1, color="green", linewidth=1.0, linestyle="-", label="y1")
# Draw a red dashed line 2 pixels wide
plt.plot(x, y2, color="red", linewidth=2.0, linestyle="--", label="y2")
# Set the x-axis limits to -1..6
plt.xlim(-1, 6)
# Set the y-axis limits to -2..10
plt.ylim(-2, 10)
# Add the legend
plt.legend(loc="upper left")
# Annotate points of interest
# scatter([x], [y], s="point size") draws a scatter plot
plt.scatter([3], [6], s=30, color="blue")
plt.scatter([3], [0], s=30, color="red")
# annotate("annotation text", xy=(position to annotate))
plt.annotate("(3,6)", xy=(3.3, 5.5), fontsize=16)
plt.annotate("(3,0)", xy=(3.3, 0), fontsize=16)
# To attach a text note to a point, use text(x, y, s)
plt.text(4, -0.5, "该处为重要点位", fontdict={'size': 12, 'color': 'green'})
# Save the figure
# plt.savefig():
# supports png/pdf/svg/ps etc., chosen by the file extension
# dpi = resolution,
# bbox_inches='tight' tries to trim the blank margins around the chart
# facecolor/edgecolor:
plt.savefig("pic.png", dpi=100, bbox_inches='tight', facecolor="purple", edgecolor="blue")
print("===================折线图2=========================")
plt.show()
# 4. Use bar() to draw a bar chart
# Create an 8 x 6 inch figure with a resolution of 80 pixels/inch
plt.figure(figsize=(8, 6), dpi=80)
# Total number of bars
N = 6
# Sequence holding the value of each bar
values = (5, 16, 20, 25, 23, 28)
# Sequence holding the index of each bar
index = np.arange(N)
# Bar width
width = 0.55
# Draw the bar chart; every bar is blue
ps = plt.bar(index, values, width, label="月均气温", color="#87CEFA")
# x-axis label
plt.xlabel("月份")
# y-axis label
plt.ylabel("温度(摄氏度)")
# Title
plt.title("月均气温")
# Axis ticks (the 1st list gives the tick positions, the 2nd the labels shown)
plt.xticks(index, ('一月', '二月', '三月', '四月', '五月', '六月'))
# arange creates an evenly spaced array: np.arange([start, ]stop, [step, ]dtype=None)
plt.yticks(np.arange(0, 50, 10))
# Legend
plt.legend(['温度'], loc="upper right")
print("===================柱状图==========================")
plt.show()
# 5. Use pie() to draw a pie chart
labels = '大一', '大二', '大三', '大四'  # labels of the individual slices
sizes = [15, 30, 45, 10]  # list of values
# Make the "大二" slice stand out
explode = (0, 0.1, 0, 0)  # explode marks the slice pulled out of the pie
# autopct sets the number format of the labels; shadow toggles the shadow; startangle sets the angle the pie starts from
plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
plt.axis('equal')  # keep the pie circular
plt.title('饼图示例')
print("===================饼图===========================")
plt.show()
|
# FOR Loops or count controlled iteration
# FOR loops will run for a predetermined number of times
# FOR loops can also use break and continue as covered in 02
# i is a variable, you can pass one in or create a new one. i is typically used as it relates to "index".
# the range() creates a sequence of values to iterate through (0, 1, 2, 3..)
for i in range(5):
print(i)
print("==============")
# Range can have 2 values passed into. Value 1 is where it will start, Value 2 is where it will enf
for i in range(5, 10):
print(i)
print("==============")
# Range can actually have 3 values passed into it. The third value is the STEP argument which states how many it changes each time
# Such as move in 2s
for i in range(0, 10, 2):
print(i)
print("==============")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-03-20 06:55
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('fitbit', '0003_auto_20180313_0749'),
]
operations = [
migrations.CreateModel(
name='UserFitbitDataSleep',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_of_sleep', models.TextField()),
('data', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fitbit_sleep_data', to=settings.AUTH_USER_MODEL)),
],
),
]
|
import numpy as np
from scipy.integrate import odeint, solve_ivp
import pandas as pd
import matplotlib.pyplot as plt
import datetime
def derivSIR(y, t, N, beta, gamma):
S, I, R = y
dSdt = -beta * S * I / N
dIdt = beta * S * I / N - gamma * I
dRdt = gamma * I
return dSdt, dIdt, dRdt
def derivSIR_RK45(t, y, N, beta, gamma):
S, I, R = y
dSdt = -beta * S * I / N
dIdt = beta * S * I / N - gamma * I
dRdt = gamma * I
return dSdt, dIdt, dRdt
def derivSI_RK45(t, y, N, beta):
S, I = y
dSdt = -beta * S * I / N
dIdt = beta * S * I / N
return dSdt, dIdt
def derivSI(y, t, N, beta):
S, I = y
dSdt = -beta * S * I / N
dIdt = beta * S * I / N
return dSdt, dIdt
# SI and SIR models
# Read data from SALUDATA
data_covid = pd.read_csv('/Users/safer/Desktop/Quinto Semestre Ingeniería de Sistemas/Análisis Numérico/Referencias/covid_19_bog.csv', encoding='cp1252', sep=';')
data_covid['FECHA_DIAGNOSTICO'] = pd.to_datetime(data_covid['FECHA_DIAGNOSTICO'], format='%d/%m/%Y')
# Initial data
pob = 7743955
rec = 0
inf = 1
sus = pob - inf - rec
t = np.arange(0, 360)
r = [pob, inf]
v = [pob, inf, rec]
# Contact and recovery rates
beta, gamma = 0.2775, 0.022
# Solve the systems of equations with odeint
retSI = odeint(derivSI, r, t, args=(pob, beta))
retSIR = odeint(derivSIR, v, t, args=(pob, beta, gamma))
SS, II = retSI.T
S, I, R = retSIR.T
# Solve the systems of equations with RK45
retSI_RK45 = solve_ivp(derivSI_RK45, (t[0], t[-1]), r, 'RK45', args=(pob, beta))
retSIR_RK45 = solve_ivp(derivSIR_RK45, (t[0], t[-1]), v, 'RK45', args=(pob, beta, gamma))
SS_RK45, II_RK45 = retSI_RK45.y[0], retSI_RK45.y[1]
S_RK45, I_RK45, R_RK45 = retSIR_RK45.y[0], retSIR_RK45.y[1], retSIR_RK45.y[2]
# Plots for odeint
dfSI = pd.DataFrame({
'Susceptibles': SS,
'Infectados': II,
'Dia': t
})
plt.style.use('ggplot')
dfSI.plot(x='Dia',
y=['Infectados', 'Susceptibles'],
color=['#bb6424', '#aac6ca', '#cc8ac0'],
kind='line',
stacked=False,
title="Modelo SI (Odeint)")
plt.show()
dfSIR = pd.DataFrame({
'Susceptibles': S,
'Infectados': I,
'Recuperados': R,
'Dia': t
})
plt.style.use('ggplot')
dfSIR.plot(x='Dia',
y=['Infectados', 'Susceptibles', 'Recuperados'],
color=['#bb6424', '#aac6ca', '#cc8ac0'],
kind='area',
stacked=False,
title="Modelo SIR (Odeint)")
plt.show()
# Plots for RK45
dfSI_RK45 = pd.DataFrame({
'Susceptibles': SS_RK45,
'Infectados': II_RK45,
'Dia': retSI_RK45.t
})
plt.style.use('ggplot')
dfSI_RK45.plot(x='Dia',
y=['Infectados', 'Susceptibles'],
color=['#bb6424', '#aac6ca', '#cc8ac0'],
kind='line',
stacked=False,
title="Modelo SI (RK45)")
plt.show()
dfSIR = pd.DataFrame({
'Susceptibles': S_RK45,
'Infectados': I_RK45,
'Recuperados': R_RK45,
'Dia': retSIR_RK45.t
})
plt.style.use('ggplot')
dfSIR.plot(x='Dia',
y=['Infectados', 'Susceptibles', 'Recuperados'],
color=['#bb6424', '#aac6ca', '#cc8ac0'],
kind='area',
stacked=False,
title="Modelo SIR (RK 45)")
plt.show()
# Error computation
TEMP = 14
index = 0
errorSI = []
errorSIR = []
start_date = pd.to_datetime('2020-03-06')
end_date = pd.to_datetime('2020-03-20')
for i in range(10):
df_quincenal = data_covid.loc[(data_covid['FECHA_DIAGNOSTICO'] > start_date) & (data_covid['FECHA_DIAGNOSTICO'] < end_date)]
errorSI.append(abs(II[index] - len(df_quincenal)))
errorSIR.append(abs(I[index] - len(df_quincenal)))
start_date = end_date
end_date += datetime.timedelta(days=TEMP)
index += TEMP
dfER = pd.DataFrame({
'Errores SI': errorSI,
'Errores SIR': errorSIR,
})
print(dfER)
print("----------------------------------------------")
# Predator-prey model
def euler_completo(x0, y0, h, f, g, a, b):
val_x = []
val_y = []
val_t = []
x = x0
y = y0
t = 0
while t < b:
val_t.append(t)
val_x.append(x)
val_y.append(y)
x = x + h * f(x,y)
y = y + h * g(x,y)
t += h
return val_t, val_x, val_y
def rungeKutta(f, g, x0, y0, a, b, h):
t = np.arange(a, b, h)
n = len(t)
x = np.zeros(n)
y = np.zeros(n)
x[0] = x0
y[0] = y0
for i in range(0, n - 1):
k1 = h*f(x[i], y[i])
l1 = h*g(x[i], y[i])
k2 = h*f(x[i] + k1/2, y[i] + l1/2)
l2 = h*g(x[i] + k1/2, y[i] + l1/2)
k3 = h*f(x[i] + k2/2, y[i] + l2/2)
l3 = h*g(x[i] + k2/2, y[i] + l2/2)
k4 = h*f(x[i] + k3, y[i] + l3)
l4 = h*g(x[i] + k3, y[i] + l3)
        x[i + 1] = x[i] + (1/6) * (k1 + 2 * k2 + 2 * k3 + k4)
        y[i + 1] = y[i] + (1/6) * (l1 + 2 * l2 + 2 * l3 + l4)
return t, y, x
f = lambda x, y: 0.4*x - 0.3*x*y
g = lambda x, y: -0.37*y + 0.05*x*y
x0 = 3
y0 = 1
a = 0
b = 100
h = 1
ti, y, x = rungeKutta(f, g, x0, y0, a, b, h)
plt.plot(ti, y, 'r--',ti, x, 'c.-')
plt.xlabel("Tiempo")
plt.ylabel("Poblacion")
plt.title('Modelo Depredador-Presa (Runge-Kutta)')
plt.legend(['Datos Depredador', 'Datos Presa'])
plt.show()
# Error computation
datos_error_dp = pd.read_csv("C:/Users/safer/Desktop/Quinto Semestre Ingeniería de Sistemas/Análisis Numérico/Referencias/datosErrorDP.csv", encoding='cp1252', sep=';')
def cambio_punto_coma(df, col_name):
df[col_name] = df[col_name].apply(lambda x: float(x.replace(',', '.')))
return df
datos_error_dp.pipe(cambio_punto_coma,'x')
datos_error_dp.pipe(cambio_punto_coma,'y')
datos_error_dp = datos_error_dp.to_numpy()
errorRelativoPresa = (abs(datos_error_dp[len(datos_error_dp)-1, 2] - x[len(x) - 1]) / datos_error_dp[len(datos_error_dp) - 1, 2]) * 100
errorRelativoDepredador = (abs(datos_error_dp[len(datos_error_dp)-1, 3] - y[len(y) - 1]) / datos_error_dp[len(datos_error_dp) - 1, 3]) * 100
print("Eror Relativo Presas RK = {}% ".format(errorRelativoPresa))
print("Error Relativo Depredadores RK {}%".format(errorRelativoDepredador))
print("----------------------------------------------")
ti, x, y = euler_completo(x0,y0,h,f,g,a,b)
plt.plot(ti, y, 'r--',ti, x, 'c.-')
plt.xlabel("Tiempo")
plt.ylabel("Poblacion")
plt.title('Modelo Depredador-Presa (Euler)')
plt.legend(['Datos Depredador', 'Datos Presa'])
plt.show()
# Error computation
datos_error_dp = pd.read_csv("C:/Users/safer/Desktop/Quinto Semestre Ingeniería de Sistemas/Análisis Numérico/Referencias/datosErrorDP.csv", encoding='cp1252', sep=';')
def cambio_punto_coma(df, col_name):
df[col_name] = df[col_name].apply(lambda x: float(x.replace(',', '.')))
return df
datos_error_dp.pipe(cambio_punto_coma,'x')
datos_error_dp.pipe(cambio_punto_coma,'y')
datos_error_dp = datos_error_dp.to_numpy()
errorRelativoPresa = (abs(datos_error_dp[len(datos_error_dp)-1, 2] - x[len(x) - 1]) / datos_error_dp[len(datos_error_dp) - 1, 2]) * 100
errorRelativoDepredador = (abs(datos_error_dp[len(datos_error_dp)-1, 3] - y[len(y) - 1]) / datos_error_dp[len(datos_error_dp) - 1, 3]) * 100
print("Eror Relativo Presas Euler = {}% ".format(errorRelativoPresa))
print("Error Relativo Depredadores Euler {}%".format(errorRelativoDepredador))
|
#edit-mode: -*- python -*-
#coding:gbk
# Working root path.
WORKROOT('../../../')
# Copy using hard links.
CopyUsingHardLink(True)
# Support building for 32-bit/64-bit platforms
#ENABLE_MULTI_LIBS(True)
# C preprocessor flags.
CPPFLAGS('-D_GNU_SOURCE -D__STDC_LIMIT_MACROS -DVERSION=\\\"1.0.0.0\\\"')
# Extra preprocessor flags for 32-bit targets
#CPPFLAGS_32('-D_XOPEN_SOURE=500')
# C compiler flags.
CFLAGS('-std=c++11 -g -pipe -W -Wall -fPIC')
# C++ compiler flags.
CXXFLAGS('-std=c++11 -g -pipe -W -Wall -Wno-unused-parameter -fPIC')
# IDL compiler flags
IDLFLAGS('--compack')
# UBRPC compiler flags
UBRPCFLAGS('--compack')
# Header include paths.
INCPATHS('. ../src ../include ../proto')
# Linker flags.
LDFLAGS('-lpthread -lcrypto -lrt -lcrypt')
# Dependent modules
ImportConfigsFrom('../')
# Different dependency paths for 32-bit/64-bit builds.
#CONFIGS_32('lib2/ullib')
#CONFIGS_64('lib2-64/ullib')
test_sources=GLOB('../src/*.cpp ../src/*.cc *.cpp ../proto/*.cc').replace("../src/main.cpp", ' ')
Application('gtest',Sources(test_sources),ENV.LinkLibs()-LinkLibs('../../third-64/boost/lib/libboost_prg_exec_monitor.a')-LinkLibs('../../third-64/boost/lib/libboost_test_exec_monitor.a')-LinkLibs('../../third-64/boost/lib/libboost_unit_test_framework.a')-LinkLibs('../../public/bslext/output/lib/libbsl_var_vscript.a'))
|
import os
import sys
import importlib
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line_word(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
def tokenize_line_char(line):
line = SPACE_NORMALIZER.sub("", line)
line = line.strip()
return list(line)
def import_user_module(module_path):
if module_path is not None:
module_path = os.path.abspath(module_path)
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
sys.path.pop(0)
|
import Tkinter as tk
import threading
import pyaudio
import wave
from array import array
from os import stat
import socket
import time
import os
global x
x=0
def sendLastFun():
send(x)
def send(n):
time.sleep(2)
arr = array('B') # create binary array to hold the wave file
name = "File" + str(n) + ".wav"
result = stat(name)
f = open(name, 'rb') # this will send
arr.fromfile(f, result.st_size) # using file size as the array length
#print("Length of data: " + str(len(arr)))
    HOST = 'localhost'  # loopback address to check the transfer; IPv4 over TCP/IP, one-way
PORT = 50007
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send(arr)
print('Finished sending...')
s.close()
    f.close()
print('done.')
def TestSendFun():
arr = array('B') # create binary array to hold the wave file
name = "swapFile.wav"
result = stat(name) # sample file is in the same folder
f = open(name, 'rb') # this will play
arr.fromfile(f, result.st_size) # using file size as the array length
#print("Length of data: " + str(len(arr)))
HOST = 'localhost'
PORT = 50007
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
s.send(arr)
print('Finished sending swap File...')
s.close()
time.sleep(2)
os.system('python Pc2.py')
exit()
send(x+1)
|
"""
Необходимо использовать функции. Программа должна поддерживать следующие арифметические операции:
+, -, /, *, %(получение процента от числа), **(возведение в квадрат), **х(возведение в степень числа х).
Запрещено подключать дополнительные модули. Для вывода результата необходимо использовать функцию print().
"""
args = input().replace("**", "$")
print(args)
op = ""
ops = ("$", "+", "-", "/", "*", "%", "/")
for item in args:
if item in ops:
op = item
op_idx = args.find(op)
result = 0
print(op)
if op == "%":
result = int(args[:op_idx]) / 100
else:
first = 0
list = 0
first = int(args[:op_idx])
last = int(args[op_idx + 1:])
print(first, op, last)
print(result)
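# Illustrative inputs/outputs for the fixed version above (assumed input format):
#   "7+3"   -> 10.0
#   "5**"   -> 25.0   (** squares the number)
#   "2**10" -> 1024.0 (**x raises to the power x)
#   "40%"   -> 0.4    (percentage of the number)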
|
#coding:gb2312
# Analyze a text file
filename = 'test.txt'
try:
with open(filename) as f:
infomation = f.read()
except FileNotFoundError:
msg = ("Sorry,the file "+filename+" does not exit.")
print(msg)
else:
"""
对变量infomation(它现在是一个长长的字符串,包含断箭的全部文本)
调用方法split(),以生成一个列表,其中包含这个文章中的所有文字
"""
words = infomation.split()
num_words = len(words)
print("这篇文章一共有 "+str(num_words)+" 个单词。")
|
# -*- coding: utf-8 -*-
from flask import Flask, g, json, render_template, Response, request
import psycopg2
import psycopg2.extras
from Config import Config
import logging
import os.path
from subprocess import call
import tempfile
from zipfile import *
from urllib2 import urlopen, URLError, HTTPError
app = Flask(__name__)
### CONFIG
# Load the configuration
crr_path = os.path.dirname(os.path.realpath(__file__))
config = Config(os.path.join(crr_path, "GeepsAdminZoneService.cfg"))
### LOGGING
# Set the logging level
logging.basicConfig(level=eval("logging." + config.log_mode))
LOG_FILE_PATH = os.path.join(crr_path, config.log_file)
# Set the log message format
# http://gyus.me/?p=418
logger = logging.getLogger("AdminZone")
formatter = logging.Formatter('[%(levelname)s] %(asctime)s > %(message)s')
fileHandler = logging.FileHandler(LOG_FILE_PATH)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
### HANGUL
# Set the default string encoding to utf-8 so Korean arguments are handled without errors
# http://libsora.so/posts/python-hangul/
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
### DATABASE
# DB handling structure modeled on "Using SQLite 3 with Flask"
# http://flask.pocoo.org/docs/0.10/patterns/sqlite3/#sqlite3
def connect_to_database():
if config.db_pwd:
return psycopg2.connect("dbname={} user={} password={}"
.format(config.db_database, config.db_user, config.db_pwd))
else:
return psycopg2.connect("dbname={} user={}"
.format(config.db_database, config.db_user))
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = connect_to_database()
logger.info("### DB CONNECTED.")
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
logger.info("### DB DISCONNECTED.")
def query_db(query, args=(), one=False, cursor_factory=None):
logger.debug(query)
try:
if cursor_factory:
cur = get_db().cursor(cursor_factory=cursor_factory)
else:
cur = get_db().cursor()
cur.execute(query, args)
rv = cur.fetchall()
except Exception as e:
logger.error("[DB ERROR] {}\n L___ {}", str(e), query)
rv = (None,)
finally:
cur.close()
return (rv[0] if rv else None) if one else rv
def get_class1():
return query_db("select distinct class1 from adminzone_meta order by class1")
def get_class2(class1):
return query_db("select distinct class2 from adminzone_meta where class1 = '{}' order by class2".format(class1.encode("utf-8")))
def get_class3(class1, class2):
return query_db("select distinct class1, class2, class3 from adminzone_meta where class1 = ? and class2 = ? order by class1, class2, class3", class1.encode("utf-8"), class2.encode("utf-8"))
def get_timing(class1, class2):
return query_db("select distinct timing, table_name from adminzone_meta where class1 = '{}' and class2 = '{}' order by timing desc".format(class1.encode("utf-8"), class2.encode("utf-8")))
def get_all_meta():
    # Return the rows as col_name:value records (named tuples)
# http://initd.org/psycopg/docs/extras.html
return query_db("select * from adminzone_meta order by class1, class2, class3, timing desc", cursor_factory=psycopg2.extras.NamedTupleCursor)
def get_all_meta_json():
res = get_all_meta()
dict_res = dict()
for row in res:
class1 = row.class1
level = 1
class2 = row.class2
if class2: level = 2
class3 = row.class3
if class3: level = 3
data = {'table_name':row.table_name, 'timing':row.timing, 'agency':row.agency,
'source_url':row.source_url, 'image_url':row.image_url, 'source_name':row.source_name,
'description':row.description}
if level == 1:
if not dict_res.has_key(class1):
dict_res[class1] = list()
dict_res[class1].append(data)
elif level == 2:
if not dict_res.has_key(class1):
dict_res[class1] = dict()
if not dict_res[class1].has_key(class2):
dict_res[class1][class2] = list()
dict_res[class1][class2].append(data)
else: # level == 3
if not dict_res.has_key(class1):
dict_res[class1] = dict()
if not dict_res[class1].has_key(class2):
dict_res[class1][class2] = dict()
if not dict_res[class1][class2].has_key(class3):
dict_res[class1][class2][class3] = list()
dict_res[class1][class2][class3].append(data)
return json.dumps(dict_res, ensure_ascii=False)
def get_count_info():
return query_db("select (select count(*) as n_class1 from (select distinct class1 from adminzone_meta) as t_class1), (select count(*) as n_total from adminzone_meta)")
### EVENT
@app.route('/test')
@app.route('/adminzone/test')
def hello():
return "GeepsAdminZoneService Activated!"
@app.route('/api/get_class1')
@app.route('/adminzone/api/get_class1')
def api_get_class1():
out_list = list()
for row in get_class1():
out_list.append(row[0])
ret = Response(json.dumps(out_list, ensure_ascii=False), mimetype='text/json')
ret.content_encoding = 'utf-8'
ret.headers.set("Cache-Control", "public, max-age=604800")
return ret
@app.route('/api/get_all_meta')
@app.route('/adminzone/api/get_all_meta')
def api_get_all_meta():
json_res = get_all_meta_json()
ret = Response(json_res, mimetype='text/json')
ret.content_encoding = 'utf-8'
ret.headers.set("Cache-Control", "public, max-age=604800")
return ret
@app.route('/api/get_image')
@app.route('/adminzone/api/get_image')
def api_get_image():
table_name = request.args.get('table_name', None)
if not table_name:
return Response("table_name 인자가 필요합니다.", 400)
    # Check that the table_name exists
res = query_db("select count(*) from adminzone_meta where table_name = %s", args=(table_name,), one=True)
if res[0] <= 0:
return Response("요청한 TABLE이 없습니다.", 500)
    # Look up the image_url
res = query_db("select image_url from adminzone_meta where table_name = %s", args=(table_name,), one=True)
image_url = res[0]
image_path = os.path.join(config.image_folder, table_name+'.png')
if not os.path.isfile(image_path):
try:
f = urlopen(image_url)
# Open our local file for writing
with open(image_path, "wb") as local_file:
local_file.write(f.read())
#handle errors
        except HTTPError, e:
            logger.error("HTTP Error: {} {}".format(e.code, image_url))
        except URLError, e:
            logger.error("URL Error: {} {}".format(e.reason, image_url))
try:
with open(image_path, "rb") as f:
image_bin = f.read()
except Exception as e:
logger.error("Image 다운로드 중 오류: "+str(e))
return Response("Image 다운로드 중 오류", 500)
ret = Response(image_bin, mimetype='image/png')
return ret
@app.route('/service_page')
@app.route('/adminzone/service_page')
def service_page():
count_info = get_count_info()
all_meta_json = get_all_meta_json()
return render_template("service_page.html",
count_info=count_info[0],
metadata=all_meta_json,
crs_list=config.crs_list)
@app.route('/makefile')
@app.route('/adminzone/makefile')
def makefile():
table_name = request.args.get('table_name', None)
crs = request.args.get('crs', None)
if not crs:
return Response("crs 인자가 필요합니다.", 400)
if not table_name:
return Response("table_name 인자가 필요합니다.", 400)
    # Check that the crs exists
# if not ("EPSG:"+crs) in config.crs_list:
res = query_db("select count(*) from spatial_ref_sys where srid = %s", args=(crs,), one=True)
if res[0] <= 0:
return Response("요청한 CRS가 없습니다.", 500)
    # Check that the table_name exists
res = query_db("select count(*) from adminzone_meta where table_name = %s", args=(table_name,), one=True)
if res[0] <= 0:
return Response("요청한 TABLE이 없습니다.", 500)
    # The file name is set to <table_name>__<crs>
file_base = table_name+"__"+crs
zip_file = os.path.join(config.download_folder, file_base+".zip")
if os.path.isfile(zip_file):
return Response("기존 파일 있음", 200)
temp_dir = tempfile.gettempdir()
shp_file = os.path.join(temp_dir, file_base+".shp")
    # Build the query used to export the table
# http://splee75.tistory.com/93
res = query_db(
"""
select string_agg(txt, ', ')
from (
SELECT concat('SELECT ', string_agg(column_name, ', ')) as txt
FROM information_schema.columns
WHERE table_schema = 'public'
AND table_name = '{table_name}'
AND udt_name != 'geometry'
union
SELECT concat('ST_Transform(', string_agg(column_name, ', '), ',{crs}) as geom FROM ""{table_name}""') as txt
FROM information_schema.columns
WHERE table_schema = 'public'
AND table_name = '{table_name}'
AND udt_name = 'geometry'
) tbl
""".format(crs=crs, table_name=table_name),
one=True)
sql = res[0]
try:
        # Create the shapefile
command = 'pgsql2shp -f {shp_file} -u {user} -P {pwd} {database} "{sql}"'.format(
shp_file=shp_file, user=config.db_user, pwd=config.db_pwd, database=config.db_database, sql=sql)
logger.debug(command)
rc = call(command)
if rc != 0:
return Response("Shape 파일 생성 중 오류", 500)
with ZipFile(zip_file, 'w') as shape_zip:
shape_zip.write(os.path.join(temp_dir, file_base+".shp"), arcname=file_base+".shp")
shape_zip.write(os.path.join(temp_dir, file_base+".shx"), arcname=file_base+".shx")
shape_zip.write(os.path.join(temp_dir, file_base+".dbf"), arcname=file_base+".dbf")
shape_zip.write(os.path.join(temp_dir, file_base+".prj"), arcname=file_base+".prj")
os.remove(os.path.join(temp_dir, file_base+".shp"))
os.remove(os.path.join(temp_dir, file_base+".shx"))
os.remove(os.path.join(temp_dir, file_base+".dbf"))
os.remove(os.path.join(temp_dir, file_base+".prj"))
except Exception as e:
logger.error("Shape 파일 생성 중 오류: "+str(e))
return Response("Shape 파일 생성 중 오류", 500)
return Response("파일 생성 완료", 200)
@app.route('/download')
@app.route('/adminzone/download')
def download():
table_name = request.args.get('table_name', None)
crs = request.args.get('crs', None)
if not crs:
return Response("crs 인자가 필요합니다.", 400)
if not table_name:
return Response("table_name 인자가 필요합니다.", 400)
    # The file name is set to <table_name>__<crs>
file_base = table_name+"__"+crs
zip_file = os.path.join(config.download_folder, file_base+".zip")
if not os.path.isfile(zip_file):
return Response("ZIP 파일 없음", 500)
try:
with open(zip_file, "rb") as f:
zip_bin = f.read()
except Exception as e:
logger.error("Shape 다운로드 중 오류: "+str(e))
return Response("Shape 다운로드 중 오류", 500)
ret = Response(zip_bin, mimetype='application/zip')
ret.headers["Content-Disposition"] = "attachment; filename={}".format(file_base+".zip")
return ret
if __name__ == '__main__':
app.run()
|
from __future__ import annotations
from dataclasses import dataclass
from typing import List
@dataclass
class Node:
val: int = None
next: Node = None
@property
def last(self) -> bool:
return self.next is None
def __lt__(self, other: Node) -> bool:
return self.val < other.val
def merge(lists: List[Node]) -> Node:
"""
Merges linked lists.
"""
    while len(lists) > 1:
        merged = [Merger(lists[i], lists[i + 1]).merge() for i in range(0, len(lists) - 1, 2)]
        if len(lists) % 2:  # an odd list count leaves the last list unpaired; carry it to the next round
            merged.append(lists[-1])
        lists = merged
    return lists[0]
class Merger:
def __init__(self, node_1: Node, node_2: Node):
self.nodes = [node_1, node_2]
self.start = Node()
self.point = self.start
def merge(self) -> Node:
"""
Merges two linked lists.
"""
while not self._reached_end:
self._move_point()
self._choose_node(self._min)
self._connect_rest(self._min)
return self.start.next
@property
def _min(self) -> int:
"""
Returns list with lesser minimum element.
"""
return 0 if self.nodes[0] < self.nodes[1] else 1
@property
def _reached_end(self) -> bool:
"""
        True if one of the nodes is the last in its list.
"""
return any(node.last for node in self.nodes)
def _move_forward(self, node_index) -> None:
"""
Switches node to next one.
"""
self.nodes[node_index] = self.nodes[node_index].next
def _move_point(self):
"""
Creates new element in merged list.
"""
self.point.next = Node()
self.point = self.point.next
def _choose_node(self, node_index):
"""
Copies value from node to current list.
"""
self.point.val = self.nodes[node_index].val
self._move_forward(node_index)
def _connect_rest(self, starting_node_index):
"""
Links nodes to the end of merged list starting from given node.
"""
self.point.next = self.nodes.pop(starting_node_index)
self._scroll_point()
self.point.next = self.nodes.pop()
def _scroll_point(self):
"""
Moves current node to the end of the list.
"""
while self.point.next is not None:
self.point = self.point.next
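# Minimal usage sketch (illustration only, not part of the original module):
# merge two sorted linked lists and print the resulting values.
if __name__ == "__main__":
    a = Node(1, Node(4, Node(7)))
    b = Node(2, Node(3, Node(9)))
    head = merge([a, b])
    values = []
    while head is not None:
        values.append(head.val)
        head = head.next
    print(values)  # expected: [1, 2, 3, 4, 7, 9]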
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that a target marked as 'link_dependency==1' isn't being pulled into
the 'none' target's dependency (which would otherwise lead to a dependency
cycle in ninja).
"""
import TestGyp
# See https://codereview.chromium.org/177043010/#msg15 for why this doesn't
# work with cmake.
test = TestGyp.TestGyp(formats=['!cmake'])
test.run_gyp('test.gyp')
test.build('test.gyp', 'main')
# If running gyp worked, all is well.
test.pass_test()
|
#from PyML import *
#from PyML import ker
import matplotlib.pyplot as plt
import csv as csv
import numpy as np
from sklearn import svm, metrics,cross_validation
from sklearn.multiclass import OneVsRestClassifier,OneVsOneClassifier
from sklearn import preprocessing
def read_data(file_name):
if file_name =='train':
csv_file_object = csv.reader(open('train.csv', 'rb'))
header = csv_file_object.next()
train=[]
labels=[]
for row in csv_file_object:
labels.append(int(row[0]))
train.append(map(int,row[1:]))
return labels,train
if file_name == 'test':
csv_file_object = csv.reader(open('test.csv', 'rb'))
header = csv_file_object.next()
test=[]
for row in csv_file_object:
test.append(row)
return test
#data = np.array(data)
def write_prediction(file_name,prediction):
prediction_file = open(file_name+".csv", "wb")
prediction_file_object = csv.writer(prediction_file)
for i in prediction:
prediction_file_object.writerow((i))
''' set all labels in target 0 and the rest 1'''
def trim_labels(target,labels):
trimmed_labels=[]
for i in labels:
if i in target:
trimmed_labels.append(0)
else:
trimmed_labels.append(1)
return trimmed_labels
'''test by hand'''
def manual_test(clf,train, train_labels,test,test_labels):
clf.fit(train, train_labels)
predicted = clf.predict(test)
print_report(clf,test_labels,predicted)
'''print report and confusion matrx'''
def print_report(clf,expected,predicted):
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
'''cross validation test with one vs all svm'''
def cv_one_vs_all(clf,train, labels):
clf = OneVsRestClassifier(svm.LinearSVC())
scores = cross_validation.cross_val_score(clf,train,labels, cv=5)
print "one vs all", ("{0:.5f}".format(np.mean(scores)))
return clf
'''cross validation test with one vs one svm'''
def cv_one_vs_one(clf,train, labels):
#clf = OneVsOneClassifier(LinearSVC())
scores = cross_validation.cross_val_score(clf,train,labels, cv=5)
print "one vs all", ("{0:.5f}".format(np.mean(scores)))
return clf
def baseline(train, labels):
classifier = svm.SVC(gamma=1)
classifier.fit(train[:1000], labels[:1000])
expected = labels[1000:2000]
predicted = classifier.predict(train[1000:2000])
#print_report(classifier, expected, predicted)
print("Classification report for classifier \n%s\n"
% metrics.classification_report(expected, predicted))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
return expected,predicted
def F_score(train,labels):
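    # Intended to compute a Fisher-style score per feature: scatter of the per-class feature means
    # around the overall mean, divided by the averaged within-class scatter; larger scores mark
    # more discriminative features.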
sum_f=[[0,0,0,0,0,0,0,0,0,0] for i in range(len(train[0]))]
for i in range(len(train)):
label=labels[i]
for j in range(len(train[0])):
sum_f[j][label]+=train[i][j]
feature=[0,0,0,0,0,0,0,0,0,0]
for i in range(len(labels)):
feature[labels[i]]+=1;
total_sum=[sum(sum_f[i]) for i in range(len(sum_f))]
total_mean=[(total_sum[i]+0.0)/len(train) for i in range(len(total_sum))]
mean_f=[[0,0,0,0,0,0,0,0,0,0] for i in range(len(train[0]))]
for i in range(len(sum_f)):
for j in range(len(sum_f[0])):
mean_f[i][j]=(sum_f[i][j]+0.0)/feature[j]
f=[0 for i in range(len(train[0]))]
numerator=[0 for i in range(len(train[0]))]
divider=[0 for i in range(len(train[0]))]
for i in range(len(f)):
for j in range(10):
numerator[i]+=(mean_f[i][j]-total_mean[i])*(mean_f[i][j]-total_mean[i])
#print mean_f[i][j], total_mean[i]
de_sum=[0.0 for i in range(10)]
for m in range(len(train)):
l=labels[m]
de_sum[l]+=(train[m][i]+mean_f[i][l]+0.0)*(train[m][i]+mean_f[i][l]+0.0)
#print train[m][i],mean_f[i][l]
de_sum=[(de_sum[i]+0.0)/feature[i] for i in range(len(de_sum))]
divider[i]=sum(de_sum)
if divider[i] == 0:
f[i]=100000
else:
f[i]=numerator[i]/divider[i]
return f
    #for i in range(len(sum_f)):
    # Leftover, unreachable sketch for dropping all-zero feature columns; it references
    # names (remove_all_zero, data, scipy, second) that are not defined in this module.
    #if remove_all_zero(data,total_sum):
    #    for i in range(len(total_sum)):
    #        if total_sum[len(total_sum)-1-i]==0:
    #            scipy.delete(second,len(total_sum)-1-i,1)
if __name__ == "__main__":
labels,train=read_data('train')
A=F_score(train,labels)
#test=read_data('test')
#print 'finish reading test'
#clf = svm.LinearSVC()
#baseline(train,labels)
#cv_one_vs_all(clf,train[:500], labels[:500])
#cv_one_vs_one(clf,train[:500], labels[:500])
#expected,predicted=baseline(train,labels)
#classifier = svm.LinearSVC()
#scores = cross_validation.cross_val_score(classifier,train,labels, cv=5)
#classifier.fit(train[:1000], labels[:1000])
#expected = labels[500:1000]
#predicted = classifier.predict(train[1000:2000])
#print_report(classifier, expected, predicted)
#print("Classification report for classifier %s:\n%s\n"
# % (classifier, metrics.classification_report(expected, predicted)))
#print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
|
#coding:utf-8
from pyecharts import ThemeRiver
import json
def tongji(filepath):
rate = []
with open(filepath,'r') as f:
rows = f.readlines()
for row in rows:
if len(row.split(',')) == 5:
rate.append(row.split(',')[3].replace('\n',''))
v1=(rate.count('5')+rate.count('4.5'))
v2=(rate.count('4')+rate.count('3.5'))
v3=(rate.count('3')+rate.count('2.5'))
v4=(rate.count('2')+rate.count('1.5'))
v5=(rate.count('1')+rate.count('0.5'))
    # Pie chart
from pyecharts import Pie
attr = [u"五星", u"四星", u"三星", u"二星", u"一星"]
print json.dumps(attr,ensure_ascii=False)
    # Review counts for each star level
v=[v1,v2,v3,v4,v5]
print v
if filepath=='xie_zheng.txt':
pie = Pie(u"《邪不压正》饼图-星级玫瑰图示例", title_pos='center', width=900)
pie.add("7-17", attr, v, center=[75, 50], is_random=True,
radius=[30, 75], rosetype='area',
is_legend_show=False, is_label_show=True)
pie.render(filepath.split('.')[0]+'_pie.html')
else:
pie = Pie(u"《我不是药神》饼图-星级玫瑰图示例", title_pos='center', width=900)
pie.add("7-17", attr, v, center=[75, 50], is_random=True,
radius=[30, 75], rosetype='area',
is_legend_show=False, is_label_show=True)
pie.render(filepath.split('.')[0]+'_pie.html')
print "《邪不压正》:"
tongji('xie_zheng.txt')
print '\n'
print "《我不是药神》:"
tongji('yaoshen.txt')
|
def adding(a,b):
my_sum = a+b
my_string = "{} + {} = {}".format(a,b,my_sum)
print(my_string)
def subtract(a,b):
my_sum = a-b
my_string = "{} - {} = {}".format(a,b,my_sum)
print(my_string)
def multiply(a,b):
my_product = a*b
my_string = "{} * {} = {}".format(a,b,my_product)
print(my_string)
def divide(a,b):
my_product = a/b
my_string = "{} / {} = {}".format(a,b,my_product)
print(my_string)
def get_integer(m):
my_number = int(input(m))
return my_number
def menu():
num_one = get_integer("Please enter your first number: ")
num_two = get_integer("Please enter your second number: ")
my_menu = '''
1 : add
2 : subtract
3 : multiply
4 : divide
0 : quit
'''
print(my_menu)
choice = get_integer("Please enter your choice from the menu: ")
if choice ==1:
adding(num_one, num_two)
elif choice ==2:
subtract(num_one, num_two)
elif choice ==3:
multiply(num_one, num_two)
elif choice ==4:
divide(num_one, num_two)
elif choice ==0:
print("Thank you")
else:
print("Unrecognised entry")
#adding(3,5)
#subtract(8,5)
#multiply(3,4)
#divide(35,7)
menu()
|
from Algorithm import GreedySearchDecoder, EncoderRNN, LuongAttnDecoderRNN
from LoadFile import loadPrepareData
from Evaluate import evaluateInput
import argparse
import os
import torch
import torch.nn as nn
parser = argparse.ArgumentParser(description='Train Data')
parser.add_argument("-c", "--checkpoint", type=int,
help="Input checkpoint number")
args = vars(parser.parse_args())
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
# load data and model
save_dir = os.path.join("model", "save")
corpus = "data"
datafile = os.path.join(corpus, "formatted_movie_lines.txt")
model_name = 'cb_model'
hidden_size = 500
encoder_n_layers = 2
decoder_n_layers = 2
dropout = 0.1
attn_model = 'dot'
checkpoint_iter = args['checkpoint'] if args['checkpoint'] != None else 4000
# call function loadPrepareData
voc, pairs = loadPrepareData(corpus, datafile)
loadFilename = os.path.join(save_dir, model_name, corpus,
'{}-{}_{}'.format(encoder_n_layers,
decoder_n_layers, hidden_size),
'{}_checkpoint.tar'.format(checkpoint_iter))
# Load model if a loadFilename is provided
checkpoint = torch.load(loadFilename, map_location=device)
voc.__dict__ = checkpoint['voc_dict']
# load embedding
embedding = nn.Embedding(voc.num_words, hidden_size)
embedding.load_state_dict(checkpoint['embedding'])
# load encoder & decoder models
encoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)
decoder = LuongAttnDecoderRNN(
attn_model, embedding, hidden_size, voc.num_words, decoder_n_layers, dropout)
encoder.load_state_dict(checkpoint['en'])
decoder.load_state_dict(checkpoint['de'])
# Set dropout layers to eval mode
encoder.eval()
decoder.eval()
# Initialize search module
searcher = GreedySearchDecoder(encoder, decoder)
# Begin chatting (uncomment and run the following line to begin)
evaluateInput(encoder, decoder, searcher, voc)
|
from .slam_data import SLAMData
from .state import State
from .cone_finder import find_nearest_cone
class SLAM:
def __init__(self, searchable_size):
self.left_index = 0
self.right_index = 0
self.searchable_size = searchable_size
def update(self, car, all_left_cones, all_right_cones):
# Obtain vehicle state
state = State.from_car(car)
# Get the first nearest left and right cone in terms of index
self.left_index = find_nearest_cone(car.x, car.y, all_left_cones, self.left_index, self.searchable_size)
self.right_index = find_nearest_cone(car.x, car.y, all_right_cones, self.right_index, self.searchable_size)
# Obtain a list of N nearest left and right cones in front of the car, and update the state
left_cones = all_left_cones[self.left_index:(self.left_index + self.searchable_size + 1)]
right_cones = all_right_cones[self.right_index:(self.right_index + self.searchable_size + 1)]
# For the cones detected by SLAM, toggle them so they will be rendered
[cone.set_detected(True) for cone in left_cones + right_cones]
# Return the updated state
return SLAMData(state, left_cones, right_cones)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import io
import zipfile
from pathlib import PurePath
from textwrap import dedent
import pytest
from pants.backend.python import target_types_rules
from pants.backend.python.goals import package_dists
from pants.backend.python.macros.python_artifact import PythonArtifact
from pants.backend.python.subsystems.setuptools import rules as setuptools_rules
from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget
from pants.backend.python.util_rules import local_dists, pex_from_targets
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.local_dists import LocalDistsPex, LocalDistsPexRequest
from pants.backend.python.util_rules.pex_from_targets import InterpreterConstraintsRequest
from pants.backend.python.util_rules.python_sources import PythonSourceFiles
from pants.build_graph.address import Address
from pants.core.util_rules.source_files import SourceFiles
from pants.engine.fs import CreateDigest, Digest, DigestContents, FileContent, Snapshot
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
return PythonRuleRunner(
rules=[
*local_dists.rules(),
*package_dists.rules(),
*setuptools_rules(),
*target_types_rules.rules(),
*pex_from_targets.rules(),
QueryRule(InterpreterConstraints, (InterpreterConstraintsRequest,)),
QueryRule(LocalDistsPex, (LocalDistsPexRequest,)),
],
target_types=[PythonSourcesGeneratorTarget, PythonDistribution],
objects={"python_artifact": PythonArtifact},
)
def test_build_local_dists(rule_runner: PythonRuleRunner) -> None:
foo = PurePath("foo")
rule_runner.write_files(
{
foo
/ "BUILD": dedent(
"""
python_sources()
python_distribution(
name = "dist",
dependencies = [":foo"],
provides = python_artifact(name="foo", version="9.8.7"),
sdist = False,
generate_setup = False,
)
"""
),
foo / "bar.py": "BAR = 42",
foo
/ "setup.py": dedent(
"""
from setuptools import setup
setup(name="foo", version="9.8.7", packages=["foo"], package_dir={"foo": "."},)
"""
),
}
)
rule_runner.set_options([], env_inherit={"PATH"})
sources_digest = rule_runner.request(
Digest,
[
CreateDigest(
[FileContent("srcroot/foo/bar.py", b""), FileContent("srcroot/foo/qux.py", b"")]
)
],
)
sources_snapshot = rule_runner.request(Snapshot, [sources_digest])
sources = PythonSourceFiles(SourceFiles(sources_snapshot, tuple()), ("srcroot",))
addresses = [Address("foo", target_name="dist")]
interpreter_constraints = rule_runner.request(
InterpreterConstraints, [InterpreterConstraintsRequest(addresses)]
)
request = LocalDistsPexRequest(
addresses,
internal_only=True,
sources=sources,
interpreter_constraints=interpreter_constraints,
)
result = rule_runner.request(LocalDistsPex, [request])
assert result.pex is not None
contents = rule_runner.request(DigestContents, [result.pex.digest])
whl_content = None
for content in contents:
if content.path == "local_dists.pex/.deps/foo-9.8.7-py3-none-any.whl":
whl_content = content
assert whl_content
with io.BytesIO(whl_content.content) as fp:
with zipfile.ZipFile(fp, "r") as whl:
assert "foo/bar.py" in whl.namelist()
# Check that srcroot/foo/bar.py was subtracted out, because the dist provides foo/bar.py.
assert result.remaining_sources.source_files.files == ("srcroot/foo/qux.py",)
|
import json
import rdflib as rdfl
import sbol3
import tyto
import labop
import uml
from labop.execution_engine import ExecutionEngine
from labop_convert.opentrons.opentrons_specialization import OT2Specialization
# Dev Note: This is a test of the initial version of the OT2 specialization. Any specs shown here can be changed in the future. Use at your own risk. Here be dragons.
#############################################
# set up the document
print("Setting up document")
doc = sbol3.Document()
sbol3.set_namespace("https://bbn.com/scratch/")
#############################################
# Import the primitive libraries
print("Importing libraries")
labop.import_library("liquid_handling")
print("... Imported liquid handling")
labop.import_library("plate_handling")
print("... Imported plate handling")
labop.import_library("spectrophotometry")
print("... Imported spectrophotometry")
labop.import_library("sample_arrays")
print("... Imported sample arrays")
# Example of how to generate a template for a new protocol step
# print(primitives["https://bioprotocols.org/labop/primitives/liquid_handling/Dispense"].template())
protocol = labop.Protocol("iGEM_LUDOX_OD_calibration_2018")
protocol.name = "iGEM 2018 LUDOX OD calibration protocol for OT2"
protocol.description = """
Test Execution
"""
doc.add(protocol)
# create the materials to be provisioned
CONT_NS = rdfl.Namespace("https://sift.net/container-ontology/container-ontology#")
OM_NS = rdfl.Namespace("http://www.ontology-of-units-of-measure.org/resource/om-2/")
PREFIX_MAP = json.dumps({"cont": CONT_NS, "om": OM_NS})
ddh2o = sbol3.Component("ddH2O", "https://identifiers.org/pubchem.substance:24901740")
ddh2o.name = "Water, sterile-filtered, BioReagent, suitable for cell culture"
doc.add(ddh2o)
ludox = sbol3.Component("LUDOX", "https://identifiers.org/pubchem.substance:24866361")
ludox.name = "LUDOX(R) CL-X colloidal silica, 45 wt. % suspension in H2O"
doc.add(ludox)
p300 = sbol3.Agent("p300_single", name="P300 Single")
doc.add(p300)
load = protocol.primitive_step("ConfigureInstrument", instrument=p300, mount="left")
# Define labware
spec_rack = labop.ContainerSpec(
"working_reagents_rack",
name="rack for reagent aliquots",
queryString="cont:Opentrons24TubeRackwithEppendorf1.5mLSafe-LockSnapcap",
prefixMap=PREFIX_MAP,
)
spec_ludox_container = labop.ContainerSpec(
"ludox_working_solution",
name="tube for ludox working solution",
queryString="cont:MicrofugeTube",
prefixMap=PREFIX_MAP,
)
spec_water_container = labop.ContainerSpec(
"water_stock",
name="tube for water aliquot",
queryString="cont:MicrofugeTube",
prefixMap=PREFIX_MAP,
)
spec_plate = labop.ContainerSpec(
"calibration_plate",
name="calibration plate",
queryString="cont:Corning96WellPlate360uLFlat",
prefixMap=PREFIX_MAP,
)
spec_tiprack = labop.ContainerSpec(
"tiprack", queryString="cont:Opentrons96TipRack300uL", prefixMap=PREFIX_MAP
)
doc.add(spec_rack)
doc.add(spec_ludox_container)
doc.add(spec_water_container)
doc.add(spec_plate)
doc.add(spec_tiprack)
# Load OT2 instrument with labware
load = protocol.primitive_step("LoadRackOnInstrument", rack=spec_rack, coordinates="1")
load = protocol.primitive_step(
"LoadRackOnInstrument", rack=spec_tiprack, coordinates="2"
)
load = protocol.primitive_step("LoadRackOnInstrument", rack=spec_plate, coordinates="3")
# Set up reagents
rack = protocol.primitive_step("EmptyRack", specification=spec_rack)
load_rack1 = protocol.primitive_step(
"LoadContainerInRack",
slots=rack.output_pin("slots"),
container=spec_ludox_container,
coordinates="A1",
)
load_rack2 = protocol.primitive_step(
"LoadContainerInRack",
slots=rack.output_pin("slots"),
container=spec_water_container,
coordinates="A2",
)
provision = protocol.primitive_step(
"Provision",
resource=ludox,
destination=load_rack1.output_pin("samples"),
amount=sbol3.Measure(500, tyto.OM.microliter),
)
provision = protocol.primitive_step(
"Provision",
resource=ddh2o,
destination=load_rack2.output_pin("samples"),
amount=sbol3.Measure(500, tyto.OM.microliter),
)
# Set up target samples
plate = protocol.primitive_step("EmptyContainer", specification=spec_plate)
water_samples = protocol.primitive_step(
"PlateCoordinates", source=plate.output_pin("samples"), coordinates="A1:D1"
)
ludox_samples = protocol.primitive_step(
"PlateCoordinates", source=plate.output_pin("samples"), coordinates="A2:D2"
)
transfer = protocol.primitive_step(
    "Transfer",
    source=load_rack2.output_pin("samples"),  # water aliquot feeds the water wells
    destination=water_samples.output_pin("samples"),
    amount=sbol3.Measure(100, tyto.OM.microliter),
)
transfer = protocol.primitive_step(
"Transfer",
source=load_rack1.output_pin("samples"),
destination=ludox_samples.output_pin("samples"),
amount=sbol3.Measure(100, tyto.OM.microliter),
)
filename = "ot2_ludox_labop"
agent = sbol3.Agent("ot2_machine", name="OT2 machine")
ee = ExecutionEngine(specializations=[OT2Specialization(filename)])
parameter_values = []
execution = ee.execute(protocol, agent, id="test_execution")
# v = doc.validate()
# assert len(v) == 0, "".join(f'\n {e}' for e in v)
doc.write("foo.ttl", file_format="ttl")
# render and view the dot
# dot = protocol.to_dot()
# dot.render(f'{protocol.name}.gv')
# dot.view()
|
# OpenWeatherMap API Key
api_key = "0217370abad49447a775734efd95b987"
|
import support_lib as bnw
import time
import re
# This module handles interactions with special ports, and normal trading ports
def specialPort(purchaseDict):
specialText = "Special Port"
genericText = "Trading Commodities"
xBanner = "html/body/h1"
xWholePage = "html/body"
# cost of the tech, Quantity on hand, input box for purchasing more
xGenesisTorps = ["html/body/form/table[1]/tbody/tr[2]/td[2]", "html/body/form/table[1]/tbody/tr[2]/td[3]", "html/body/form/table[1]/tbody/tr[2]/td[5]/input"]
xSpaceBeacons = ["html/body/form/table[1]/tbody/tr[3]/td[2]", "html/body/form/table[1]/tbody/tr[3]/td[3]", "html/body/form/table[1]/tbody/tr[3]/td[5]/input"]
xEmerWarpDev = ["html/body/form/table[1]/tbody/tr[4]/td[2]", "html/body/form/table[1]/tbody/tr[4]/td[3]", "html/body/form/table[1]/tbody/tr[4]/td[5]/input"]
xWarpEditors = ["html/body/form/table[1]/tbody/tr[5]/td[2]", "html/body/form/table[1]/tbody/tr[5]/td[3]", "html/body/form/table[1]/tbody/tr[5]/td[5]/input"]
xMineDeflectors = ["html/body/form/table[1]/tbody/tr[7]/td[2]", "html/body/form/table[1]/tbody/tr[7]/td[3]", "html/body/form/table[1]/tbody/tr[7]/td[5]/input"]
xFighters = ["html/body/form/table[2]/tbody/tr[2]/td[2]", "html/body/form/table[2]/tbody/tr[2]/td[3]", "html/body/form/table[2]/tbody/tr[2]/td[5]/input"]
xArmorPoints = ["html/body/form/table[2]/tbody/tr[3]/td[2]", "html/body/form/table[2]/tbody/tr[3]/td[3]", "html/body/form/table[2]/tbody/tr[3]/td[5]/input"]
xEscapePod = ["html/body/form/table[1]/tbody/tr[8]/td[2]", "html/body/form/table[1]/tbody/tr[8]/td[3]", "html/body/form/table[1]/tbody/tr[8]/td[5]/input"]
xFuelScoop = ["html/body/form/table[1]/tbody/tr[9]/td[2]", "html/body/form/table[1]/tbody/tr[9]/td[3]", "html/body/form/table[1]/tbody/tr[9]/td[5]/input"]
xLastShipSeenDev = ["html/body/form/table[1]/tbody/tr[10]/td[2]", "html/body/form/table[1]/tbody/tr[10]/td[3]", "html/body/form/table[1]/tbody/tr[10]/td[5]/input"]
xTorpedoes = ["html/body/form/table[2]/tbody/tr[2]/td[7]", "html/body/form/table[2]/tbody/tr[2]/td[8]", "html/body/form/table[2]/tbody/tr[2]/td[10]/input"]
xColonists = ["html/body/form/table[2]/tbody/tr[3]/td[6]", "html/body/form/table[2]/tbody/tr[3]/td[8]", "html/body/form/table[2]/tbody/tr[3]/td[10]/input"]
compList = ["Hull", "Engines", "Power", "Computer", "Sensors", "Beam Weapons",
"Armor", "Cloak", "Torpedo launchers", "Shields"]
xSelectors = {}
# http://stackoverflow.com/questions/22171558/what-does-enumerate-mean
for compoffset, compName in enumerate(compList, 2):
xSelectors[compName] = "html/body/form/table[1]/tbody/tr[{}]/td[9]/select".format(compoffset)
xBuyButton = "html/body/form/table[3]/tbody/tr/td[1]/input"
xCredits = "html/body/p[1]"
xResultsBanner = "html/body/table/tbody/tr[1]/td/font/strong"
xTotalCost = "html/body/table/tbody/tr[2]/td/strong/font"
currentPage = bnw.getPage()
baseURL = ('/').join(currentPage.split('/')[:-1])
portPage = "{}/port.php".format(baseURL)
mainPage = "{}/main.php".format(baseURL)
# load the page
bnw.loadPage(portPage)
if not bnw.elementExists(xBanner):
allText = bnw.textFromElement(xWholePage)
if "There is no port here" in allText:
print("There is no port in this sector")
            bnw.loadPage(mainPage)
return ["ERROR", "NO PORT"]
else:
print("Unhandled Error #1 in specialPort")
exit(1)
bannerText = bnw.textFromElement(xBanner)
if genericText in bannerText:
print("This is not a special port")
        bnw.loadPage(mainPage)
return ["ERROR", "WRONG PORT"]
if not bannerText == specialText:
print("Unhandled Error #2 in specialPort")
exit(1)
# determine how many credits are available for spending
textBlob = bnw.textFromElement(xCredits)
# regex out the cost
# You have 206,527,757 credits to spend.
m = re.search("have\s+(.*)\s+credits", textBlob)
if not m:
print("Unable to regex the available credits!")
exit(1)
creditAvailable = int(m.group(1).replace(",",""))
print("Credits available: {}".format(creditAvailable))
# get the current tech levels
currentTech = {}
desiredTech = {}
for compName in compList:
if compName in purchaseDict:
xpath = xSelectors[compName]
currentTech[compName] = int(bnw.selectedValue(xpath))
desiredTech[compName] = purchaseDict[compName]
print("Current {} Tech: {}, Desired Tech: {}".format(compName, currentTech[compName], desiredTech[compName]))
if desiredTech[compName] != currentTech[compName]:
if not bnw.selectDropDownNew(xSelectors[compName], desiredTech[compName]):
print("Unable to select the requested {} tech value".format(compName))
exit(1)
print("Attempting to execute the purchase")
if not bnw.clickButton(xBuyButton):
print("Was unable to click the 'Buy' button")
exit(1)
time.sleep(2)
if not bnw.elementExists(xResultsBanner):
allText = bnw.textFromElement(xWholePage)
m = re.search("total cost is\s+(.*)\s+credits and you only have\s+(.*)\s+credits.", allText)
if not m:
print("Not a successful trade, and unable to determine why")
exit(1)
theCost = int(m.group(1).replace(",",""))
theCredits = int(m.group(2).replace(",",""))
notEnough = theCost - theCredits
print("Short {} credits".format(notEnough))
return["ERROR", "TOO EXPENSIVE"]
resultBanner = bnw.textFromElement(xResultsBanner)
if not resultBanner == "Results for this trade":
print("Results banner not found")
exit(1)
# Cost : 2,500 Credits
finalBlob = bnw.textFromElement(xTotalCost)
if finalBlob == "DONTEXIST":
print("Total cost not found")
exit(1)
m = re.search("Cost\s\:\s(.*)\sCredits", finalBlob)
if not m:
print("Unable to regex the final cost")
exit(1)
finalCost = int(m.group(1).replace(",",""))
print("final cost: {}".format(finalCost))
bnw.loadPage(mainPage)
return ["SUCCESS", finalCost]
|
'''
Problem 12: Write a function group(list, size) that take a list and splits into smaller lists of given size.
group([1, 2, 3, 4, 5, 6, 7, 8, 9], 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4)
[[1, 2, 3, 4], [5, 6, 7, 8], [9]]
'''
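# A minimal reference implementation of the function described above (an illustrative
# addition; the interactive exploration below is kept as written).
def group(lst, size):
    """Split lst into consecutive chunks of at most `size` elements."""
    return [lst[i:i + size] for i in range(0, len(lst), size)]
# Example: group([1, 2, 3, 4, 5, 6, 7, 8, 9], 4) -> [[1, 2, 3, 4], [5, 6, 7, 8], [9]]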
import sys
print "What are the elements you want to cut up?"
x = raw_input().split(' ')
print "How big are the chunks ?"
y = int(raw_input())
i = 0
final_list = []
chunk_size_counter = 0
for i in x:
print "i is " + str(i)
print "chunk_size_counter is " + str(chunk_size_counter)
print "y is " + str(y)
if chunk_size_counter < y:
final_list.append(i)
chunk_size_counter += 1
print "if i is " + str(i)
print "if chunk_size_counter is " + str(chunk_size_counter)
print "if y is " + str(y)
if (i is x[-1]):
print "last i is " + str(i)
print "last chunk_size_counter is " + str(chunk_size_counter)
print "last y is " + str(y)
print "last element is " + str(i)
print final_list
    else:
        print "final_list is " + str(final_list) + "\n"
        final_list = [i]
        chunk_size_counter = 1
        if (i is x[-1]):
            print final_list
print "i is now " + str(i)
print "chunk_size_counter is now " + str(chunk_size_counter)
print "y is now " + str(y)
'''
final_list = []
for i in elements:
print "i is " + str(i)
count=0
if count < y:
print count
count +=1
counter +=1
final_list.append(i)
print "chunk #" + str(counter) + " is " + str(final_list)
final_list = []
count=0
# print "final chunk " + str(final_list)
'''
|
import esphome.codegen as cg
import esphome.config_validation as cv
from esphome.components import sensor
from esphome.const import CONF_ID, UNIT_EMPTY, ICON_EMPTY
from . import EmptySensorHub, CONF_HUB_ID
DEPENDENCIES = ['empty_sensor_hub']
sensor_ns = cg.esphome_ns.namespace('sensor')
Sensor = sensor_ns.class_('Sensor', sensor.Sensor, cg.Nameable)
CONFIG_SCHEMA = sensor.sensor_schema(UNIT_EMPTY, ICON_EMPTY, 1).extend({
cv.GenerateID(): cv.declare_id(Sensor),
cv.GenerateID(CONF_HUB_ID): cv.use_id(EmptySensorHub)
}).extend(cv.COMPONENT_SCHEMA)
def to_code(config):
paren = yield cg.get_variable(config[CONF_HUB_ID])
var = cg.new_Pvariable(config[CONF_ID])
yield sensor.register_sensor(var, config)
cg.add(paren.register_sensor(var))
|
import numpy as np
import sklearn
import pandas as pd
from sklearn.cluster import KMeans
from skimage.io import imread
import pylab
import math
import matplotlib.pyplot as plot
from skimage import img_as_float as iaf
image = imread('/Users/winniethepooh/PycharmProjects/ml/data-out/_3160f0832cf89866f4cc20e07ddf1a67_parrots.jpg')
image = iaf(image)
r = image[:, :, 0].ravel()
g = image[:, :, 1].ravel()
b = image[:, :, 2].ravel()
rgb = np.transpose(np.vstack((r, g, b)))
clf = KMeans(random_state=241, init='k-means++')
clf.fit(rgb)
clusters = clf.labels_
avg = clf.cluster_centers_
cls_img = np.reshape(clusters, (-1, 713))
mean_img = np.copy(image)
for cluster in range(0, clf.n_clusters):
mean_r = np.mean(mean_img[:, :, 0][cls_img == cluster])
mean_g = np.mean(mean_img[:, :, 1][cls_img == cluster])
mean_b = np.mean(mean_img[:, :, 2][cls_img == cluster])
    mean_img[cls_img == cluster] = [mean_r, mean_g, mean_b]
plot.imshow(mean_img)
med_img = np.copy(image)
for cluster in range(0, clf.n_clusters):
median_r = np.median(med_img[:, :, 0][cls_img == cluster])
median_g = np.median(med_img[:, :, 1][cls_img == cluster])
median_b = np.median(med_img[:, :, 2][cls_img == cluster])
    med_img[cls_img == cluster] = [median_r, median_g, median_b]
plot.imshow(med_img)
def PSNR(image1, image2):
mse = np.mean((image1 - image2) ** 2)
    psnr = 10 * math.log10(np.max(image1) ** 2 / mse)
return psnr
psnr1 = PSNR(image, med_img)
psnr2 = PSNR(image, mean_img)
print(psnr1, psnr2)
for i in range(1, 21):
clf = KMeans(random_state=241, init='k-means++', n_clusters=i)
clf.fit(rgb)
clusters = clf.labels_
avg = clf.cluster_centers_
cls_img = np.reshape(clusters, (-1, 713))
img = np.copy(image)
for cluster in range(0, i):
img[cls_img == cluster] = avg[cluster]
print(i, PSNR(image, img))
|
import sys
a = 0
b = 0
max = None
try:
max = int(sys.argv[1])
except:
print("Not an integer")
sys.exit(1)
for n in range(1,max):
if n % 3 == 0 and n % 5 == 0:
print("fizz buzz")
elif n % 3 == 0:
print("fizz")
a += 1
elif n % 5 == 0:
print("buzz")
b += 1
else:
print(n)
print("There are %d fizzes and %d buzzes") % (a,b)
|
from re import compile, match
REGEX = compile(r'((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\.){4}$')
def ipv4_address(address):
# refactored thanks to @leonoverweel on CodeWars
return bool(match(REGEX, address + '.'))
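# A few illustrative checks (an addition, not part of the original kata solution).
if __name__ == '__main__':
    assert ipv4_address('192.168.0.1')
    assert not ipv4_address('256.1.1.1')   # octet out of range
    assert not ipv4_address('1.2.3')       # too few octets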
|
from django.db import models
import uuid
from .constants import MessageConstants
from .managers import ChatMessageManager
from cryptography.fernet import Fernet
def getKey():
return Fernet.generate_key().decode("utf8")
class ChatInfo(models.Model):
member1 = models.ForeignKey(
"loginsignup.Beaver",
on_delete=models.CASCADE,
related_name="memberOne",
related_query_name="memOne",
)
member2 = models.ForeignKey(
"loginsignup.Beaver",
on_delete=models.CASCADE,
related_name="memberTwo",
related_query_name="memTwo",
)
urlparam = models.UUIDField(
"URL Parameter", primary_key=True, default=uuid.uuid4, editable=False
)
    # Fernet keys are 44-character url-safe base64 strings, so the field must hold at least that
    publicKey = models.CharField(
        "Encryption key", max_length=44, default=getKey, editable=False
    )
class Meta:
verbose_name_plural = "Chat Informations"
def __str__(self):
return f"{self.member1} <-> {self.member2}"
# When someone creates a new friend call this method
@classmethod
def createChatInformation(cls, member1, member2):
cls.objects.get_or_create(member1=member1, member2=member2)
# Returns all the url param for a particular user in the form of an
# queryset
@classmethod
def getAllURLParams(cls, beaver):
return cls.objects.select_related("member1").filter(
member1=beaver
) | cls.objects.select_related("member2").filter(member2=beaver)
@classmethod
def convertUUIDToString(cls, uniqueid):
return str(uniqueid).replace("-", "")
@classmethod
def convertStringToUUID(cls, string):
return uuid.UUID(string)
def getAllMessages(self):
getMessages = self.messages.all()
response = []
for messageDetail in getMessages:
messageInfo = {}
messageInfo["message"] = ChatMessage.decryptMessage(
messageDetail.message, urlparam=messageDetail.chatinfo.urlparam
)
messageInfo["sender"] = messageDetail.sender.user.username
response.append(messageInfo)
return response
class ChatMessage(models.Model):
objects = ChatMessageManager()
chatinfo = models.ForeignKey(
ChatInfo,
on_delete=models.CASCADE,
related_name="messages",
related_query_name="message",
)
sender = models.ForeignKey(
"loginsignup.Beaver",
on_delete=models.CASCADE,
related_name="messages_sent",
related_query_name="message_sent",
)
message = models.TextField(null=False)
timeSent = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = "Chat Messages"
def __str__(self):
return f"{self.chatinfo} || Sent : {self.timeSent}"
@classmethod
def decryptMessage(cls, message, urlparam):
chat_info = None
try:
chat_info = ChatInfo.objects.get(urlparam=urlparam)
except BaseException:
return {"status": False, "error": MessageConstants.notAFriend}
publicKey = chat_info.publicKey.encode("utf8")
fernet = Fernet(publicKey)
# Convert the message into byte string and then into string
return fernet.decrypt(message.encode("utf8")).decode("utf8")
# Sender must be a beaver instance
# urlparam must be an UUID
@classmethod
def createMessage(cls, urlparam, sender, message):
chat_info = None
try:
chat_info = ChatInfo.objects.get(urlparam=urlparam)
except BaseException:
return {"status": False, "error": MessageConstants.notAFriend}
publicKey = chat_info.publicKey.encode("utf8")
fernet = Fernet(publicKey)
# Convert the message into byte and then convert the encrypted byte
# string into string
encryptedMessage = fernet.encrypt(message.encode("utf8")).decode("utf8")
cls.objects.create(chatinfo=chat_info, sender=sender, message=encryptedMessage)
return {"status": True, "error": None}
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qt.ui'
#
# Created: Tue Apr 12 14:31:51 2016
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
import pickle
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QLabel, QMessageBox, QPixmap
import popsift
import sys, time
import cv2
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class ExtendedQLabel(QtGui.QLabel):
    def __init__(self, parent):
QLabel.__init__(self, parent)
def mouseReleaseEvent(self, ev):
self.emit(QtCore.SIGNAL('clicked()'))
class Ui_MainWindow(QtGui.QMainWindow):
BUTTON_IMAGE = 'im.png'
def __init__(self, *args):
QtGui.QMainWindow.__init__(self)
self.setupUi(self)
self.connect(self.ImageButton, QtCore.SIGNAL('clicked()'), self.buttonClicked)
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(563, 554)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(self.centralwidget)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Kinnari"))
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setWordWrap(False)
self.label.setMargin(1)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
self.widget = QtGui.QWidget(self.centralwidget)
self.widget.setEnabled(True)
self.widget.setMinimumSize(QtCore.QSize(600, 450))
self.widget.setSizeIncrement(QtCore.QSize(0, 0))
self.widget.setObjectName(_fromUtf8("widget"))
self.gridLayout = QtGui.QGridLayout(self.widget)
self.gridLayout.setMargin(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.ImageButton = ExtendedQLabel(self.widget)
# self.ImageButton.move(0, 0)
self.pix1 = QtGui.QPixmap(self.BUTTON_IMAGE)
self.ImageButton.setPixmap(self.pix1)
# self.ImageButton.setGeometry(QtCore.QRect(0, 0, 1000, 1000))
self.ImageButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.ImageButton.setText(_fromUtf8(""))
self.ImageButton.setObjectName(_fromUtf8("ImageButton"))
self.ImageButton.setScaledContents(True)
# sift=cv2.xfeatures2d.SIFT_create()
self.gridLayout.addWidget(self.ImageButton, 0, 0, 1, 1)
self.progressBar = QtGui.QProgressBar(self.widget)
self.progressBar.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.gridLayout.addWidget(self.progressBar, 1, 0, 1, 1)
self.verticalLayout.addWidget(self.widget)
self.label_2 = QtGui.QLabel(self.centralwidget)
self.label_2.setText(_fromUtf8(""))
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Monotype Corsiva"))
font.setPointSize(20)
font.setBold(True)
font.setWeight(75)
self.label_2.setFont(font)
self.verticalLayout.addWidget(self.label_2)
'''self.widget_2 = QtGui.QWidget(self.centralwidget)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.pushButton = QtGui.QPushButton(self.widget_2)
self.pushButton.setGeometry(QtCore.QRect(200, 0, 150, 31))
self.pushButton.setMaximumSize(QtCore.QSize(150, 16777215))
self.pushButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.pushButton.setLayoutDirection(QtCore.Qt.RightToLeft)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.pushButton.clicked.connect(self.annotate)
self.verticalLayout.addWidget(self.widget_2)'''
self.pushButton = QtGui.QCommandLinkButton(self.centralwidget)
self.pushButton.setMaximumSize(QtCore.QSize(130, 16777215))
self.pushButton.setObjectName(_fromUtf8("commandLinkButton"))
self.pushButton.setLayoutDirection(QtCore.Qt.RightToLeft)
self.verticalLayout.addWidget(self.pushButton, QtCore.Qt.AlignHCenter)
self.pushButton.clicked.connect(self.annotate)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 563, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.label.setText(_translate("MainWindow", "Automatic Image Anotation", None))
self.pushButton.setText(_translate("MainWindow", "Anotate Image", None))
def buttonClicked(self):
self.label_2.setText("")
self.progressBar.setProperty("value", 0)
self.file_name = QtGui.QFileDialog.getOpenFileName(self, "Pick a folder")
self.pix = QPixmap(self.file_name)
if not self.pix.isNull():
self.ImageButton.setPixmap(self.pix)
self.flag = 1
else:
self.ImageButton.setPixmap(QtGui.QPixmap(self.BUTTON_IMAGE))
mBox = QMessageBox()
mBox.setText("Not a Valid Image or Image Not Selected!")
mBox.setWindowTitle("ERROR")
mBox.setStandardButtons(QMessageBox.Ok)
mBox.exec_()
def annotate(self):
self.val = 0.0
userdes = popsift.computeKp(str(self.file_name))
f = open('monuments.pkl', 'rb')
tup = pickle.load(f)
maxp = -1
completed = 0
prev = time.time()
while (tup):
self.val = self.val + float(100)/29
self.progressBar.setProperty("value", self.val)
print tup[1]
c = popsift.compare(userdes, tup[0], 0)
print time.ctime()
if c > 0:
if maxp < c:
maxp = c
self.qpath = tup[1]
try:
tup = pickle.load(f)
except:
if maxp != -1:
print self.qpath
ind = self.qpath.rfind("/")
ind2 = -1
for i in ['1', '2', '3', '4', '5']:
ind2 = max(self.qpath.find(i), ind2)
self.label_2.setText("The image is of : " + self.qpath[ind + 1:ind2])
else:
self.label_2.setText("Sorry !No matches found")
break
now = time.time()
print "Total time elapsed :", now - prev
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
window = Ui_MainWindow()
window.show()
sys.exit(app.exec_())
|
#!/usr/bin/env python3
from os import system
from time import sleep
x = []
while True:
x.append('#' * 99999)
sleep(0.1)
system('sleep 9999 &')
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from ticketingsystem import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('django.contrib.auth.urls')),
    path('', views.home, name='home'),
    path('dashboard/', views.dashboard, name='Dashboard'),
    path('dashboard/tickets/<int:ticket_id>/', views.ticket_detail, name='Ticket_Detail'),
    path('dashboard/create-ticket/', views.createTicket, name='create-ticket'),
    path('customer_list/', views.customerList, name='Customer_List'),
    path('customer_list/create_customer/', views.createCustomer, name='create_customer'),
    path('my_tickets/', views.myTickets, name='my-tickets'),
    path('stock_list/', views.stockList, name='stock-list'),
    path('stock_list/create_stock/', views.createStock, name='create_stock'),
    path('stock_list/<int:stock_id>/', views.editStock, name='stock_edit'),
]
|
from django.urls import path, re_path
from .apis import *
urlpatterns = [
path('unemployments/add', AddUnemploymentApi.as_view(), name='unemployment_add'),
re_path(r'^unemployments/list/(?:start=(?P<start>(?:19|20)\d{2}(0[1-9]|1[012])))&(?:end=(?P<end>(?:19|20)\d{2}(0[1-9]|1[012])))$', UnemploymentListApi.as_view(), name='unemployment_list'),
path('unemployments/update/<int:unemployment_id>', UpdateUnemploymentApi.as_view(), name='unemployment_update'),
path('unemployments/delete/<int:unemployment_id>', DeleteUnemploymentApi.as_view(), name='unemployment_delete'),
]
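# Illustrative request path for the unemployment_list route (example values only):
#   unemployments/list/start=201901&end=201912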
|
import numpy as np
from .decode import *
def calc_acc(target, output):
output_argmax = output.detach().permute(1, 0, 2).argmax(dim=-1)
target = target.cpu().numpy()
output_argmax = output_argmax.cpu().numpy()
# print(target, output, output_argmax)
a = np.array([decode_target(true) == decode(pred) for true, pred in zip(target, output_argmax)])
return a.mean()
|
import httplib
import os
import signal
import socket
import time
PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXECFILE = os.path.join(os.path.join(PROJECT_PATH, "src"), "wheatserver")
class WheatServer(object):
def __init__(self, conf_file="", *options):
assert os.access(EXECFILE, os.F_OK)
self.exec_pid = os.fork()
if not conf_file:
conf_file = os.path.join(PROJECT_PATH, "wheatserver.conf")
if not self.exec_pid:
os.execl(EXECFILE, EXECFILE, conf_file, *options)
def __del__(self):
os.kill(self.exec_pid, signal.SIGQUIT);
def server_socket(port):
s = socket.socket()
s.connect(("127.0.0.1", port))
return s
def construct_command(*args):
return "\r\r%s$" % ("\n".join(args))
def pytest_generate_tests(metafunc):
metafunc.parametrize(('port',), [(10827,),(10829,),])
def test_config_command(port):
s = server_socket(port)
s.send(construct_command("config", "logfile-level"))
assert s.recv(100) == "logfile-level: DEBUG"
def test_stat_accuracy(port):
global sync_server, async_server
for i in range(100):
conn = httplib.HTTPConnection("127.0.0.1", port-1, timeout=1);
conn.request("GET", "/")
r1 = conn.getresponse()
assert r1.status == 200
time.sleep(0.1)
os.kill(sync_server.exec_pid, signal.SIGUSR1)
os.kill(async_server.exec_pid, signal.SIGUSR1)
time.sleep(0.1)
s = server_socket(port)
s.send(construct_command("stat", "master"))
assert "Total client: 100" in s.recv(1000)
def test_static_file(port):
time.sleep(0.1)
for i in range(10):
conn = httplib.HTTPConnection("127.0.0.1", port-1, timeout=1);
conn.request("GET", "/static/example.jpg")
r1 = conn.getresponse()
assert r1.status == 200
sync_server = async_server = None
def setup_module(module):
global sync_server, async_server
sync_server = WheatServer("", "--port 10826", "--stat-port 10827",
"--worker-type %s" % "SyncWorker",
"--app-project-path %s" % os.path.join(PROJECT_PATH, "example"),
"--document-root %s" % os.path.join(PROJECT_PATH, "example/"),
"--static-file-dir /static/",
"--protocol Http")
async_server = WheatServer("", "--worker-type %s" % "AsyncWorker",
"--app-project-path %s" % os.path.join(PROJECT_PATH, "example"),
"--document-root %s" % os.path.join(PROJECT_PATH, "example/"),
"--static-file-dir /static/",
"--protocol Http")
time.sleep(0.5)
def teardown_module(module):
global sync_server, async_server
del sync_server, async_server
|
# The array that was to be built in the previous task is stored in the variable mat. Turn it into a column (vertical) vector and print it.
import numpy as np
z = mat.flatten()
print(z.reshape(z.shape+(1,)))
# import numpy as np
# mat = mat.reshape((12,1))
# print(mat)
|
#B
intgr=input()
muldig=1
for i in intgr:
muldig=muldig*int(i)
print(muldig)
|
saludo = "Hola Mundo"
edad = 20
estatura = 1.55
print(saludo, edad, estatura)
|
# Simple DP.
# Can be reduced to a one-dimensional recurrence f[i]; a sketch follows the class below.
MOD = int(1e9+7)
class Solution:
def countHousePlacements(self, n: int) -> int:
        # dp[i][0] / dp[i][1] count the arrangements for the first i plots on one side of the street: 0 means plot i is left empty, 1 means a house is placed on plot i
dp = [[0] * 2 for _ in range(n+1)]
dp[1][0] = dp[1][1] = 1
for i in range(2, n+1):
dp[i][1] = dp[i-1][0] % MOD
dp[i][0] = (dp[i-1][0] + dp[i-1][1]) % MOD
return (dp[n][1] + dp[n][0]) * (dp[n][1] + dp[n][0]) % MOD
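# A minimal sketch of the one-dimensional simplification mentioned above (an illustrative
# addition, not part of the original solution): per side f[i] = f[i-1] + f[i-2], and the two
# sides of the street are independent, so the answer is f[n] squared.
def count_house_placements(n: int) -> int:
    prev, cur = 1, 2  # f[0] = 1 (empty prefix), f[1] = 2 (plot 1 empty or built)
    for _ in range(2, n + 1):
        prev, cur = cur, (prev + cur) % MOD
    return cur * cur % MOD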
|
#Cubic spline curve using Hermite interpolation
#@Mkchaudhary 16 sept 2018
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
def init():
glClearColor(0.0,1.0,1.0,0.0)
glColor3f(1.0,0.0,0.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(-10.0,10.0,-10.0,10.0)
def setPixel(xcoordinate,ycoordinate):
glBegin(GL_POINTS)
glVertex2f(xcoordinate,ycoordinate)
glEnd()
glFlush()
def read_controlpoint():
global p,m
    n = int(input("Enter no of control points: "))
    p = [[0 for x in range(2)] for y in range(n)]
    m = [0 for x in range(n)]
    for i in range(n):
        p[i][0] = float(input("Enter control point_x: "))
        p[i][1] = float(input("Enter control point_y: "))
        m[i] = float(input("Enter slope at control point: "))
def draw_cubic_spline():
while True:
read_controlpoint()
n=len(p)
for i in range(n-1):
hermite(p[i],p[i+1],m[i],m[i+1])
print("Enter a decimal no other than 0 to continue")
check=int(input("Enter 0 to exit: "))
if check == 0:
break
else:
pass
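# Cubic Hermite basis functions used below: H0(u) = 2u^3 - 3u^2 + 1, H1(u) = -2u^3 + 3u^2,
# H2(u) = u^3 - 2u^2 + u, H3(u) = u^3 - u^2; each segment is p(u) = H0*p1 + H1*p2 + H2*m1 + H3*m2
# for u in [0, 1].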
def hermite(p1,p2,m1,m2):
u=0.0
while u <= 1.0:
H0_u=2*u*u*u -3*u*u +1
H1_u=-2*u*u*u + 3*u*u
H2_u=u*u*u -2*u*u + u
H3_u=u*u*u - u*u
x=H0_u*p1[0] + H1_u*p2[0] + H2_u*m1 + H3_u*m2
y=H0_u*p1[1] + H1_u*p2[1] + H2_u*m1 + H3_u*m2
setPixel(x,y)
u+=0.001
def Display():
glClear(GL_COLOR_BUFFER_BIT)
draw_cubic_spline()
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(600,600)
glutInitWindowPosition(50,50)
glutCreateWindow("Cubic spline")
glutDisplayFunc(Display)
init()
glutMainLoop()
main()
|
# To get started, copy over hyperparams from another experiment.
# Visit rll.berkeley.edu/gps/hyperparams.html for documentation.
""" Hyperparameters for Laika Reinforcement Learning experiment. """
from __future__ import division
from datetime import datetime
import os.path
import numpy as np
from gps import __file__ as gps_filepath
from gps.agent.ros.agent_laika_ros import AgentLaikaROS
from gps.algorithm.algorithm_traj_opt import AlgorithmTrajOpt
from gps.algorithm.algorithm_mdgps import AlgorithmMDGPS
from gps.algorithm.cost.cost_state import CostState
from gps.algorithm.dynamics.dynamics_prior_gmm import DynamicsPriorGMM
from gps.algorithm.dynamics.dynamics_lr_prior import DynamicsLRPrior
from gps.algorithm.traj_opt.traj_opt_lqr_python import TrajOptLQRPython
from gps.algorithm.traj_opt.traj_opt_pilqr import TrajOptPILQR
from gps.algorithm.policy_opt.policy_opt_tf import PolicyOptTf
from gps.algorithm.policy.lin_gauss_init import init_lqr
from gps.algorithm.policy.policy_prior_gmm import PolicyPriorGMM
from gps.gui.target_setup_gui import load_pose_from_npz
from gps.gui.config import generate_experiment_info
from gps.proto.gps_pb2 import BODY_POSITIONS, BODY_VELOCITIES, CABLE_RL, ACTION
from gps.algorithm.policy_opt.tf_model_example import tf_network
#WHERE IS THE TF POLICY IMPORTED?
SENSOR_DIMS = {
BODY_POSITIONS: 54,
BODY_VELOCITIES: 54,
CABLE_RL: 32,
ACTION: 36, #32 CABLES AND 4 MOTORS ON THE LEGS
}
BASE_DIR = '/'.join(str.split(gps_filepath, '/')[:-2])
EXP_DIR = BASE_DIR + '/../experiments/Laika_Test/'
common = {
'experiment_name': 'my_experiment' + '_' + \
datetime.strftime(datetime.now(), '%m-%d-%y_%H-%M'),
'experiment_dir': EXP_DIR,
'data_files_dir': EXP_DIR + 'data_files/',
'target_filename': EXP_DIR + 'target.npz',
'log_filename': EXP_DIR + 'log.txt',
'conditions': 1,
'iterations':1,
}
if not os.path.exists(common['data_files_dir']):
os.makedirs(common['data_files_dir'])
agent = {
'type': AgentLaikaROS,
'dt': 0.02, #NTRT dt * substeps
'conditions': common['conditions'],
'T': 100,
'substeps': 20,
'state_size' : 140, #wrong
'x0': [np.zeros(140)], #debug: change later
'sensor_dims': SENSOR_DIMS,
'state_include': [BODY_POSITIONS, BODY_VELOCITIES, CABLE_RL],
'obs_include': [BODY_POSITIONS, BODY_VELOCITIES, CABLE_RL],
}
algorithm = {
'type': AlgorithmMDGPS,
'conditions': common['conditions'],
'iterations': 12,
'kl_step': 1.0,
'min_step_mult': 0.5,
'max_step_mult': 3.0,
'policy_sample_mode': 'replace',
}
algorithm['init_traj_distr'] = {
'type': init_lqr,
'init_gains': np.zeros(SENSOR_DIMS[ACTION]),
'init_acc': np.zeros(SENSOR_DIMS[ACTION]),
'init_var': 1.0,
'stiffness': 0.5,
'stiffness_vel': 0.25,
'final_weight': 50,
'dt': agent['dt'],
'T': agent['T'],
}
algorithm['cost'] = {
'type': CostState,
'data_types' : {
#BODY_POSITIONS: {
# 'average': (9,6),
# 'wp': np.ones(54),
# 'target_state': np.ones(54)*100,
#},
BODY_VELOCITIES: {
'average':(9,6),
'wp': [-1.0,0.,0.,0.,0.,0.], #np.ones(6),
'target_state': np.zeros(6),
},
#CABLE_RL: {
# 'wp': np.ones(32),
# 'target_state': np.ones(32)*100,
#},
},
#'alpha': 1e-3,
#'l1':0,
#'l2':1.0,
}
algorithm['dynamics'] = {
'type': DynamicsLRPrior,
'regularization': 1e-6,
'prior': {
'type': DynamicsPriorGMM,
'max_clusters': 40,
'min_samples_per_cluster': 40,
'max_samples': 20,
},
}
algorithm['traj_opt'] = {
'type': TrajOptLQRPython,
'cons_per_step': False,#True,
}
#algorithm['traj_opt'] = {
# 'type': TrajOptPILQR,
# 'covariance_damping':10.0,
# 'kl_threshold': 0.5,
#}
algorithm['policy_opt'] = {
'type': PolicyOptTf,
'network_params': {
'obs_include': [BODY_POSITIONS, BODY_VELOCITIES, CABLE_RL],
'obs_vector_data': [BODY_POSITIONS, BODY_VELOCITIES, CABLE_RL],
'sensor_dims': SENSOR_DIMS,
'n_layers': 2,
'dim_hidden': [100, 100],
},
'network_model': tf_network,
'iterations': 1000,
'weights_file_prefix': EXP_DIR + 'policy',
}
algorithm['policy_prior'] = {
'type': PolicyPriorGMM,
'max_clusters': 50,
'min_samples_per_cluster': 40,
'max_samples': 40,
}
config = {
'iterations': algorithm['iterations'],
'common': common,
'verbose_trials': 1,
'verbose_policy_trials':1,
'agent': agent,
'gui_on': True,
'algorithm': algorithm,
'num_samples': 15,
'image_on':False,
}
common['info'] = generate_experiment_info(config)
|
from django.apps import AppConfig
class EmailMessagesConfig(AppConfig):
name = 'email_messages'
|
import subprocess
user = 'dkdexpota'
password = '8x5h915XXX'
cmd = "git init"
subprocess.call(cmd, shell=True)
cmd = 'git config --global user.name "dkdexpota"'
subprocess.call(cmd, shell=True)
cmd = 'git config --global user.email "artur202080202080@gmail.com"'
subprocess.call(cmd, shell=True)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from gbpclient.gbp.v2_0 import groupbasedpolicy as gbp
from gbpclient.tests.unit import test_cli20
class CLITestV20PolicyClassifierJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20PolicyClassifierJSON, self).setUp()
def test_create_policy_classifier_with_mandatory_params(self):
"""grouppolicy-policy-classifier-create with all mandatory params."""
resource = 'policy_classifier'
cmd = gbp.CreatePolicyClassifier(test_cli20.MyApp(sys.stdout), None)
name = 'my-name'
direction = 'bi'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = ['--tenant-id', tenant_id, '--direction', direction,
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
tenant_id=tenant_id, direction=direction)
def test_create_policy_classifier_with_all_params(self):
"""grouppolicy-policy-classifier-create with all params."""
resource = 'policy_classifier'
cmd = gbp.CreatePolicyClassifier(test_cli20.MyApp(sys.stdout), None)
name = 'my-name'
tenant_id = 'my-tenant'
description = 'My PolicyClassifier'
my_id = 'my-id'
port_range = '10-80'
direction = 'in'
shared = 'true'
for protocol in ['tcp', 'icmp', 'udp', '50']:
args = ['--tenant-id', tenant_id,
'--description', description,
'--protocol', protocol,
'--port-range', port_range,
'--direction', direction,
'--shared', shared,
name]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
tenant_id=tenant_id,
description=description,
protocol=protocol,
port_range=port_range,
direction=direction, shared=shared)
def test_list_policy_classifiers(self):
"""grouppolicy-policy-classifier-list."""
resources = 'policy_classifiers'
cmd = gbp.ListPolicyClassifier(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_policy_classifiers_pagination(self):
"""grouppolicy-policy-classifier-list."""
resources = 'policy_classifiers'
cmd = gbp.ListPolicyClassifier(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_policy_classifiers_sort(self):
"""grouppolicy-policy-classifier-list --sort-key name --sort-key id
--sort-key asc --sort-key desc
"""
resources = 'policy_classifiers'
cmd = gbp.ListPolicyClassifier(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_policy_classifiers_limit(self):
"""grouppolicy-policy-classifier-list -P."""
resources = 'policy_classifiers'
cmd = gbp.ListPolicyClassifier(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_policy_classifier_id(self):
"""grouppolicy-policy-classifier-show test_id."""
resource = 'policy_classifier'
cmd = gbp.ShowPolicyClassifier(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_policy_classifier_id_name(self):
"""grouppolicy-policy-classifier-show."""
resource = 'policy_classifier'
cmd = gbp.ShowPolicyClassifier(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_policy_classifier(self):
"""grouppolicy-policy-classifier-update myid --name myname --tags a b.
"""
resource = 'policy_classifier'
cmd = gbp.UpdatePolicyClassifier(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], })
def test_update_policy_classifier_with_allparams(self):
resource = 'policy_classifier'
port_range = '10-80'
direction = 'in'
cmd = gbp.UpdatePolicyClassifier(test_cli20.MyApp(sys.stdout), None)
my_id = 'someid'
shared = 'true'
for protocol in ['tcp', 'icmp', 'udp', '50']:
body = {
'protocol': protocol,
'port_range': port_range,
'direction': direction,
'shared': shared
}
args = [my_id,
'--protocol', protocol,
'--port-range', port_range,
'--direction', direction,
'--shared', shared, ]
self._test_update_resource(resource, cmd, my_id, args, body)
def test_delete_policy_classifier(self):
"""grouppolicy-policy-classifier-delete my-id."""
resource = 'policy_classifier'
cmd = gbp.DeletePolicyClassifier(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
|
name = input("Enter file:")
if len(name) < 1:
name = "mbox-short.txt"
handle = open(name)
lst = list()
counts = dict()
for line in handle:
    if line.startswith("From "):
line = line.split()
tpart = line[5]
tpart = tpart.split(":")
time = tpart[0]
lst.append(time)
else:
continue
for iterv in lst:
counts[iterv] = counts.get(iterv, 0) + 1
tup = list()
#flip; tuple as value in a list
for key, value in counts.items():
tup.append((key, value))
#from the smallest
tup.sort()
for k, v in tup:
print(k, v)
|
import pandas as pd
import pytrec_eval
from collections import defaultdict
import os
class Utils:
@staticmethod
def parse_query_result(filename):
results = []
with open(filename, 'r') as file:
for line in file:
split_line = line.strip("\n").split(" ")
results.append([split_line[1], float(split_line[2])])
return results
@staticmethod
def parse_res_file(filename):
results = []
with open(filename, 'r') as file:
for line in file:
split_line = line.strip("\n").split(" ")
results.append([split_line[0], split_line[2], float(split_line[4])])
return results
@staticmethod
def parse_singleline_topics_file(filepath, tokenise=True):
"""
Parse a file containing topics, one per line
Args:
file_path(str): The path to the topics file
tokenise(bool): whether the query should be tokenised, using Terrier's standard Tokeniser.
If you are using matchop formatted topics, this should be set to False.
Returns:
pandas.Dataframe with columns=['qid','query']
"""
rows = []
from jnius import autoclass
# TODO: this can be updated when 5.3 is released
system = autoclass("java.lang.System")
system.setProperty("SingleLineTRECQuery.tokenise", "true" if tokenise else "false")
slqIter = autoclass("org.terrier.applications.batchquerying.SingleLineTRECQuery")(filepath)
for q in slqIter:
rows.append([slqIter.getQueryId(), q])
return pd.DataFrame(rows, columns=["qid", "query"])
@staticmethod
def parse_trec_topics_file(file_path):
"""
Parse a file containing topics in standard TREC format
Args:
file_path(str): The path to the topics file
Returns:
pandas.Dataframe with columns=['qid','query']
"""
from jnius import autoclass
system = autoclass("java.lang.System")
system.setProperty("TrecQueryTags.doctag", "TOP")
system.setProperty("TrecQueryTags.idtag", "NUM")
system.setProperty("TrecQueryTags.process", "TOP,NUM,TITLE")
system.setProperty("TrecQueryTags.skip", "DESC,NARR")
trec = autoclass('org.terrier.applications.batchquerying.TRECQuery')
tr = trec(file_path)
topics_lst = []
while(tr.hasNext()):
topic = tr.next()
qid = tr.getQueryId()
topics_lst.append([qid, topic])
topics_dt = pd.DataFrame(topics_lst, columns=['qid', 'query'])
return topics_dt
@staticmethod
def parse_qrels(file_path):
"""
Parse a file containing qrels
Args:
file_path(str): The path to the qrels file
Returns:
pandas.Dataframe with columns=['qid','docno', 'label']
"""
        df = pd.read_csv(file_path, sep=r'\s+', names=["qid", "iter", "docno", "label"])
df = df.drop(columns="iter")
df["qid"] = df["qid"].astype(str)
df["docno"] = df["docno"].astype(str)
return df
@staticmethod
def convert_qrels_to_dict(df):
"""
Convert a qrels dataframe to dictionary for use in pytrec_eval
Args:
df(pandas.Dataframe): The dataframe to convert
Returns:
dict: {qid:{docno:label,},}
"""
run_dict_pytrec_eval = defaultdict(dict)
for index, row in df.iterrows():
run_dict_pytrec_eval[row['qid']][row['docno']] = int(row['label'])
return(run_dict_pytrec_eval)
@staticmethod
def convert_res_to_dict(df):
"""
Convert a result dataframe to dictionary for use in pytrec_eval
Args:
df(pandas.Dataframe): The dataframe to convert
Returns:
dict: {qid:{docno:score,},}
"""
run_dict_pytrec_eval = defaultdict(dict)
for index, row in df.iterrows():
run_dict_pytrec_eval[row['qid']][row['docno']] = float(row['score'])
return(run_dict_pytrec_eval)
@staticmethod
def evaluate(res, qrels, metrics=['map', 'ndcg'], perquery=False):
"""
Evaluate the result dataframe with the given qrels
Args:
res: Either a dataframe with columns=['qid', 'docno', 'score'] or a dict {qid:{docno:score,},}
qrels: Either a dataframe with columns=['qid','docno', 'label'] or a dict {qid:{docno:label,},}
metrics(list): A list of strings specifying which evaluation metrics to use. Default=['map', 'ndcg']
perquery(bool): If true return each metric for each query, else return mean metrics. Default=False
"""
if isinstance(res, pd.DataFrame):
batch_retrieve_results_dict = Utils.convert_res_to_dict(res)
else:
batch_retrieve_results_dict = res
if isinstance(qrels, pd.DataFrame):
qrels_dic = Utils.convert_qrels_to_dict(qrels)
else:
qrels_dic = qrels
evaluator = pytrec_eval.RelevanceEvaluator(qrels_dic, set(metrics))
result = evaluator.evaluate(batch_retrieve_results_dict)
if perquery:
return result
else:
measures_sum = {}
mean_dict = {}
for val in result.values():
for measure, measure_val in val.items():
measures_sum[measure] = measures_sum.get(measure, 0.0) + measure_val
for measure, value in measures_sum.items():
mean_dict[measure] = value / len(result.values())
return mean_dict
# create a dataframe of string of queries or a list or tuple of strings of queries
@staticmethod
def form_dataframe(query):
"""
Convert either a string or a list of strings to a dataframe for use as topics in retrieval.
Args:
query: Either a string or a list of strings
Returns:
dataframe with columns=['qid','query']
"""
if isinstance(query, pd.DataFrame):
return query
elif isinstance(query, str):
return pd.DataFrame([["1", query]], columns=['qid', 'query'])
# if queries is a list or tuple
elif isinstance(query, list) or isinstance(query, tuple):
# if the list or tuple is made of strings
if query != [] and isinstance(query[0], str):
indexed_query = []
for i, item in enumerate(query):
# all elements must be of same type
assert isinstance(item, str), f"{item} is not a string"
indexed_query.append([str(i + 1), item])
return pd.DataFrame(indexed_query, columns=['qid', 'query'])
@staticmethod
def get_files_in_dir(dir):
"""
Returns all the files present in a directory and its subdirectories
Args:
dir(str): The directory containing the files
Returns:
paths(list): A list of the paths to the files
"""
lst = []
zip_paths = []
for (dirpath, dirnames, filenames) in os.walk(dir):
lst.append([dirpath, filenames])
for sublist in lst:
for zip in sublist[1]:
zip_paths.append(os.path.join(sublist[0], zip))
return zip_paths
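# A small illustrative usage sketch (example data invented for demonstration, not part of the module).
if __name__ == "__main__":
    example_res = pd.DataFrame(
        [["1", "d1", 2.5], ["1", "d2", 1.0]], columns=["qid", "docno", "score"])
    example_qrels = pd.DataFrame(
        [["1", "d1", 1], ["1", "d2", 0]], columns=["qid", "docno", "label"])
    print(Utils.evaluate(example_res, example_qrels, metrics=["map"]))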
|
def add_menu():
pass
def default_menu():
pass
def update_menu():
pass
|
#some manual formatting required
import csv
with open('levelTemp.js', 'w') as the_file:
#change this for different output levels
with open('level/Whale Defense Force Level - Sheet1(1).csv', 'rb') as f:
reader = csv.reader(f)
the_file.write('var GAME_LEVELS = [\n [\n')
for row in reader:
the_file.write('"')
for character in row:
char = character
                if len(character) == 0:
char = " "
the_file.write(char)
the_file.write('",\n')
the_file.write(']\n];\n')
the_file.write('if (typeof module != "undefined" && module.exports)')
the_file.write('module.exports = GAME_LEVELS;')
|
# test 1
# Use json to store data generated while the program runs
# Use json.dump(data, file) to save the data
import json
num = ['1','2','3','4']
with open("num.json", 'w') as file:
json.dump(num, file)
# Use json.load(file) to load the data back from the json file
with open('num.json') as file:
number = json.load(file)
print(number)
|
# Generated by Django 3.0.3 on 2020-03-01 15:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='livre',
options={'ordering': ['titre']},
),
migrations.AddField(
model_name='livre',
name='slug_title',
field=models.SlugField(default=''),
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 12:46:45 2019
@author: thomas
"""
import os, shutil
import pandas as pd
import numpy as np
import random
import zipfile
import matplotlib
import matplotlib.pyplot as plt
from shutil import copyfile
import pathlib
#Script will generate randomly placed circles in a 2D plane
#Script will then CheckCircleBounds
#If circles overlap, then they will move apart from one another in opposite directions
#Continue above (2,3) until no circle overlaps
#When circles collide, they will try to push off like a collision
cwd_PYTHON = os.getcwd()
#CONSTANTS GRID PLACEMENT
RADIUSLARGE = 0.002
#CONSTANTS SPHEROBOT
RList = [3.0,4.0,5.0]
structureNames = ['skeletonES','botupES','botlowES']
def MakeDirectory(directory,seed):
if not os.path.exists(directory+'/'+str(seed)):
os.makedirs(directory+'/'+str(seed))
return
def zipFiles(src,dst):
zf = zipfile.ZipFile('%s.zip' % (dst), 'w', zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
print('zipping %s as %s' % (os.path.join(dirname, filename),
arcname))
zf.write(absname, arcname)
zf.close()
def StoreVertexInfo():
#BOTUP
#Read vertex file line by line and save the values in a list using the \n delimiter
linesup = [line.strip() for line in open('botupES.vertex')]
#Break each list element into an array of numbers using the space delimiter
linesup = [line.split() for line in linesup]
nvertup = int(linesup[0][0])
#Allocate Array for Large Sphere Vertex Positions
vertUp = np.zeros((2,nvertup))
#Store Vertices
for i in range(1,nvertup+1):
vertUp[0,i-1] = float(linesup[i][0])
vertUp[1,i-1] = float(linesup[i][1])
#BOTLOW
#Read vertex file line by line and save the values in a list using the \n delimiter
lineslow = [line.strip() for line in open('botlowES.vertex')]
#Break each list element into an array of numbers using the space delimiter
lineslow = [line.split() for line in lineslow]
nvertlow = int(lineslow[0][0])
#Allocate Array for Small Sphere Vertex Positions
vertLow = np.zeros((2,nvertlow))
#Store Vertices
for i in range(1,nvertlow+1):
vertLow[0,i-1] = float(lineslow[i][0])
vertLow[1,i-1] = float(lineslow[i][1])
#Read vertex file line by line and save the values in a list using the \n delimiter
linesskel = [line.strip() for line in open('skeletonES.vertex')]
#Break each list element into an array of numbers using the space delimiter
linesskel = [line.split() for line in linesskel]
nvertskel = int(linesskel[0][0])
#Allocate Array for Skeleton Vertex Positions
vertSkel = np.zeros((2,nvertskel))
#Store Vertices
for i in range(1,nvertskel+1):
vertSkel[0,i-1] = float(linesskel[i][0])
vertSkel[1,i-1] = float(linesskel[i][1])
nvert = [nvertskel,nvertup,nvertlow]
vertList = [vertSkel,vertUp,vertLow]
return (vertList,nvert)
def DisplaceSpherobots(vertList, nvert, structureNames, R, Theta, idxT, idxConfig):
#First Rotate based on idxConfig
#Allocate Arrays
rotationMatrix = np.zeros((2,2))
if(idxConfig == 1):
theta = np.pi/2.0
else:
theta = 0.0
#rotatedPosition = np.zeros((Nbots,2,nvert))
#Generate Random Angles
#print('theta[%i] = %.3e' %(i,theta[i]))
rotationMatrix[0,0] = np.cos(theta)
rotationMatrix[0,1] = -1.0*np.sin(theta)
rotationMatrix[1,0] = np.sin(theta)
rotationMatrix[1,1] = np.cos(theta)
#Displaces the spheres where they are a distance R apart at an angle Theta
#x1 and x2
x1Arr = np.zeros(2)
x1Arr[0], x1Arr[1] = -0.5*R*RADIUSLARGE*np.cos(Theta), 0.0025
#print(np.sin(Theta))
x2Arr = np.zeros(2)
x2Arr[0], x2Arr[1] = 0.5*R*RADIUSLARGE*np.cos(Theta), R*RADIUSLARGE*np.sin(Theta)+0.0025 #Account for CM
xList = [x1Arr, x2Arr]
if(idxConfig == 0):
pathlib.Path('../../Structures/Periodic/EqualSpheres/'+str(int(R))+'/PI'+str(idxT)+'/Parallel/').mkdir(parents=True, exist_ok=True)
cwd_PARALLEL = cwd_PYTHON + '/../../Structures/Periodic/EqualSpheres/'+str(int(R))+'/PI'+str(idxT)+'/Parallel/'
else:
pathlib.Path('../../Structures/Periodic/EqualSpheres/'+str(int(R))+'/PI'+str(idxT)+'/Perp/').mkdir(parents=True, exist_ok=True)
cwd_PERP = cwd_PYTHON + '/../../Structures/Periodic/EqualSpheres/'+str(int(R))+'/PI'+str(idxT)+'/Perp/'
#Generate Figure to show Pairwise Placement
fig = plt.figure(num=0,figsize=(4,4),dpi=120)
ax = fig.add_subplot(111)
ax.set_title('Pairwise Init Config: ES: \nR = %.4f m Theta = PI*%.2f m'%(R*0.002,Theta/np.pi))
ax.axis([-0.05,0.05,-0.05,0.05])
#Displace Spherobots
for idxBot in range(2):
dispArr = xList[idxBot]
#print(dispArr)
for idxName in range(len(vertList)):
name = structureNames[idxName]
vertPos = vertList[idxName].copy()
if(idxConfig == 0):
f = open(cwd_PARALLEL+name+str(idxBot+1)+'.vertex','w')
#Copy spring/beam files for 'name'
copyfile(name+'.spring',cwd_PARALLEL+name+str(idxBot+1)+'.spring')
else:
f = open(cwd_PERP+name+str(idxBot+1)+'.vertex','w')
#Copy spring/beam files for a'name'
copyfile(name+'.spring',cwd_PERP+name+str(idxBot+1)+'.spring')
f.write('%i\n'%nvert[idxName])
for idxVert in range(nvert[idxName]):
#Rotate Skeleton2 by Theta given idxConfig
if(idxName == 0 and idxBot == 1):
#print('b4: idxVert = %i: xPos = %.5e: yPos = %.5e'%(idxVert,vertPos[0,idxVert],vertPos[1,idxVert]))
if(idxVert <= 12):
CM = np.array([0.0,0.0025])
vertPos[:,idxVert] = rotationMatrix.dot(vertPos[:,idxVert] - CM)
vertPos[:,idxVert] += CM
else:
CM = np.array([0.000,-0.0025])
vertPos[:,idxVert] = rotationMatrix.dot(vertPos[:,idxVert].copy() - CM)
vertPos[:,idxVert] += CM
#print('a4: idxVert = %i: xPos = %.5e: yPos = %.5e'%(idxVert,vertPos[0,idxVert],vertPos[1,idxVert]))
#Displace Spherobot by xList[idxBot]
vertPos[:,idxVert] += dispArr[:]
#Rotate 90 degrees if Perp
if(idxConfig == 1 and idxBot == 1):
if(idxName == 0):
#Skeleton
if(idxVert <=12):
vertPos[0,idxVert] -= 0.005
vertPos[1,idxVert] -= 0.005
else:
vertPos[0,idxVert] += 0.00
vertPos[1,idxVert] += 0.00
elif(idxName == 1):
#Upper Sphere
vertPos[0,idxVert] -= 0.0025
vertPos[1,idxVert] -= 0.0025
elif(idxName == 2):
#Lower Sphere
vertPos[0,idxVert] += 0.0025
vertPos[1,idxVert] += 0.0025
#Write vertex coordinates down in .vertex file
if(idxVert == nvert[idxName] - 1):
f.write('%.5e %.5e'%(vertPos[0,idxVert],vertPos[1,idxVert]))
else:
f.write('%.5e %.5e\n' %(vertPos[0,idxVert],vertPos[1,idxVert]))
f.close()
#Plot Displaced Spherobots
if(idxName == 0):
#Skeleton
ax.plot(vertPos[0,:],vertPos[1,:],'ro',zorder=5,markersize=2)
ax.plot(vertPos[0,13],vertPos[1,13],'bo',zorder=6,markersize=2)
ax.plot(vertPos[0,0],vertPos[1,0],'bo',zorder=6,markersize=2)
else:
#Large and Small Spheres
ax.plot(vertPos[0,:],vertPos[1,:],'ko',zorder=1,markersize=2)
ax.axis([-4.5*R*0.002,4.5*R*0.002,-4.5*R*0.002,4.5*R*0.002])
fig.tight_layout()
if(idxConfig == 0):
#Parallel Configuration
fig.savefig(cwd_PARALLEL+'InitConfig.png')
else:
#PerpS Configuration
fig.savefig(cwd_PERP+'InitConfig.png')
fig.clf()
plt.close()
return
if __name__ == '__main__':
#Generate Placement for Pairwise Configurations: Parallel and Anti-Parallel
#1) Read in .vertex files
#2) Store Vertices in array (vertexPos array)
#3) Loop over R and Theta
#4) Displace spherobot 1 by x1 and spherobot2 by x2
#5) x1 = (-1.0*Lcos(theta),0.0); x2 = (Lcos(theta),Lsin(theta))
#6) If Antiparallel, switch ind sphere locations: LS -= 0.005; SS += 0.005
#7) Write new vertex positions to new .vertex file
#8) copy .beam and .spring files over to same dir as new .vertex files
#9) Zip newly created files
#1)Read in .vertex files
#2)Store Vertices in array
vertList, nvert = StoreVertexInfo()
nvertskel, nvertup, nvertlow = nvert[0], nvert[1], nvert[2]
vertSkel, vertUp, vertLow = vertList[0], vertList[1], vertList[2]
#3)Loop over R and Theta (Parallel, Anti-Parallel, PerpL, and PerpS)
for idxConfig in range(2):
#for idxR in range(0,3):
for idxR in range(len(RList)):
#R = 5.0 + 2.5*idxR
#R = 5.0 + 1.0*idxR
R = RList[idxR]
for idxT in range(4):
Theta = -1.0*np.pi/2.0 + idxT*np.pi/4.0
#4) Displace spherobot by x1 and x2
DisplaceSpherobots(vertList, nvert, structureNames, R, Theta, idxT,idxConfig)
|
"""Git specific support and addon."""
import argparse
import os
import pickle
import shlex
import subprocess
import sys
from collections import UserDict
from contextlib import AbstractContextManager
from functools import partial
from pathspec import PathSpec
from pkgcore.ebuild import cpv
from pkgcore.ebuild.atom import MalformedAtom
from pkgcore.ebuild.atom import atom as atom_cls
from pkgcore.repository import multiplex
from pkgcore.repository.util import SimpleTree
from pkgcore.restrictions import packages, values
from snakeoil.cli.exceptions import UserException
from snakeoil.demandload import demand_compile_regexp
from snakeoil.fileutils import AtomicWriteFile
from snakeoil.iterables import partition
from snakeoil.klass import jit_attr
from snakeoil.osutils import pjoin
from snakeoil.process import CommandNotFound, find_binary
from snakeoil.process.spawn import spawn_get_output
from snakeoil.strings import pluralism
from . import base, caches, objects
from .checks import GitCheck
from .log import logger
# hacky path regexes for git log parsing, proper validation is handled later
_ebuild_path_regex_raw = '([^/]+)/([^/]+)/([^/]+)\\.ebuild'
_ebuild_path_regex = '(?P<category>[^/]+)/(?P<PN>[^/]+)/(?P<P>[^/]+)\\.ebuild'
demand_compile_regexp('ebuild_ADM_regex', fr'^(?P<status>[ADM])\t{_ebuild_path_regex}$')
demand_compile_regexp('ebuild_R_regex', fr'^(?P<status>R)\d+\t{_ebuild_path_regex}\t{_ebuild_path_regex_raw}$')
demand_compile_regexp('eclass_regex', r'^eclass/(?P<eclass>\S+)\.eclass$')
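# Illustrative `git log --name-status` lines these regexes are meant to match
# (hypothetical package/eclass names, shown purely as documentation):
#   ebuild_ADM_regex: "A\tdev-python/foo/foo-1.0.ebuild"
#   ebuild_R_regex:   "R100\tdev-python/foo/foo-1.0.ebuild\tdev-python/bar/bar-1.0.ebuild"
#   eclass_regex:     "eclass/python-utils-r1.eclass"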
class GitCommit:
"""Git commit objects."""
def __init__(self, hash, commit_date, author, committer, message):
self.hash = hash
self.commit_date = commit_date
self.author = author
self.committer = committer
self.message = message
def __str__(self):
return self.hash
def __eq__(self, other):
return self.hash == other.hash
class GitPkgChange:
"""Git package change objects."""
def __init__(self, atom, status, commit):
self.atom = atom
self.status = status
self.commit = commit
class ParsedGitRepo(UserDict, caches.Cache):
"""Parse repository git logs."""
# git command to run on the targeted repo
_git_cmd = 'git log --name-status --date=short --diff-filter=ARMD'
def __init__(self, repo, commit=None, **kwargs):
super().__init__()
self.location = repo.location
self._cache = GitAddon.cache
if commit is None:
self.commit = 'origin/HEAD..master'
self._pkg_changes(commit=self.commit, **kwargs)
else:
self.commit = commit
self._pkg_changes(**kwargs)
def update(self, commit, **kwargs):
"""Update an existing repo starting at a given commit hash."""
self._pkg_changes(commit=self.commit, **kwargs)
self.commit = commit
@staticmethod
def _parse_file_line(line):
"""Pull atoms and status from file change lines."""
# match added, modified, or deleted ebuilds
match = ebuild_ADM_regex.match(line)
if match:
status = match.group('status')
category = match.group('category')
pkg = match.group('P')
try:
return atom_cls(f'={category}/{pkg}'), status
except MalformedAtom:
return None
# match renamed ebuilds
match = ebuild_R_regex.match(line)
if match:
status = match.group('status')
category = match.group('category')
pkg = match.group('P')
try:
return atom_cls(f'={category}/{pkg}'), status
except MalformedAtom:
return None
@classmethod
def parse_git_log(cls, repo_path, commit=None, pkgs=False, verbosity=-1):
"""Parse git log output."""
cmd = shlex.split(cls._git_cmd)
# custom git log format, see the "PRETTY FORMATS" section of the git
# log man page for details
format_lines = [
'# BEGIN COMMIT',
'%h', # abbreviated commit hash
'%cd', # commit date
'%an <%ae>', # Author Name <author@email.com>
'%cn <%ce>', # Committer Name <committer@email.com>
'%B', # commit message
'# END MESSAGE BODY',
]
format_str = '%n'.join(format_lines)
cmd.append(f'--pretty=tformat:{format_str}')
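# With this format string each commit block in the output stream looks roughly
# like the following (hypothetical values), which the loop below reads line by line:
#   # BEGIN COMMIT
#   1a2b3c4
#   2020-10-04
#   Author Name <author@example.com>
#   Committer Name <committer@example.com>
#   commit message body...
#   # END MESSAGE BODY
#   A       category/pkg/pkg-1.0.ebuild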
if commit:
if '..' in commit:
cmd.append(commit)
else:
cmd.append(f'{commit}..origin/HEAD')
else:
cmd.append('origin/HEAD')
git_log = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_path)
line = git_log.stdout.readline().decode().strip()
if git_log.poll():
error = git_log.stderr.read().decode().strip()
logger.warning('skipping git checks: %s', error)
return
count = 1
with base.ProgressManager(verbosity=verbosity) as progress:
while line:
hash = git_log.stdout.readline().decode().strip()
commit_date = git_log.stdout.readline().decode().strip()
author = git_log.stdout.readline().decode('utf-8', 'replace').strip()
committer = git_log.stdout.readline().decode('utf-8', 'replace').strip()
message = []
while True:
line = git_log.stdout.readline().decode('utf-8', 'replace').strip('\n')
if line == '# END MESSAGE BODY':
# drop trailing newline if it exists
if not message[-1]:
message.pop()
break
message.append(line)
# update progress output
progress(f'{hash} commit #{count}, {commit_date}')
count += 1
commit = GitCommit(hash, commit_date, author, committer, message)
if not pkgs:
yield commit
# file changes
while True:
line = git_log.stdout.readline().decode()
if line == '# BEGIN COMMIT\n' or not line:
break
if pkgs:
parsed = cls._parse_file_line(line.strip())
if parsed is not None:
atom, status = parsed
yield GitPkgChange(atom, status, commit)
def _pkg_changes(self, local=False, **kwargs):
"""Parse package changes from git log output."""
seen = set()
for pkg in self.parse_git_log(self.location, pkgs=True, **kwargs):
atom = pkg.atom
key = (atom, pkg.status)
if key not in seen:
seen.add(key)
self.data.setdefault(atom.category, {}).setdefault(
atom.package, {}).setdefault(pkg.status, []).append((
atom.fullver,
pkg.commit.commit_date,
pkg.commit.hash if not local else pkg.commit,
))
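# The mapping built by _pkg_changes() is nested as
# {category: {package: {status: [(fullver, commit_date, hash_or_commit), ...]}}};
# the _HistoricalRepo classes below consume it through SimpleTree's cpv_dict.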
class _GitCommitPkg(cpv.VersionedCPV):
"""Fake packages encapsulating commits parsed from git log."""
def __init__(self, category, package, status, version, date, commit):
super().__init__(category, package, version)
# add additional attrs
sf = object.__setattr__
sf(self, 'date', date)
sf(self, 'status', status)
sf(self, 'commit', commit)
class _HistoricalRepo(SimpleTree):
"""Repository encapsulating historical git data."""
# selected pkg status filter
_status_filter = {'A', 'R', 'M', 'D'}
def __init__(self, *args, **kwargs):
kwargs.setdefault('pkg_klass', _GitCommitPkg)
super().__init__(*args, **kwargs)
def _get_versions(self, cp):
versions = []
for status, data in self.cpv_dict[cp[0]][cp[1]].items():
if status in self._status_filter:
versions.append((status, data))
return versions
def _internal_gen_candidates(self, candidates, sorter, raw_pkg_cls, **kwargs):
for cp in sorter(candidates):
yield from sorter(
raw_pkg_cls(cp[0], cp[1], status, *commit)
for status, data in self.versions.get(cp, ())
for commit in data)
class GitChangedRepo(_HistoricalRepo):
"""Historical git repo consisting of the latest changed packages."""
class GitModifiedRepo(_HistoricalRepo):
"""Historical git repo consisting of the latest modified packages."""
_status_filter = {'A', 'R', 'M'}
class GitAddedRepo(_HistoricalRepo):
"""Historical git repo consisting of added packages."""
_status_filter = {'A', 'R'}
class GitRemovedRepo(_HistoricalRepo):
"""Historical git repo consisting of removed packages."""
_status_filter = {'D'}
class _ScanCommits(argparse.Action):
"""Argparse action that enables git commit checks."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, parser, namespace, value, option_string=None):
namespace.forced_checks.extend(
name for name, cls in objects.CHECKS.items() if issubclass(cls, GitCheck))
setattr(namespace, self.dest, value)
class GitStash(AbstractContextManager):
"""Context manager for stashing untracked or modified/uncommitted files.
This assumes that no git actions are performed on the repo while a scan is
underway; otherwise `git stash` usage may cause issues.
"""
def __init__(self, parser, repo):
self.parser = parser
self.repo = repo
self._stashed = False
def __enter__(self):
# check for untracked or modified/uncommitted files
p = subprocess.run(
['git', 'ls-files', '-mo', '--exclude-standard'],
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL,
cwd=self.repo.location, encoding='utf8')
if p.returncode != 0 or not p.stdout:
return
# stash all existing untracked or modified/uncommitted files
p = subprocess.run(
['git', 'stash', 'push', '-u', '-m', 'pkgcheck scan --commits'],
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
cwd=self.repo.location, encoding='utf8')
if p.returncode != 0:
error = p.stderr.splitlines()[0]
self.parser.error(f'git failed stashing files: {error}')
self._stashed = True
def __exit__(self, _exc_type, _exc_value, _traceback):
if self._stashed:
# apply previously stashed files back to the working tree
p = subprocess.run(
['git', 'stash', 'pop'],
stdout=subprocess.DEVNULL, stderr=subprocess.PIPE,
cwd=self.repo.location, encoding='utf8')
if p.returncode != 0:
error = p.stderr.splitlines()[0]
self.parser.error(f'git failed applying stash: {error}')
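# Minimal usage sketch for GitStash (illustrative only; ``parser`` and ``repo``
# are assumed to be an argparse parser and a repo object exposing ``location``,
# as used elsewhere in this module):
#
#   with GitStash(parser, repo):
#       ...  # scan against a clean tree; stashed files are restored on exit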
class GitAddon(base.Addon, caches.CachedAddon):
"""Git repo support for various checks.
Pkgcheck can create virtual package repos from a given git repo's history
in order to provide more info for checks relating to stable requests,
outdated blockers, or local commits. These virtual repos are cached and
updated every run if new commits are detected.
Git repos must have a supported config in order to work properly.
Specifically, pkgcheck assumes that both origin and master branches exist
and relate to the upstream and local development states, respectively.
Additionally, the origin/HEAD ref must exist. If it doesn't, running ``git
fetch origin`` should create it. Otherwise, using ``git remote set-head
origin master`` or similar will also create the reference.
"""
# cache registry
cache = caches.CacheData(type='git', file='git.pickle', version=4)
@classmethod
def mangle_argparser(cls, parser):
group = parser.add_argument_group('git', docs=cls.__doc__)
group.add_argument(
'--commits', action=_ScanCommits, nargs='?',
metavar='COMMIT', const='origin', default=None,
help="determine scan targets from local git repo commits",
docs="""
For a local git repo, pkgcheck will determine targets to scan
from the committed changes compared to a given reference that
defaults to the repo's origin.
For example, to scan all the packages that have been changed in
the current branch compared to the branch named 'old' use
``pkgcheck scan --commits old``. For two separate branches
named 'old' and 'new' use ``pkgcheck scan --commits old..new``.
Note that this will also enable eclass-specific checks if pkgcheck
determines that any commits have been made to eclasses.
""")
@staticmethod
def _committed_eclass(committed, eclass):
"""Stub method for matching eclasses against commits."""
return eclass in committed
@staticmethod
def _pkg_atoms(paths):
"""Filter package atoms from commit paths."""
for x in paths:
try:
yield atom_cls(os.sep.join(x.split(os.sep, 2)[:2]))
except MalformedAtom:
continue
@classmethod
def check_args(cls, parser, namespace):
if namespace.commits:
if namespace.targets:
targets = ' '.join(namespace.targets)
s = pluralism(namespace.targets)
parser.error(f'--commits is mutually exclusive with target{s}: {targets}')
ref = namespace.commits
repo = namespace.target_repo
targets = list(repo.category_dirs)
if os.path.isdir(pjoin(repo.location, 'eclass')):
targets.append('eclass')
try:
p = subprocess.run(
['git', 'diff', '--cached', ref, '--name-only'] + targets,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=repo.location, encoding='utf8')
except FileNotFoundError:
parser.error('git not available to determine targets for --commits')
if p.returncode != 0:
error = p.stderr.splitlines()[0]
parser.error(f'failed running git: {error}')
elif not p.stdout:
# no changes exist, exit early
parser.exit()
pkgs, eclasses = partition(
p.stdout.splitlines(), predicate=lambda x: x.startswith('eclass/'))
pkgs = sorted(cls._pkg_atoms(pkgs))
eclasses = filter(None, (eclass_regex.match(x) for x in eclasses))
eclasses = sorted(x.group('eclass') for x in eclasses)
restrictions = []
if pkgs:
restrict = packages.OrRestriction(*pkgs)
restrictions.append((base.package_scope, restrict))
if eclasses:
func = partial(cls._committed_eclass, frozenset(eclasses))
restrict = values.AnyMatch(values.FunctionRestriction(func))
restrictions.append((base.eclass_scope, restrict))
# no pkgs or eclasses to check, exit early
if not restrictions:
parser.exit()
namespace.contexts.append(GitStash(parser, repo))
namespace.restrictions = restrictions
def __init__(self, *args):
super().__init__(*args)
# disable git support if git isn't installed
if self.options.cache['git']:
try:
find_binary('git')
except CommandNotFound:
self.options.cache['git'] = False
# mapping of repo locations to their corresponding git repo caches
self._cached_repos = {}
@jit_attr
def gitignore(self):
"""Load a repo's .gitignore and .git/info/exclude files for path matching."""
patterns = []
for path in ('.gitignore', '.git/info/exclude'):
try:
with open(pjoin(self.options.target_repo.location, path)) as f:
patterns.extend(f)
except FileNotFoundError:
pass
except IOError as e:
logger.warning(f'failed reading {path!r}: {e}')
return PathSpec.from_lines('gitwildmatch', patterns)
def gitignored(self, path):
"""Determine if a given path in a repository is matched by .gitignore settings."""
if path.startswith(self.options.target_repo.location):
repo_prefix_len = len(self.options.target_repo.location) + 1
path = path[repo_prefix_len:]
return self.gitignore.match_file(path)
@staticmethod
def get_commit_hash(repo_location, commit='origin/HEAD'):
"""Retrieve a git repo's commit hash for a specific commit object."""
if not os.path.exists(pjoin(repo_location, '.git')):
raise ValueError
ret, out = spawn_get_output(
['git', 'rev-parse', commit], cwd=repo_location)
if ret != 0:
raise ValueError(
f'failed retrieving {commit} commit hash '
f'for git repo: {repo_location}')
return out[0].strip()
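# For example (hypothetical path), ``get_commit_hash('/var/db/repos/gentoo')``
# returns the full origin/HEAD hash for that checkout and raises ValueError when
# the path is not a git checkout or the ref cannot be resolved.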
def update_cache(self, force=False):
"""Update related cache and push updates to disk."""
try:
# running from scan subcommand
repos = self.options.target_repo.trees
except AttributeError:
# running from cache subcommand
repos = self.options.domain.ebuild_repos
if self.options.cache['git']:
for repo in repos:
try:
commit = self.get_commit_hash(repo.location)
except ValueError:
continue
# initialize cache file location
cache_file = self.cache_file(repo)
git_repo = None
cache_repo = True
if not force:
# try loading cached, historical repo data
try:
with open(cache_file, 'rb') as f:
git_repo = pickle.load(f)
if git_repo.version != self.cache.version:
logger.debug('forcing git repo cache regen due to outdated version')
os.remove(cache_file)
git_repo = None
except FileNotFoundError:
pass
except (AttributeError, EOFError, ImportError, IndexError) as e:
logger.debug('forcing git repo cache regen: %s', e)
os.remove(cache_file)
git_repo = None
if (git_repo is not None and
repo.location == getattr(git_repo, 'location', None)):
if commit != git_repo.commit:
old, new = git_repo.commit[:13], commit[:13]
print(
f'updating {repo} git repo cache: {old} -> {new}',
file=sys.stderr,
)
git_repo.update(commit, verbosity=self.options.verbosity)
else:
cache_repo = False
else:
print(
f'creating {repo} git repo cache: {commit[:13]}',
file=sys.stderr,
)
git_repo = ParsedGitRepo(repo, commit, verbosity=self.options.verbosity)
if git_repo:
self._cached_repos[repo.location] = git_repo
# push repo to disk if it was created or updated
if cache_repo:
try:
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
f = AtomicWriteFile(cache_file, binary=True)
f.write(pickle.dumps(git_repo))
f.close()
except IOError as e:
msg = f'failed dumping git pkg repo: {cache_file!r}: {e.strerror}'
raise UserException(msg)
def cached_repo(self, repo_cls, target_repo=None):
cached_repo = None
if target_repo is None:
target_repo = self.options.target_repo
if self.options.cache['git']:
git_repos = []
for repo in target_repo.trees:
git_repo = self._cached_repos.get(repo.location, None)
# only enable repo queries if history was found, e.g. a
# shallow clone with a depth of 1 won't have any history
if git_repo:
git_repos.append(repo_cls(git_repo, repo_id=f'{repo.repo_id}-history'))
else:
logger.warning('skipping git checks for %s repo', repo)
break
else:
if len(git_repos) > 1:
cached_repo = multiplex.tree(*git_repos)
elif len(git_repos) == 1:
cached_repo = git_repos[0]
return cached_repo
def commits_repo(self, repo_cls, target_repo=None, options=None):
options = options if options is not None else self.options
if target_repo is None:
target_repo = options.target_repo
git_repo = {}
repo_id = f'{target_repo.repo_id}-commits'
if options.cache['git']:
try:
origin = self.get_commit_hash(target_repo.location)
master = self.get_commit_hash(target_repo.location, commit='master')
if origin != master:
git_repo = ParsedGitRepo(target_repo, local=True)
except ValueError as e:
if str(e):
logger.warning('skipping git commit checks: %s', e)
return repo_cls(git_repo, repo_id=repo_id)
def commits(self, repo=None):
path = repo.location if repo is not None else self.options.target_repo.location
commits = iter(())
if self.options.cache['git']:
try:
origin = self.get_commit_hash(path)
master = self.get_commit_hash(path, commit='master')
if origin != master:
commits = ParsedGitRepo.parse_git_log(path, commit='origin/HEAD..master')
except ValueError as e:
if str(e):
logger.warning('skipping git commit checks: %s', e)
return commits
|
from django.test import SimpleTestCase
from django.test.utils import override_settings
from ..checks import settings_checks
class CheckUseTZEnabledTest(SimpleTestCase):
@override_settings(USE_TZ=False)
def test_use_tz_false(self):
"""If USE_TZ is off provide one warning."""
self.assertEqual(
settings_checks.check_use_tz_enabled(None),
[settings_checks.W001]
)
@override_settings(USE_TZ=True)
def test_use_tz_true(self):
"""If USE_TZ is on, there's no warning about it."""
self.assertEqual(settings_checks.check_use_tz_enabled(None), [])
|
import requests

r = requests.get("https://api.")
|
import tensorflow as tf

# Two scalar float placeholders to be fed at run time (TensorFlow 1.x graph API).
a = tf.placeholder(tf.float32, name='a')
b = tf.placeholder(tf.float32, name='b')
# Graph node that adds the two placeholders.
adder_node = tf.add(a, b, name='add')

sess = tf.Session()
# Feed scalars, then lists; the add op operates element-wise on the fed values.
print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))

# Dump the graph so it can be viewed in TensorBoard.
writer = tf.summary.FileWriter('placeholder_add', sess.graph)
writer.close()
|
# Write a program that asks for the temperature in degrees Fahrenheit,
# converts it, and displays the temperature in degrees Celsius.
# C = (5 * (F - 32) / 9).
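# Worked example (hypothetical input): F = 212  ->  C = 5 * (212 - 32) / 9 = 100.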
# data input
farenheit = float(input('Enter the temperature in degrees Fahrenheit: '))
# processing
celsius = 5 * (farenheit - 32) / 9
mensagem = '{} Fahrenheit equals {:.0f} Celsius'.format(farenheit, celsius)
# data output
print(mensagem)
|