repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
itsMagondu/IoTNeuralNetworks | noisefilter/apps/filter/migrations/0004_kalmanresult.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-08 22:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the KalmanResult table.

    KalmanResult stores one Kalman-filter run: the final prediction, the run
    parameters (iterations, seconds, initial_guess), the known true value,
    and a creation timestamp.
    """

    dependencies = [
        ('filter', '0003_annresult'),
    ]

    operations = [
        migrations.CreateModel(
            name='KalmanResult',
            fields=[
                # Standard auto-increment surrogate key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('prediction', models.FloatField(blank=True, null=True)),
                ('iterations', models.IntegerField(blank=True, null=True)),
                ('seconds', models.IntegerField(blank=True, null=True)),
                ('initial_guess', models.IntegerField(blank=True, null=True)),
                ('truevalue', models.FloatField(blank=True, null=True)),
                # Set once on insert, never updated.
                ('added', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
|
itsMagondu/IoTNeuralNetworks | noisefilter/generate_noisy_signal.py | <reponame>itsMagondu/IoTNeuralNetworks
# Standalone script: seed the TrainingExample table with a synthetic
# noisy-signal dataset for training the denoising network.

# Bootstrap Django so the ORM can be used outside of manage.py.
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "noisefilter.settings.development")
import django
django.setup()

from filter.models import TrainingExample
import numpy as np

# Clean linear "temperature" ramp from 18 to 22 over 100 samples, plus
# zero-mean Gaussian noise (sigma=0.5) to simulate sensor readings.
pure = np.linspace(18, 22, 100)
noise = np.random.normal(0, 0.5, 100)
signal = pure + noise

# Persist each (noisy input, clean target) pair as one training example.
# zip over the parallel arrays replaces the original manual counter.
for noisy, clean in zip(signal, pure):
    TrainingExample.objects.create(datainput=noisy, dataoutput=clean)
|
itsMagondu/IoTNeuralNetworks | noisefilter/noisefilter/settings/development.py | <gh_stars>0
# Development settings: extend the shared base settings with debug-friendly
# overrides (sqlite database, dummy cache, console email, debug toolbar).
from .base import *

DEBUG = True

INTERNAL_IPS = ["127.0.0.1"]

# NOTE(review): fixed dev-only key — must never be used in production.
SECRET_KEY = "secret"

## DATABASE SETTINGS
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'development.sqlite3',
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    },
}

# Disable caching entirely during development.
CACHES = {
    "default": {
        "BACKEND": "django.core.cache.backends.dummy.DummyCache"
    }
}

# Print outgoing email to the console instead of sending it.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

## DJANGO DEBUG TOOLBAR SETTINGS
# https://django-debug-toolbar.readthedocs.org
def show_toolbar(request):
    # Show the toolbar only to superusers, and never on AJAX requests.
    return not request.is_ajax() and request.user and request.user.is_superuser

MIDDLEWARE_CLASSES += ["debug_toolbar.middleware.DebugToolbarMiddleware", ]
INSTALLED_APPS += ["debug_toolbar", ]

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'HIDE_DJANGO_SQL': True,
    'TAG': 'body',
    'SHOW_TEMPLATE_CONTEXT': True,
    'ENABLE_STACKTRACES': True,
    'SHOW_TOOLBAR_CALLBACK': 'noisefilter.settings.development.show_toolbar',
}

DEBUG_TOOLBAR_PANELS = (
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
)

# Optional machine-local overrides; silently skipped when the module is absent.
# NOTE(review): implicit relative import (Python 2 style) — confirm this still
# resolves if the project is ever moved to Python 3.
try:
    from local_settings import *
except ImportError:
    pass
|
itsMagondu/IoTNeuralNetworks | noisefilter/noisefilter/settings/__init__.py | """ Settings for noisefilter """
|
itsMagondu/IoTNeuralNetworks | kalmanfilter.py | <filename>kalmanfilter.py
# Kalman filter in Python adopted from http://scipy-cookbook.readthedocs.io/items/KalmanFiltering.html
import numpy as np
import matplotlib.pyplot as plt
import time
class KalmanFilter:
def __init__(self,base_value=24,iterations=200,initial_guess=20.0,posteri_estimate=4.0,data=[],plot=False):
# intial parameters
self.n_iter = iterations # How many iterations to create test data
sz = (self.n_iter,) # size of array
self.x = base_value # This is the base value that shall be used to create noisy data. It is the true value
if len(data) == 0:
self.z = np.random.normal(self.x,1,size=sz) # observations (normal about x, sigma=0.1)
else:
self.z = data
self.Q = 1e-5 # process variance
# allocate space for arrays
self.xhat=np.zeros(sz) # a posteri estimate of x
self.P=np.zeros(sz) # a posteri error estimate
self.xhatminus=np.zeros(sz) # a priori estimate of x
self.Pminus=np.zeros(sz)
# a priori error estimate
self.K=np.zeros(sz) # gain or blending factor
self.R = 2
# intial guesses
self.xhat[0] = initial_guess #Initial estimate
self.P[0] = posteri_estimate#Estimate of the error made
self.plot = plot
def filter(self):
print "starting the filter"
start = time.time()
for k in range(1,self.n_iter):
# time update
self.xhatminus[k] = self.xhat[k-1]
self.Pminus[k] = self.P[k-1]+self.Q
# measurement update
self.K[k] = self.Pminus[k]/( self.Pminus[k]+self.R )
self.xhat[k] = self.xhatminus[k]+self.K[k]*(self.z[k]-self.xhatminus[k])
self.P[k] = (1-self.K[k])*self.Pminus[k]
end = time.time()
print("Took %s seconds" % (time.time() - start))
print "Noisy data: "
print self.z
print "Estimates:"
print self.xhat
print "Truth Value:"
print self.x
#print "Error estimate"
#print self.P
if self.plot:
self.plot_results()
return self.z, self.xhat, self.x
def plot_results(self):
plt.rcParams['figure.figsize'] = (10, 8)
plt.figure()
plt.plot(self.z,'k+',label='noisy measurements')
plt.plot(self.xhat,'b-',label='a posteri estimate')
plt.axhline(self.x,color='g',label='truth value')
plt.legend()
plt.title('Estimate vs. iteration step', fontweight='bold')
plt.xlabel('Iteration')
plt.ylabel('Temperature')
#plt.figure()
#valid_iter = range(1,self.n_iter) # Pminus not valid at step 0
#plt.plot(valid_iter,self.Pminus[valid_iter],label='a priori error estimate')
#plt.title('Estimated $\it{\mathbf{a \ priori}}$ error vs. iteration step', fontweight='bold')
#plt.xlabel('Iteration')
#plt.ylabel('$(Temparature)^2$')
#plt.setp(plt.gca(),'ylim',[0,.01])
plt.show()
|
itsMagondu/IoTNeuralNetworks | noisefilter/apps/filter/apps.py | from __future__ import unicode_literals
from django.apps import AppConfig
class FilterConfig(AppConfig):
    """Django application configuration for the 'filter' app."""
    name = 'filter'
|
itsMagondu/IoTNeuralNetworks | noisefilter/apps/filter/migrations/0001_initial.py | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-07 18:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial auto-generated migration for the 'filter' app.

    Creates ANNConfiguration (network hyper-parameters), Data (per-reading
    filter results) and KalmanConfiguration (Kalman run parameters).
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ANNConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('layers', models.IntegerField(blank=True, null=True)),
                ('activation', models.CharField(blank=True, default='', max_length=20, null=True)),
                ('x_value', models.FloatField(blank=True, null=True)),
                ('y_value', models.FloatField(blank=True, null=True)),
                ('learning_rate', models.IntegerField(blank=True, null=True)),
                ('epochs', models.IntegerField(blank=True, null=True)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('reading', models.FloatField(blank=True, null=True)),
                ('prevreading', models.FloatField(blank=True, null=True)),
                ('output', models.FloatField(blank=True, null=True)),
                ('error', models.FloatField(blank=True, null=True)),
                ('truevalue', models.FloatField(blank=True, null=True)),
                ('kalmanvalue', models.FloatField(blank=True, null=True)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='KalmanConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('base_value', models.FloatField(blank=True, null=True)),
                ('iterations', models.FloatField(blank=True, null=True)),
                ('initial_guess', models.FloatField(blank=True, null=True)),
                ('posteri_estimate', models.FloatField(blank=True, null=True)),
                ('added', models.DateTimeField(auto_now_add=True)),
                ('active', models.BooleanField(default=True)),
            ],
        ),
    ]
|
itsMagondu/IoTNeuralNetworks | noisefilter/apps/filter/migrations/0002_auto_20160907_1203.py | <reponame>itsMagondu/IoTNeuralNetworks<filename>noisefilter/apps/filter/migrations/0002_auto_20160907_1203.py<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-09-07 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds TrainingExample and drops the unused
    x_value / y_value columns from ANNConfiguration."""

    dependencies = [
        ('filter', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='TrainingExample',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Clean target value for the noisy input.
                ('dataoutput', models.FloatField(blank=True, null=True)),
                # Noisy measured value fed to the network.
                ('datainput', models.FloatField(blank=True, null=True)),
            ],
        ),
        migrations.RemoveField(
            model_name='annconfiguration',
            name='x_value',
        ),
        migrations.RemoveField(
            model_name='annconfiguration',
            name='y_value',
        ),
    ]
|
hezuoguang/ZGVL | WLServer/api/admin.py | <reponame>hezuoguang/ZGVL
#coding:utf-8
from django.contrib import admin
from api.models import *
# Register your models here.
# Admin registrations for the chat API models.  Each ModelAdmin lists the
# columns shown on that model's changelist page.

class UserAdmin(admin.ModelAdmin):
    list_display = ('uid', 'name', 'sex', 'birthday', 'city', 'pwd', 'access_token')

class MessageAdmin(admin.ModelAdmin):
    list_display = ('id', 'text', 'type', 'to_user')

class StatusAdmin(admin.ModelAdmin):
    list_display = ('id', 'text', 'pics', 'from_user')

class CommentAdmin(admin.ModelAdmin):
    list_display = ('id', 'text', 'status', 'from_user')

class NewFriendAdmin(admin.ModelAdmin):
    list_display = ('id', 'text', 'status', 'to_user')

admin.site.register(User, UserAdmin)
admin.site.register(Message, MessageAdmin)
admin.site.register(Status, StatusAdmin)
admin.site.register(Newfriend, NewFriendAdmin)
# Fix: CommentAdmin was defined but never used — the original registered
# Comment with the default ModelAdmin.
admin.site.register(Comment, CommentAdmin)
|
hezuoguang/ZGVL | WLServer/api/function.py | <gh_stars>0
# -*- coding: UTF-8 -*-
__author__ = 'weimi'
from api.models import *
from django.db.models import Q
import hashlib
import re
import json
from qiniu import Auth
# Qiniu cloud-storage credentials and target bucket.
# NOTE(review): real secret key committed to source control — rotate it and
# load from the environment instead.
access_key = "<KEY>"
secret_key = "IujhqwUXdusrrLYooPA4WZdJtS7RR6r65TALg2p_"
bucket_name = "weiliao"
# Salt appended to raw passwords before md5 hashing.
pwdfix = "weimi"
# Number of stock avatar images available at photoUrl<N>.jpg.
photoCount = 43
photoUrl = "http://7xl0k3.com1.z0.glb.clouddn.com/photo"
def safestr(text):
    """Sanitize *text* for embedding in the hand-built JSON templates.

    Control whitespace (\r, \t, \n) is flattened to spaces, then backslashes
    and double quotes are escaped — backslashes first, so the quote escapes
    are not themselves re-escaped.

    Fix: the parameter no longer shadows the builtin ``str``.
    """
    text = text.replace("\r", " ")
    text = text.replace("\t", " ")
    text = text.replace("\n", " ")
    text = text.replace("\\", "\\\\")
    text = text.replace("\"", "\\\"")
    return text
# Look up a user by uid and password; return None when no match is found.
def queryUser(uid, pwd):
    """Authenticate *uid*/*pwd* and return the User, or None on failure.

    The stored password is md5(pwd + pwdfix), so the candidate password is
    salted and hashed the same way before the lookup.
    """
    try:
        pwd = hashlib.new("md5", pwd + pwdfix).hexdigest()
        user = User.objects.get(uid = uid, pwd = pwd)
    except:
        # NOTE(review): bare except also hides DB errors, not just a missing
        # user — consider narrowing to User.DoesNotExist.
        return None
    return user
# Register a user with uid and pwd.  Returns the new User on success,
# None when the uid is already taken, or -1 on server error.
def registerUser(uid, pwd):
    try:
        # EAFP: if this get() succeeds the uid is taken and we fall through
        # to the final `return None`.
        user = User.objects.get(uid = uid)
    except:
        try:
            user = User()
            user.uid = uid
            user.name = uid
            # Assign an avatar by cycling through the photoCount stock photos.
            count = User.objects.count()
            photo = photoUrl + (str)(count % photoCount + 1) + ".jpg"
            user.photo = photo
            # Password stored as md5(pwd + salt); access token derived from
            # uid + salt + hashed password.
            user.pwd = hashlib.new("md5", pwd + pwdfix).hexdigest()
            user.access_token = hashlib.new("md5", uid + pwdfix + user.pwd).hexdigest()
            user.save()
            return user
        except:
            return -1
    return None
# Store a chat message.
# text: message body — plain text for text messages, the gif image name for
#       gif messages, or the resource URL for voice/picture messages.
# type: message type code.
# access_token: sender's token.
# to_user: recipient uid.
# Returns -1 for an invalid token, -2 when to_user does not exist (or is the
# sender), None on server error, else {"message": message}.
def insertMessage(text, type, access_token, to_user):
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = to_user)
    except:
        return -2
    try:
        # Sending to yourself is treated the same as a missing recipient.
        if to_user.uid == from_user.uid:
            return -2
        message = Message()
        message.text = safestr(text)
        message.type = type
        message.to_user = to_user
        message.save()
        # `messgaes` (sic — the model field is misspelled) links the sender
        # to the messages they have sent.
        from_user.messgaes.add(message)
        from_user.save()
        return {"message" : message}
    except:
        return None
# Fetch messages with id > since_id for the token's user, at most `count`.
def queryNewMessages(since_id, access_token, count):
    """Return {"messages": [...]} — the conversation delta for a user.

    since_id > 0: the oldest `count` messages newer than since_id, ascending
    by id.  since_id <= 0: the newest `count` messages, descending by id.
    Both directions merge messages the user received and messages the user
    sent (the misspelled `messgaes` M2M), deduplicated via a set.
    Returns -1 on a bad token, None on server error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        if (int)(since_id) > 0:
            # Messages with id > since_id received by `user`, oldest first.
            messages_to_user = Message.objects.filter(to_user = user, id__gt = since_id).order_by("id")[0 : count]
            # Messages with id > since_id sent by `user`, oldest first.
            messages_from_user = user.messgaes.filter(id__gt = since_id).order_by("id")[0 : count]
            # Merge through a set to drop duplicates, then re-sort ascending
            # and truncate to `count`.
            messages = set()
            for message in messages_to_user:
                messages.add(message)
            for message in messages_from_user:
                messages.add(message)
            messages = sorted(list(messages), key=lambda m1:m1.id)[0 : count]
            return {"messages" : messages}
        else:
            # Most recent `count` messages received by `user`.
            messages_to_user = Message.objects.filter(to_user = user).order_by("-id")[0 : count]
            # Most recent `count` messages sent by `user`.
            messages_from_user = user.messgaes.all().order_by("-id")[0 : count]
            messages = set()
            for message in messages_to_user:
                messages.add(message)
            for message in messages_from_user:
                messages.add(message)
            # Newest first here (descending id).
            messages = sorted(list(messages), key=lambda m1:-m1.id)[0 : count]
            return {"messages" : messages}
    except:
        return None
# Fetch messages with id < max_id for the token's user, at most `count`.
def queryOldMessages(max_id, access_token, count):
    """Return {"messages": [...]} — older history, newest first.

    Merges messages received by and sent by the user (deduplicated via a
    set), sorted by descending id and truncated to `count`.
    Returns -1 on a bad token, None on server error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Received messages with id < max_id, newest first.
        messages_to_user = Message.objects.filter(to_user = user, id__lt = max_id).order_by("-id")[0 : count]
        # Sent messages with id < max_id, newest first.
        messages_from_user = user.messgaes.filter(id__lt = max_id).order_by("-id")[0 : count]
        messages = set()
        for message in messages_to_user:
            messages.add(message)
        for message in messages_from_user:
            messages.add(message)
        messages = sorted(list(messages), key=lambda m1:-m1.id)[0 : count]
        return {"messages" : messages}
    except:
        return None
# Post a status (similar to a microblog/moments post).
# text: status body; access_token: author's token; pics: list of image URLs.
# Returns -1 on a bad token, None on server error, else {"status": status}.
def insertStatus(text, access_token, pics):
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        status = Status()
        status.text = safestr(text)
        # Image URLs are stored space-separated in a single text column;
        # picsWithText() reverses this on read.
        status.pics = " ".join(pics)
        status.from_user = from_user
        status.save()
        return {"status" : status}
    except:
        return None
# Fetch statuses with id > since_id visible to the token's user, at most `count`.
def queryNewStatuses(since_id, access_token, count):
    """Return {"statuses": [...]} — the user's own and friends' statuses.

    since_id > 0 fetches the delta since that id; otherwise the latest
    `count`.  Either way the result is sorted newest-first and each status's
    `pics` column is expanded in place into a URL list.
    Returns -1 on a bad token, None on server error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        if (int)(since_id) > 0:
            # Statuses (id > since_id) posted by the user or any friend.
            statuses = Status.objects.filter(Q(from_user = user) | Q(from_user__in = user.friends.all()) ,id__gt = since_id).order_by("id")[0 : count]
            for status in statuses:
                # Expand the space-separated pics column into a URL list.
                status.pics = picsWithText(status.pics)
            # Largest id (newest) first.
            statuses = sorted(statuses, key=lambda s1:-s1.id)[0 : count]
            return {"statuses" : statuses}
        else:
            # Latest statuses posted by the user or any friend.
            statuses = Status.objects.filter(Q(from_user = user) | Q(from_user__in = user.friends.all())).order_by("-id")[0 : count]
            for status in statuses:
                status.pics = picsWithText(status.pics)
            # Largest id (newest) first.
            statuses = sorted(statuses, key=lambda s1:-s1.id)[0 : count]
            return {"statuses" : statuses}
    except:
        return None
# Fetch statuses with id < max_id visible to the token's user, at most `count`.
def queryOldStatuses(max_id, access_token, count):
    """Return {"statuses": [...]} — older timeline entries, newest first.

    Same visibility rule as queryNewStatuses (own + friends' posts); `pics`
    is expanded in place.  Returns -1 on a bad token, None on server error.
    """
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Statuses with id < max_id posted by the user or any friend.
        statuses = Status.objects.filter(Q(from_user = user) | Q(from_user__in = user.friends.all()), id__lt = max_id).order_by("-id")[0 : count]
        for status in statuses:
            # Expand the space-separated pics column into a URL list.
            status.pics = picsWithText(status.pics)
        # Largest id (newest) first.
        statuses = sorted(statuses, key=lambda s1:-s1.id)[0 : count]
        return {"statuses" : statuses}
    except:
        return None
# Split a space-separated pics string into a list of valid image URLs.
def picsWithText(text):
    """Return the space-separated tokens of *text* that look like URLs.

    Uses the well-known Django-style URL regex (scheme, domain / IPv4 /
    localhost, optional port, optional path); non-matching tokens are
    silently dropped.
    """
    regex = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    # Comprehension replaces the original build-and-append loop.
    return [pic for pic in text.split(" ") if regex.match(pic)]
# Add a comment to a status.
# Returns -1 on a bad token, -2 when the status does not exist, None on
# server error, else {"comment": comment}.
def insertComment(text, access_token, s_id):
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        status = Status.objects.get(id = s_id)
    except:
        return -2
    try:
        comment = Comment()
        comment.text = safestr(text)
        comment.status = status
        comment.from_user = from_user
        comment.save()
        return {"comment" : comment}
    except:
        return None
# Fetch every comment on one status, newest first.
# Returns -2 when the status does not exist, None on server error,
# else {"comments": queryset}.
def queryComments(s_id):
    try:
        status = Status.objects.get(id = s_id)
    except:
        return -2
    try:
        comments = status.comment_set.all().order_by('-id')
        return {"comments" : comments}
    except:
        return None
# Send a friend request.
# Returns -1 bad token, -2 unknown target, -3 self-add, -4 already friends,
# -5 duplicate outgoing request, -6 pending incoming request from the target,
# None on server error, else {"newfirend": request}.
def addFriend(text, access_token, to_user):
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = to_user)
    except:
        return -2
    try:
        if to_user.uid == from_user.uid:
            return -3
        if from_user in to_user.friends.all():
            return -4
        # Guard against duplicate requests in either direction: an existing
        # pending (status == 0) request from me to them...
        newfirends = from_user.newfriends.filter(to_user = to_user, status = 0)
        if newfirends.count() != 0:
            return -5
        # ...or a pending request from them to me.
        newfirends = to_user.newfriends.filter(to_user = from_user, status = 0)
        if newfirends.count() != 0:
            return -6
        # (`newfirend` is a long-standing local typo for "new friend".)
        newfirend = Newfriend()
        newfirend.text = safestr(text)
        newfirend.to_user = to_user
        newfirend.save()
        from_user.newfriends.add(newfirend)
        from_user.save()
        return {"newfirend" : newfirend}
    except:
        return None
# Resolve a friend request.
# result: 1 = reject, 2 = accept.  Returns -1 bad token, -2 missing/foreign/
# already-handled request, None on server error, else {"newfirend": request}.
def dowithAddFriend(f_id, access_token, result):
    try:
        to_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        newfirend = Newfriend.objects.get(id = f_id)
    except:
        return -2
    try:
        # Only the addressee may resolve it, and only while still pending.
        if newfirend.to_user.uid != to_user.uid or newfirend.status != 0:
            return -2
        newfirend.status = result
        newfirend.save()
        if result == 2:
            # Acceptance: link the two users as friends and auto-reply.
            # The requester is recovered through the reverse M2M from User.
            from_user = newfirend.user_set.all().first()
            to_user.friends.add(from_user)
            to_user.save()
            insertMessage("我已经同意你的好友请求了,开始对话吧!", 0, access_token, from_user.uid)
        return {"newfirend" : newfirend}
    except:
        return None
# Remove a friend.
# Returns -1 bad token, -2 unknown target, -3 self-delete, -4 not a friend,
# None on server error, else {"from_user": user}.
def deleteFriend(to_user, access_token):
    try:
        from_user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = to_user)
    except:
        return -2
    try:
        if to_user.uid == from_user.uid:
            return -3
        if from_user not in to_user.friends.all():
            return -4
        # Symmetric M2M: removing one side removes the relation for both.
        from_user.friends.remove(to_user)
        from_user.save()
        return {"from_user" : from_user}
    except:
        return None
# Fetch all pending (status == 0) friend requests addressed to the user.
# Returns -1 on a bad token, None on server error, else {"newfriends": qs}.
def newFriends(access_token):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        newfriends = Newfriend.objects.filter(to_user = user, status = 0)
        return {"newfriends" : newfriends}
    except:
        return None
# Fetch the user's friend list, ordered by name then uid (both descending).
# Returns -1 on a bad token, None on server error, else {"friendlist": qs}.
def queryFriendList(access_token):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        friendlist = user.friends.all().order_by("-name", "-uid")
        return {"friendlist" : friendlist}
    except:
        return None
# Search strangers by uid or name substring, paged 10 per page.
# Returns -1 on a bad token, None on server error, else {"users": qs}.
def querySearch(access_token, key, page):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Exclude the user themselves and everyone already a friend.
        myfriend_uid = list()
        myfriend_uid.append(user.uid)
        myfriend = user.friends.all()
        for f in myfriend:
            myfriend_uid.append(f.uid)
        users = User.objects.filter((Q(uid__icontains = key) | Q(name__icontains = key)) & ~Q(uid__in = myfriend_uid))[page * 10 : (page + 1) * 10]
        return {"users" : users}
    except:
        return None
# Fetch another user's profile, plus whether they are the caller's friend.
# Returns -1 on a bad token, -2 when `uid` does not exist (or on any other
# error in the lookup), else {"user": target, "isfriend": 0 or 1}.
def queryUserInfo(uid, access_token):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        to_user = User.objects.get(uid = uid)
        isfriend = 0
        if to_user in user.friends.all():
            isfriend = 1
        return {"user" : to_user, "isfriend" : isfriend}
    except:
        return -2
# Update profile fields: name, age, sex, birthday, city.
# Returns -1 on a bad token, None on server error, else {"user": user}.
def updateUserInfo(access_token, name, age, sex, birthday, city):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Free-text fields go through safestr() for the JSON templates.
        user.name = safestr(name)
        user.age = age
        user.sex = sex
        user.birthday = birthday
        user.city = safestr(city)
        user.save()
        return {"user" : user}
    except:
        return None
# Change the user's password (and rotate the derived access token).
# Returns -1 on a bad token, -2 on a wrong old password, None on server
# error, else {"user": user} with the new access_token.
def updateUserPwd(access_token, pwd, oldpwd):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        # Verify the old password against the stored salted md5.
        if user.pwd != hashlib.new("md5", oldpwd + pwdfix).hexdigest():
            return -2
        user.pwd = hashlib.new("md5", pwd + pwdfix).hexdigest()
        # Token is derived from uid + salt + hashed password, so it changes
        # whenever the password does.
        user.access_token = hashlib.new("md5", user.uid + pwdfix + user.pwd).hexdigest()
        user.save()
        return {"user" : user}
    except:
        return None
# Update the user's avatar URL.
# Returns -1 on a bad token, None on server error, else {"user": user}.
def updateUserPhoto(access_token, photo):
    try:
        user = User.objects.get(access_token = access_token)
    except:
        return -1
    try:
        user.photo = photo
        user.save()
        return {"user" : user}
    except:
        return None
# Build a Qiniu upload token for file name `key` in the configured bucket.
def getQiniu_token(key):
    q = Auth(access_key, secret_key)
    token = q.upload_token(bucket_name, key)
    return {"token" : token}
hezuoguang/ZGVL | WLServer/api/models.py | # coding:utf-8
from django.db import models
# Create your models here.
# User model.
class User(models.Model):
    """A chat user; `uid` is the natural primary key."""
    # Account name — unique identifier and primary key.
    uid = models.CharField(max_length = 16, primary_key = True, verbose_name = "用户名")
    # Password, stored salted+md5-hashed (see api.function).
    pwd = models.CharField(max_length = 255, verbose_name = "密码")
    # Display name.
    name = models.CharField(max_length = 16, default = "微米", verbose_name = "昵称")
    # Avatar URL.
    photo = models.CharField(max_length = 1024, default = "http://7xl0k3.com1.z0.glb.clouddn.com/default.jpg", verbose_name = "头像")
    # Age.
    age = models.PositiveSmallIntegerField(default = 0, verbose_name = "年龄")
    # Gender (free text, defaults to "unknown").
    sex = models.CharField(max_length = 4, default = "未知", verbose_name = "性别")
    # Birthday — auto_now_add means it is stamped at registration time.
    birthday = models.DateTimeField(verbose_name = "生日", auto_now_add = True)
    # City.
    city = models.CharField(max_length = 255, default = "怀化", verbose_name = "城市")
    # Friends: self-referencing many-to-many.
    friends = models.ManyToManyField("self", blank = True, verbose_name = "好友们")
    # Messages this user has SENT (field name is a long-standing typo).
    messgaes = models.ManyToManyField("Message", blank = True)
    # Friend requests this user has SENT.
    newfriends = models.ManyToManyField("Newfriend", blank = True)
    # API authentication token.
    access_token = models.TextField(verbose_name = "授权标识")
    def __unicode__(self):
        return self.uid + "(" + self.name + ")"
# Chat message model.
class Message(models.Model):
    # Body: plain text for text messages, gif image name for gif messages,
    # or the resource URL for voice/picture messages.
    text = models.CharField(max_length = 1024, verbose_name = "消息内容")
    # Creation timestamp.
    create_time = models.DateTimeField(auto_now_add = True, verbose_name = "创建时间")
    # Message type: 0 = text, 1 = gif, 2 = picture, 3 = voice.
    type = models.PositiveSmallIntegerField(verbose_name = "消息类型", default = 0)
    # Recipient.  The sender is linked via User.messgaes.
    to_user = models.ForeignKey(User, verbose_name = "接收者")
    def __unicode__(self):
        return self.text
# Friend-request model.
class Newfriend(models.Model):
    # Recipient of the request; the requester is linked via User.newfriends.
    to_user = models.ForeignKey(User, verbose_name = "接收者")
    # Request greeting text.
    text = models.CharField(max_length = 1024, verbose_name = "请求说明")
    # Resolution state: 0 = pending, 1 = rejected.
    # NOTE(review): the original comment said 3 = accepted, but
    # dowithAddFriend() writes 2 on acceptance — the 3 looks stale; confirm.
    status = models.PositiveSmallIntegerField(verbose_name = "处理状态", default = 0)
    # Creation timestamp.
    create_time = models.DateTimeField(auto_now_add = True, verbose_name = "创建时间")
    def __unicode__(self):
        return "(" + self.text + ")"
# Status model — a microblog/moments-style post.
class Status(models.Model):
    # Post body.
    text = models.CharField(max_length = 1024, verbose_name = "状态内容")
    # Creation timestamp.
    create_time = models.DateTimeField(auto_now_add = True, verbose_name = "创建时间")
    # Image URLs, stored as one space-separated string
    # (see api.function.picsWithText for the inverse).
    pics = models.TextField(verbose_name = "图片地址", blank = True)
    # Author.
    from_user = models.ForeignKey(User, verbose_name = "发送者")
    def __unicode__(self):
        return (str)(self.id) + "(" + self.text + ")"
# Comment model — a reply attached to a Status.
class Comment(models.Model):
    # Comment body.
    text = models.CharField(max_length = 255, verbose_name = "评论内容")
    # Creation timestamp.
    create_time = models.DateTimeField(auto_now_add = True, verbose_name = "创建时间")
    # Author.
    from_user = models.ForeignKey(User, verbose_name = "发送者")
    # The status being commented on.
    status = models.ForeignKey(Status, verbose_name = "所评论的状态")
    def __unicode__(self):
        return "(" + self.text + ")"
|
hezuoguang/ZGVL | WLServer/zgvl/urls.py | #coding:utf-8
from django.conf.urls import patterns, include, url
from django.contrib import admin
from api.views import doc
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'zgvl.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    # NOTE(review): r'^admin' (no trailing slash or anchor) already matches
    # every path starting with "admin", making the r'^admin/' entry below
    # unreachable duplication — confirm and drop one of them.
    url(r'^admin', include(admin.site.urls)),
    url(r'^admin/', include(admin.site.urls)),
    # API routes.
    url(r'^api/', include('api.urls')),
)

# Serve static files (development helper).
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns += staticfiles_urlpatterns()

urlpatterns += patterns('',
    # Catch-all: every other path renders the API documentation page.
    url(r'^', doc),
)
hezuoguang/ZGVL | WLServer/api/views.py | # coding:utf-8
from django.shortcuts import render, render_to_response, HttpResponse
from api.function import *
from api.models import *
# Default (and maximum) page size for message/status listings.
defaultCount = 40

# Create your views here.
# API documentation page.
def doc(request):
    """Render the static API docs (templates/doc/doc.html)."""
    return render_to_response("doc/doc.html", {})
# Login handler.  POST with uid and pwd (pwd already md5-hashed client-side).
def login(request):
    """Authenticate and return uid/name/photo/access_token as JSON.

    Non-POST requests and any failure path return an error() response.
    """
    if request.method == "POST":
        try:
            uid = request.POST["uid"]
            pwd = request.POST["pwd"]
        except:
            return error("请求参数不正确")
        user = queryUser(uid, pwd)
        # Fix: compare against None with `is`, not `==`.
        if user is None:
            return error("用户名或密码错误")
        # (stray trailing semicolons removed)
        context = dict()
        context["uid"] = user.uid
        context["name"] = user.name
        context["photo"] = user.photo
        context["access_token"] = user.access_token
        return render_to_response("login.json", context, content_type = 'application/json')
    else:
        return error("请求方式不正确,应使用POST")
# Registration handler.  POST with uid and pwd (pwd already md5-hashed
# client-side).
def register(request):
    """Create a new account after validating uid/pwd lengths."""
    if request.method == "POST":
        try:
            uid = request.POST["uid"]
            pwd = request.POST["pwd"]
            # Length limits: pwd 6-64 chars, uid 6-16 chars.
            if len(pwd) < 6 or len(pwd) > 64:
                return error("密码长度不符合要求")
            if len(uid) < 6 or len(uid) > 16:
                return error("用户名长度不符合要求")
        except:
            return error("请求参数不正确")
        user = registerUser(uid, pwd)
        # registerUser: None = uid taken, -1 = server error, else the User.
        if user == None:
            return error("注册失败, 用户名已被注册")
        elif user == -1:
            return error("注册失败, 请稍后再试")
        return render_to_response("register.json", {}, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Send a chat message.  POST params:
#   text — message body (plain text / gif name / resource URL depending on type)
#   type — 0 text, 1 gif, 2 picture, 3 voice
#   access_token, to_user (recipient uid)
def chat_upload(request):
    """Store one message and return it as JSON."""
    if request.method == "POST":
        try:
            # TODO: voice/picture messages — the URL is not strictly validated.
            text = request.POST["text"]
            access_token = request.POST["access_token"]
            to_user = request.POST["to_user"]
            type = request.POST["type"]
            type = (int)(type)
            # Unknown type codes fall back to plain text.
            if type < 0 or type > 3:
                type = 0
        except:
            return error("请求参数不正确")
        context = insertMessage(text, type, access_token, to_user)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context == -2:
            return error("目的用户不存在")
        elif context == None:
            return error("服务器发生错误")
        return render_to_response("chat/upload.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Fetch new messages.  POST params: access_token, since_id, optional count.
# Returns messages with id > since_id, at most `count` of them.
def chat_newmessages(request):
    """Return the caller's new messages as JSON."""
    if request.method == "POST":
        try:
            since_id = request.POST["since_id"]
            access_token = request.POST["access_token"]
            # Clamp the page size to [1, defaultCount].
            count = defaultCount
            # Fix: dict.has_key() is deprecated (removed in Python 3);
            # the `in` operator is behaviorally identical here.
            if "count" in request.POST:
                count = (int)(request.POST["count"])
                if count <= 0:
                    count = 1
                elif count > defaultCount:
                    count = defaultCount
        except:
            return error("请求参数不正确")
        context = queryNewMessages(since_id, access_token, count)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context is None:
            return error("服务器发生错误")
        return render_to_response("chat/messages.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Fetch old messages.  POST params: access_token, max_id, optional count.
# Returns messages with id < max_id, at most `count` of them.
def chat_oldmessages(request):
    """Return older message history for the caller as JSON."""
    if request.method == "POST":
        try:
            max_id = request.POST["max_id"]
            access_token = request.POST["access_token"]
            # Clamp the page size to [1, defaultCount].
            count = defaultCount
            # Fix: dict.has_key() is deprecated (removed in Python 3);
            # the `in` operator is behaviorally identical here.
            if "count" in request.POST:
                count = (int)(request.POST["count"])
                if count <= 0:
                    count = 1
                elif count > defaultCount:
                    count = defaultCount
        except:
            return error("请求参数不正确")
        context = queryOldMessages(max_id, access_token, count)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context is None:
            return error("服务器发生错误")
        return render_to_response("chat/messages.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Post a status.  POST params: text (status body), pics[] (optional array of
# image URLs, at most 9), access_token (author).
def status_upload(request):
    """Create a status for the caller and return it as JSON."""
    if request.method == "POST":
        try:
            text = request.POST["text"]
            access_token = request.POST["access_token"]
            pics = list()
            # TODO: URLs in pics[] are not strictly validated here.
            # Fix: dict.has_key() is deprecated (removed in Python 3);
            # the `in` operator is behaviorally identical here.
            if "pics[]" in request.POST:
                pics = request.POST.getlist('pics[]')
                # Fix: removed a leftover `print pics` debug statement.
                if len(pics) > 9:
                    return error("图片数量不能多于9张")
        except:
            return error("请求参数不正确")
        context = insertStatus(text,access_token, pics)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context is None:
            return error("服务器发生错误")
        return render_to_response("status/upload.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Fetch new statuses.  POST params: access_token, since_id, optional count.
# Returns statuses with id > since_id, at most `count` of them.
def status_newstatuses(request):
    """Return the caller's new timeline entries as JSON."""
    if request.method == "POST":
        try:
            since_id = request.POST["since_id"]
            access_token = request.POST["access_token"]
            # Clamp the page size to [1, defaultCount].
            count = defaultCount
            # Fix: dict.has_key() is deprecated (removed in Python 3);
            # the `in` operator is behaviorally identical here.
            if "count" in request.POST:
                count = (int)(request.POST["count"])
                if count <= 0:
                    count = 1
                elif count > defaultCount:
                    count = defaultCount
        except:
            return error("请求参数不正确")
        context = queryNewStatuses(since_id, access_token, count)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context is None:
            return error("服务器发生错误")
        return render_to_response("status/statuses.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Fetch old statuses.  POST params: access_token, max_id, optional count.
# Returns statuses with id < max_id, at most `count` of them.
def status_oldstatuses(request):
    """Return older timeline entries for the caller as JSON."""
    if request.method == "POST":
        try:
            max_id = request.POST["max_id"]
            access_token = request.POST["access_token"]
            # Clamp the page size to [1, defaultCount].
            count = defaultCount
            # Fix: dict.has_key() is deprecated (removed in Python 3);
            # the `in` operator is behaviorally identical here.
            if "count" in request.POST:
                count = (int)(request.POST["count"])
                if count <= 0:
                    count = 1
                elif count > defaultCount:
                    count = defaultCount
        except:
            return error("请求参数不正确")
        context = queryOldStatuses(max_id, access_token, count)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context is None:
            return error("服务器发生错误")
        return render_to_response("status/statuses.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Post a comment.  POST params: text (comment body), access_token,
# s_id (target status id).
def comment_upload(request):
    """Attach a comment to a status and return it as JSON."""
    if request.method == "POST":
        try:
            text = request.POST["text"]
            access_token = request.POST["access_token"]
            s_id = request.POST["s_id"]
        except:
            return error("请求参数不正确")
        context = insertComment(text, access_token, s_id)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context == -2:
            return error("该状态不存在")
        elif context == None:
            return error("服务器发生错误")
        return render_to_response("comment/upload.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Fetch all comments on one status.  POST param: s_id (status id).
# NOTE(review): this endpoint requires no access_token — any caller can read
# any status's comments; confirm that is intended.
def comment_comments(request):
    """Return every comment on the given status as JSON."""
    if request.method == "POST":
        try:
            s_id = request.POST["s_id"]
        except:
            return error("请求参数不正确")
        context = queryComments(s_id)
        if context == -2:
            return error("该状态不存在")
        elif context == None:
            return error("服务器发生错误")
        return render_to_response("comment/comments.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Send a friend request.  POST params: to_user (recipient uid),
# text (greeting), access_token.
def friend_addfriend(request):
    """Create a pending friend request; error codes map to user messages."""
    if request.method == "POST":
        try:
            text = request.POST["text"]
            access_token = request.POST["access_token"]
            to_user = request.POST["to_user"]
        except:
            return error("请求参数不正确")
        context = addFriend(text, access_token, to_user)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context == -2:
            return error("添加的用户不存在")
        elif context == -3:
            return error("不能添加自己为好友")
        elif context == -4:
            return error("对方已经是你好友了")
        elif context == -5:
            return error("请求已发出无需重复请求")
        elif context == -6:
            return error("对方已对你发出好友请求,同意其请求即可")
        elif context == None:
            return error("服务器发生错误")
        return render_to_response("friend/addfriend.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
# Resolve a friend request.  POST params: f_id (request id), access_token,
# result — 1 reject, 2 accept.
def friend_dowithrequest(request):
    """Accept or reject a pending friend request."""
    if request.method == "POST":
        try:
            access_token = request.POST["access_token"]
            f_id = request.POST["f_id"]
            result = request.POST["result"]
            result = (int)(result)
            # Anything other than 1/2 is coerced to "reject".
            if result != 1 and result != 2:
                result = 1
        except:
            return error("请求参数不正确")
        context = dowithAddFriend(f_id, access_token, result)
        if context == -1:
            return error("登录失效, 请重新登录")
        elif context == -2:
            return error("该请求不存在")
        # NOTE(review): dowithAddFriend() only returns -1/-2/None/success;
        # the -3..-6 branches below are unreachable for this endpoint.
        elif context == -3:
            return error("不能添加自己为好友")
        elif context == -4:
            return error("对方已经是你好友了")
        elif context == -5:
            return error("请求已发出无需重复请求")
        elif context == -6:
            return error("对方已对你发出好友请求,同意其请求即可")
        elif context == None:
            return error("服务器发生错误")
        return render_to_response("friend/addfriend.json", context, content_type = "application/json")
    else:
        return error("请求方式不正确,应使用POST")
def friend_deletefriend(request):
    """Delete a friend (POST only). POST params: access_token, to_user."""
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
        to_user = request.POST["to_user"]
    except KeyError:
        return error("请求参数不正确")
    context = deleteFriend(to_user, access_token)
    if context == -1:
        return error("登录失效, 请重新登录")
    elif context == -2:
        return error("欲删除的用户不存在")
    elif context == -3:
        return error("不能删除自己")
    elif context == -4:
        return error("对方还不是你好友")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("friend/addfriend.json", context, content_type="application/json")
def friend_newfriends(request):
    """List all unhandled friend requests (POST only). POST param: access_token."""
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
    except KeyError:
        return error("请求参数不正确")
    context = newFriends(access_token)
    if context == -1:
        return error("登录失效, 请重新登录")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("friend/newfriends.json", context, content_type="application/json")
def friend_friendlist(request):
    """Fetch the caller's friend list (POST only). POST param: access_token."""
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
    except KeyError:
        return error("请求参数不正确")
    context = queryFriendList(access_token)
    if context == -1:
        return error("登录失效, 请重新登录")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("friend/friendlist.json", context, content_type="application/json")
def friend_search(request):
    """Search strangers by keyword, paginated (POST only).

    POST params: access_token, key (search keyword), page (page number).
    """
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
        key = request.POST["key"]
        page = int(request.POST["page"])
    except (KeyError, ValueError):  # missing key or non-numeric page
        return error("请求参数不正确")
    context = querySearch(access_token, key, page)
    if context == -1:
        return error("登录失效, 请重新登录")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("friend/users.json", context, content_type="application/json")
def user_userinfo(request):
    """Fetch a user's profile (POST only). POST params: uid, access_token."""
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        uid = request.POST["uid"]
        access_token = request.POST["access_token"]
    except KeyError:
        return error("请求参数不正确")
    context = queryUserInfo(uid, access_token)
    if context == -1:
        return error("登录失效, 请重新登录")
    elif context == -2:
        return error("用户不存在")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("user/userinfo.json", context, content_type="application/json")
def user_updateuserinfo(request):
    """Update the caller's profile (POST only).

    POST params: access_token, name, age, sex, birthday, city.
    All fields must be non-empty strings.
    """
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
        name = request.POST["name"]
        age = request.POST["age"]
        sex = request.POST["sex"]
        birthday = request.POST["birthday"]
        city = request.POST["city"]
    except KeyError:
        return error("请求参数不正确")
    # For strings, truthiness is equivalent to len(x) > 0.
    if not (name and age and sex and birthday and city):
        return error("请求参数不正确")
    context = updateUserInfo(access_token, name, age, sex, birthday, city)
    if context == -1:
        return error("用户不存在")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("user/userinfo.json", context, content_type="application/json")
def user_updateuserpwd(request):
    """Change the caller's password (POST only).

    POST params: access_token, pwd (new password, 6-64 chars), oldpwd.
    """
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
        pwd = request.POST["pwd"]
        oldpwd = request.POST["oldpwd"]
    except KeyError:
        return error("请求参数不正确")
    if len(pwd) < 6 or len(pwd) > 64:
        return error("密码长度不能小于6")
    context = updateUserPwd(access_token, pwd, oldpwd)
    if context == -1:
        return error("用户不存在")
    elif context == -2:
        return error("旧密码不符,修改失败")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("user/userinfo.json", context, content_type="application/json")
def user_updateuserphoto(request):
    """Update the caller's avatar (POST only). POST params: access_token, photo."""
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        access_token = request.POST["access_token"]
        photo = request.POST["photo"]
    except KeyError:
        return error("请求参数不正确")
    context = updateUserPhoto(access_token, photo)
    if context == -1:
        return error("用户不存在")
    elif context is None:
        return error("服务器发生错误")
    return render_to_response("user/userinfo.json", context, content_type="application/json")
def qiniu_token(request):
    """Issue a Qiniu upload token for the given file name (POST only).

    POST param: fileName (object key for the upload).
    """
    if request.method != "POST":
        return error("请求方式不正确,应使用POST")
    try:
        key = request.POST["fileName"]
    except KeyError:
        return error("请求参数不正确")
    context = getQiniu_token(key)
    if context is None:
        return error("服务器发生错误")
    return render_to_response("qiniu/token.json", context, content_type="application/json")
def error(message):
    """Render a generic JSON error response carrying ``message``."""
    context = {"message": message}
    return render_to_response("error.json", context, content_type='application/json')
hezuoguang/ZGVL | WLServer/zgvl/settings.py | #coding:utf-8
"""
Django settings for zgvl project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

from os import environ
# SAE (Sina App Engine) sets APP_NAME in its environment; its absence means
# we are running locally, so fall back to the local MySQL parameters.
debug = not environ.get("APP_NAME", "")
if debug:
    # LOCAL: for local debugging and database export. Fill in the parameters
    # below to match the local MySQL install.
    # (If this file contains non-ASCII text, keep "#coding:utf-8" at the top.)
    MYSQL_DB = 'zgvl'
    MYSQL_USER = 'root'
    MYSQL_PASS = '<PASSWORD>'  # NOTE(review): redacted credential placeholder
    MYSQL_HOST_M = '127.0.0.1'
    MYSQL_HOST_S = '127.0.0.1'
    MYSQL_PORT = '3306'
else:
    # SAE production: read the connection parameters from sae.const.
    import sae.const
    MYSQL_DB = sae.const.MYSQL_DB
    MYSQL_USER = sae.const.MYSQL_USER
    MYSQL_PASS = sae.const.MYSQL_PASS
    MYSQL_HOST_M = sae.const.MYSQL_HOST
    MYSQL_HOST_S = sae.const.MYSQL_HOST_S
    MYSQL_PORT = sae.const.MYSQL_PORT
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=a5)r@51q-z_t*+(g29nn*+g1xuo-%k%ufaz6olxa0ijs@wg)('

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'api',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CSRF protection is commented out — presumably because the
    # /api endpoints serve a non-browser client. Confirm this is intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.locale.LocaleMiddleware',
)
ROOT_URLCONF = 'zgvl.urls'
WSGI_APPLICATION = 'zgvl.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': MYSQL_DB,
        'USER': MYSQL_USER,
        # Was an unquoted redaction placeholder, which is a SyntaxError;
        # the password variable is defined in the env block above.
        'PASSWORD': MYSQL_PASS,
        'HOST': MYSQL_HOST_M,
        'PORT': MYSQL_PORT,
        # 'default-character-set' is not a recognized Django DATABASES key;
        # the MySQL connection charset belongs under OPTIONS.
        'OPTIONS': {'charset': 'utf8'},
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
# 'ch-cn' is not a valid language code; Django's identifier for
# simplified Chinese in this release is 'zh-cn'.
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_TZ = True
USE_L10N = False
DATETIME_FORMAT = 'Y-m-d H:i:s'
DATE_FORMAT = 'Y-m-d'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
SITE_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), '')
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
STATIC_URL = '/static/'
# Prefixed static source directories.
# NOTE(review): these live *inside* STATIC_ROOT, which Django normally warns
# about (collectstatic copies into STATIC_ROOT) — confirm this layout is wanted.
STATICFILES_DIRS = (
    ("css", os.path.join(STATIC_ROOT, 'css')),
    ("js", os.path.join(STATIC_ROOT, 'js')),
    ("images", os.path.join(STATIC_ROOT, 'images')),
)
|
hezuoguang/ZGVL | WLServer/zgvl/wsgi.py | #coding:utf-8
"""
WSGI config for zgvl project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys

# Make the bundled site-packages directory (shipped next to the project for
# SAE deployment) importable before Django is loaded.
root = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(root, '..', 'site-packages'))

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zgvl.settings")

from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
|
hezuoguang/ZGVL | WLServer/api/urls.py | #coding:utf-8
from django.conf.urls import patterns, include, url
from api.views import *

# URL routing for the JSON API. Every endpoint maps to a view in api.views.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'zgvl.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # Log in
    url(r'^login.json', login),
    # Register
    url(r'^register.json', register),
    # Send one chat message
    url(r'^chat/upload.json', chat_upload),
    # Fetch new messages (id > since_id), at most 40 by default
    url(r'^chat/newmessages.json', chat_newmessages),
    # Fetch old messages (id < max_id), at most 40 by default
    url(r'^chat/oldmessages.json', chat_oldmessages),
    # Post one status
    url(r'^status/upload.json', status_upload),
    # Fetch new statuses (id > since_id), at most 40 by default
    url(r'^status/newstatuses.json', status_newstatuses),
    # Fetch old statuses (id < max_id), at most 40 by default
    url(r'^status/oldstatuses.json', status_oldstatuses),
    # Post one comment
    url(r'^comment/upload.json', comment_upload),
    # Fetch all comments of a status
    url(r'^comment/comments.json', comment_comments),
    # Request to add a friend
    url(r'^friend/addfriend.json', friend_addfriend),
    # Handle a pending friend request
    url(r'^friend/dowithrequest.json', friend_dowithrequest),
    # Delete a friend
    url(r'^friend/deletefriend.json', friend_deletefriend),
    # Fetch all unhandled friend requests
    url(r'^friend/newfriends.json', friend_newfriends),
    # Fetch the friend list
    url(r'^friend/friendlist.json', friend_friendlist),
    # Search strangers
    url(r'^friend/search.json', friend_search),
    # Fetch a user's profile
    url(r'^user/userinfo.json', user_userinfo),
    # Update the user's profile
    url(r'^user/updateuserinfo.json', user_updateuserinfo),
    # Update the user's password
    url(r'^user/updateuserpwd.json', user_updateuserpwd),
    # Update the user's avatar
    url(r'^user/updateuserphoto.json', user_updateuserphoto),
    # Obtain a Qiniu upload token
    url(r'^qiniu/token.json', qiniu_token),
    # API documentation (also the catch-all fallback below)
    url(r'^doc', doc),
    url(r'^', doc),
)
# from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# urlpatterns += staticfiles_urlpatterns()
sowmyamanojna/BT2020-Numerical-methods-in-Biology | class_hw/cos_taylor_pi_by_4.py | import numpy as np
import math


def factorial(n):
    """Return n! for a non-negative integer n (iterative product)."""
    fact = 1
    for i in range(1, n + 1):
        fact *= i
    return fact


# Taylor-series approximation: cos(x) = sum_{i>=0} (-1)^i * x^(2i) / (2i)!
# evaluated here at x = pi/3 with terms up to i = 6.
# NOTE(review): the filename mentions pi/4 but the code evaluates at pi/3 —
# confirm which angle was intended.
summation = 1
for i in range(1, 7):
    val = (-1) ** i * (math.pi / 3) ** (2 * i) / factorial(2 * i)
    summation += val
    # Python-3 print calls (the original used Python-2 print statements,
    # which are a SyntaxError on Python 3).
    print("val: ", val)
    print("summation: ", summation)
print("Final summation: ", summation)
miya779/crawler-poder360 | main.py | #poder360 crawler
import requests
from bs4 import BeautifulSoup
from selenium import webdriver  # NOTE(review): unused below — confirm before removing
import re
import mysql.connector

# used to convert the string month to mysql numerical format
# (Portuguese three-letter month abbreviation -> two-digit month).
month = {'jan': '01', 'fev': '02', 'mar': '03', 'abr': '04', 'mai': '05', 'jun': '06', 'jul': '07', 'ago': '08', 'set': '09', 'out': '10', 'nov': '11', 'dez': '12'}
cnx = mysql.connector.connect(user='root',password='<PASSWORD>', database='poder_news')
cursor = cnx.cursor()
try:
    # Walk every page of the "todas-Noticias" archive.
    # NOTE(review): 4087 is a hard-coded page count — it drifts as new
    # articles are published; confirm how it should be refreshed.
    for i in range(1, 4087):
        if(i == 1):
            page = requests.get('https://www.poder360.com.br/todas-Noticias/')
        else:
            page = requests.get('https://www.poder360.com.br/todas-Noticias/page/'+str(i))
        soup = BeautifulSoup(page.text, 'html.parser')
        #get all links from the page
        news_links = [a['href'] for a in soup.findAll('a',{'class': 'row link-post'})]
        for link in news_links:
            try:
                #print(link)
                #extract title, resume, text and date from the news page and store them in mysql
                news_page = requests.get(link)
                soup = BeautifulSoup(news_page.text,'html.parser')
                article = soup.article
                title = article.h1.text
                summary = soup.find('div',{'class':'resume'}).text.strip().replace('\n','. ')
                text = soup.find('div',{'class':'content wp cropped js-mediator-article'}).text.replace("Continuar lendo","").strip().replace('\n','. ').replace("Receba a newsletter do Poder360todos os dias no seu e-mail","").replace("\xa0", " ")
                # Byline looks like "12.jan.2020 ... 10h30"; capture day,
                # month abbreviation, year, hour and minute.
                date_hour = soup.find('p',{'class': 'author'}).text
                date_hour = re.search('(\d{1,2})\.([a-zA-Z]{3})\.(\d{4})[^0-9]*(\d{1,2})h(\d{1,2})',date_hour)
                date = date_hour.group(3) + "-" + month[date_hour.group(2)] + "-" + date_hour.group(1) # AAAA-MM-DD
                hour = date_hour.group(4) + ":" + date_hour.group(5) + ":00" # HH:MM:SS
                #insert news into mysql
                query = "INSERT INTO news (link, date, time, title, summary,text) VALUES (%s, %s, %s, %s, %s, %s)"
                values = (link, date, hour, title, summary, text)
                cursor.execute(query, values)
                cnx.commit()
            except Exception as e:
                # Best effort: one malformed article must not stop the crawl.
                print(str(e))
except Exception as e:
    print(str(e))
cnx.close()
#(atualizado.*|$)?
#link = 'https://www.poder360.com.br/congresso/policia-prende-suspeitos-e-aponta-flordelis-como-mandante-de-assassinato/'
#news_page = requests.get(link)
#soup = BeautifulSoup(news_page.text,'html.parser')
#article = soup.article
#title = article.h1.text
#summary = soup.find('div',{'class':'resume'}).text.strip().replace('\n','. ')
#text = soup.find('div',{'class':'content wp cropped js-mediator-article'}).text.replace("Continuar lendo","").strip().replace('\n','. ').replace("Receba a newsletter do Poder360todos os dias no seu e-mail","").replace("\xa0", " ")
#date_hour = soup.find('p',{'class': 'author'}).text
#date_hour = re.search('(\d{1,2})\.([a-zA-Z]{3})\.(\d{4})[^0-9]*(\d{1,2})h(\d{1,2})',date_hour)
#date = date_hour.group(3) + "-" + month[date_hour.group(2)] + "-" + date_hour.group(1)
#hour = date_hour.group(4) + ":" + date_hour.group(5) + ":00"
#query = "INSERT INTO news (link, date, time, title, summary,text) VALUES (%s, %s, %s, %s, %s, %s)"
#values = (link, date, hour, title, summary, text)
#cursor.execute(query, values)
#cnx.commit()
|
hsgwa/ros2cli | ros2doctor/setup.py | from setuptools import find_packages
from setuptools import setup

package_name = 'ros2doctor'

# Standard ament/colcon manifest for the `ros2 doctor` CLI extension.
# The entry_points below register the command (and its `wtf` alias) plus the
# pluggable check/report implementations discovered at runtime.
setup(
    name=package_name,
    version='0.10.1',
    packages=find_packages(exclude=['test']),
    data_files=[
        ('share/' + package_name, ['package.xml']),
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
    ],
    install_requires=['ros2cli'],
    zip_safe=True,
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    url='',
    download_url='',
    keywords=[],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
    ],
    description='The doctor command for ROS 2 command line tools',
    long_description="""\
The package provides a cli tool to check potential issues in a ROS 2 system""",
    license='Apache License, Version 2.0',
    tests_require=['pytest'],
    entry_points={
        'ros2cli.command': [
            'doctor = ros2doctor.command.doctor:DoctorCommand',
            'wtf = ros2doctor.command.doctor:WtfCommand',
        ],
        'ros2doctor.checks': [
            'PlatformCheck = ros2doctor.api.platform:PlatformCheck',
            'NetworkCheck = ros2doctor.api.network:NetworkCheck',
            'TopicCheck = ros2doctor.api.topic:TopicCheck',
            'PackageCheck = ros2doctor.api.package:PackageCheck',
        ],
        'ros2doctor.report': [
            'PlatformReport = ros2doctor.api.platform:PlatformReport',
            'RosdistroReport = ros2doctor.api.platform:RosdistroReport',
            'NetworkReport = ros2doctor.api.network:NetworkReport',
            'RMWReport = ros2doctor.api.rmw:RMWReport',
            'TopicReport = ros2doctor.api.topic:TopicReport',
            'PackageReport = ros2doctor.api.package:PackageReport',
        ],
        'ros2cli.extension_point': [
            'ros2doctor.verb = ros2doctor.verb:VerbExtension',
        ],
        'ros2doctor.verb': [
            'hello = ros2doctor.verb.hello:HelloVerb'
        ]
    }
)
|
hsgwa/ros2cli | ros2cli/setup.py | from setuptools import find_packages
from setuptools import setup

# Manifest for the ros2cli framework package. It installs the `ros2` console
# script plus the daemon helper, and declares the extension points other
# ros2* packages plug their commands/verbs into.
setup(
    name='ros2cli',
    version='0.10.1',
    packages=find_packages(exclude=['test']),
    extras_require={
        'completion': ['argcomplete'],
    },
    data_files=[
        ('share/ament_index/resource_index/packages', [
            'resource/ros2cli',
        ]),
        ('share/ros2cli', [
            'package.xml',
            'resource/package.dsv',
        ]),
        ('share/ros2cli/environment', [
            'completion/ros2-argcomplete.bash',
            'completion/ros2-argcomplete.zsh'
        ]),
    ],
    zip_safe=False,
    author='<NAME>',
    author_email='<EMAIL>',
    maintainer='<NAME>, <NAME>',
    maintainer_email='<EMAIL>, <EMAIL>',
    url='https://github.com/ros2/ros2cli/tree/master/ros2cli',
    download_url='https://github.com/ros2/ros2cli/releases',
    keywords=[],
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
    ],
    description='Framework for ROS 2 command line tools.',
    long_description="""\
The framework provides a single command line script which can be extended with
commands and verbs.""",
    license='Apache License, Version 2.0',
    tests_require=['pytest'],
    entry_points={
        'ros2cli.command': [
            'daemon = ros2cli.command.daemon:DaemonCommand',
            'extension_points ='
            ' ros2cli.command.extension_points:ExtensionPointsCommand',
            'extensions = ros2cli.command.extensions:ExtensionsCommand',
        ],
        'ros2cli.extension_point': [
            'ros2cli.command = ros2cli.command:CommandExtension',
            'ros2cli.daemon.verb = ros2cli.verb.daemon:VerbExtension',
        ],
        'ros2cli.daemon.verb': [
            'start = ros2cli.verb.daemon.start:StartVerb',
            'status = ros2cli.verb.daemon.status:StatusVerb',
            'stop = ros2cli.verb.daemon.stop:StopVerb',
        ],
        'console_scripts': [
            'ros2 = ros2cli.cli:main',
            '_ros2_daemon = ros2cli.daemon:main',
        ],
    }
)
|
hsgwa/ros2cli | ros2param/ros2param/verb/list.py | <reponame>hsgwa/ros2cli<gh_stars>0
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from rcl_interfaces.srv import ListParameters
import rclpy
from ros2cli.node.direct import DirectNode
from ros2cli.node.strategy import add_arguments
from ros2cli.node.strategy import NodeStrategy
from ros2node.api import get_absolute_node_name
from ros2node.api import get_node_names
from ros2node.api import NodeNameCompleter
from ros2param.api import call_describe_parameters
from ros2param.api import get_parameter_type_string
from ros2param.verb import VerbExtension
from ros2service.api import get_service_names
class ListVerb(VerbExtension):
    """Output a list of available parameters."""

    def add_arguments(self, parser, cli_name):  # noqa: D102
        # Common node-strategy arguments (daemon vs direct) come first.
        add_arguments(parser)
        arg = parser.add_argument(
            'node_name', nargs='?', help='Name of the ROS node')
        arg.completer = NodeNameCompleter(
            include_hidden_nodes_key='include_hidden_nodes')
        parser.add_argument(
            '--include-hidden-nodes', action='store_true',
            help='Consider hidden nodes as well')
        parser.add_argument(
            '--param-prefixes', nargs='+', default=[],
            help='Only list parameters with the provided prefixes')
        parser.add_argument(
            '--param-type', action='store_true',
            help='Print parameter types with parameter names')

    def main(self, *, args):  # noqa: D102
        # Discover the target node set; if a node name was given, narrow the
        # set to just that node (or fail early if it is not present).
        with NodeStrategy(args) as node:
            node_names = get_node_names(
                node=node, include_hidden_nodes=args.include_hidden_nodes)
            node_name = get_absolute_node_name(args.node_name)
            if node_name:
                if node_name not in [n.full_name for n in node_names]:
                    return 'Node not found'
                node_names = [
                    n for n in node_names if node_name == n.full_name]
        with DirectNode(args) as node:
            service_names = get_service_names(
                node=node, include_hidden_services=args.include_hidden_nodes)
            clients = {}
            futures = {}
            # create clients for nodes which have the service
            for node_name in node_names:
                service_name = f'{node_name.full_name}/list_parameters'
                if service_name in service_names:
                    client = node.create_client(ListParameters, service_name)
                    clients[node_name] = client
            # wait until all clients have been called
            while True:
                for node_name in [
                    n for n in clients.keys() if n not in futures
                ]:
                    # call as soon as ready
                    client = clients[node_name]
                    if client.service_is_ready():
                        request = ListParameters.Request()
                        for prefix in args.param_prefixes:
                            request.prefixes.append(prefix)
                        future = client.call_async(request)
                        futures[node_name] = future
                if len(futures) == len(clients):
                    break
                # Spin so client readiness / discovery can make progress.
                # NOTE(review): if a service never becomes ready this loop
                # spins forever — confirm an upper bound is not needed.
                rclpy.spin_once(node, timeout_sec=1.0)
            # wait for all responses
            for future in futures.values():
                rclpy.spin_until_future_complete(node, future, timeout_sec=1.0)
            # print responses, one node at a time, names sorted per node
            for node_name in sorted(futures.keys()):
                future = futures[node_name]
                if future.result() is not None:
                    if not args.node_name:
                        print(f'{node_name.full_name}:')
                    response = future.result()
                    sorted_names = sorted(response.result.names)
                    # get descriptors for the node if needs to print parameter type
                    name_to_type_map = {}
                    if args.param_type is True:
                        resp = call_describe_parameters(
                            node=node, node_name=node_name.full_name,
                            parameter_names=sorted_names)
                        for descriptor in resp.descriptors:
                            name_to_type_map[descriptor.name] = get_parameter_type_string(
                                descriptor.type)
                    for name in sorted_names:
                        if args.param_type is True:
                            param_type_str = name_to_type_map[name]
                            print(f'  {name} (type: {param_type_str})')
                        else:
                            print(f'  {name}')
                else:
                    e = future.exception()
                    print(
                        'Exception while calling service of node '
                        f"'{node_name.full_name}': {e}", file=sys.stderr)
|
coeusite/cnemc_calculator | cnemc_calculator/functions.py | import numpy as np
import pandas as pd
def sci_round(df, digi=0):
    """Round-half-to-even ("banker's rounding") approximation.

    Pre-rounds to 15 decimals to absorb binary floating-point noise,
    then rounds to ``digi`` decimal places with numpy's half-even rule.
    """
    prerounded = np.round(df, 15)
    return np.round(prerounded, digi)
coeusite/cnemc_calculator | bin/calculate_aqi.py | <reponame>coeusite/cnemc_calculator
import pandas as pd
import numpy as np
import cnemc_calculator
from importlib import reload

## calculate_daily_aqi
# Reload to pick up local edits during interactive development.
cnemc_calculator = reload(cnemc_calculator)
# NOTE(review): `sheetname` and `convert_objects` are long-deprecated pandas
# APIs — this script targets an old pandas (~0.20).
data = pd.read_excel('data/审核后点位日均值.xlsx', header=0, index_col=[3,4], na_values=[-1,-99], sheetname='2016')
column_names = ['SO2', 'NO2', 'PM10', 'CO(mg/m3)', 'O3', 'O3-8h','PM2.5']
data[column_names] = data[column_names].convert_objects(convert_numeric=True)
data['CO(mg/m3)']=cnemc_calculator.functions.sci_round(data['CO(mg/m3)'])
data_aqi = cnemc_calculator.calculate_daily_aqi(data, column_names)
data_aqi.to_excel('data/aqi.xlsx')
# Rows containing any non-positive (invalid) value.
index = data_aqi <= 0
index = index.sum(axis=1)>0
# NOTE(review): the two bare expressions below look like leftover REPL
# experimentation; `data_aqi[index,'AQI']` (tuple key) raises on a DataFrame.
data_aqi[index,'AQI']
data_aqi.loc[index]
# df = pd.read_excel('data/iAQI限值H.xlsx', index_col=0)
# print("df = pd.DataFrame( {} )".format(str(df.to_dict())))
# Side-by-side comparison of the sheet's AQI column and the recomputed one.
comp = pd.concat((data['AQI'], data_aqi['AQI']),axis=1)
coeusite/cnemc_calculator | setup.py | from distutils.core import setup
setup(
    name='cnemc_calculator',
    version='0.1.0',
    author='CoeusITE',
    author_email='<EMAIL>',
    packages=['cnemc_calculator', 'cnemc_calculator.test'],
    scripts=[],
    url='https://github.com/coeusite/cnemc_calculator',
    license='LICENSE',
    description='Unofficial calculator for air quality factors.',
    long_description=open('README.txt').read(),
    # numpy comes in transitively via pandas.
    install_requires=[
        "pandas >= 0.20.1",
    ],
)
coeusite/cnemc_calculator | cnemc_calculator/__init__.py | from .calculate_aqi import calculate_daily_aqi, calculate_hourly_aqi, calculate_aqi |
coeusite/cnemc_calculator | cnemc_calculator/calculate_aqi.py | <reponame>coeusite/cnemc_calculator
import numpy as np
import pandas as pd
from .constants import *
from .functions import *
def calculate_daily_aqi(data, column_names):
    ''' calculate_daily_aqi function
    data: a pandas dataframe
    column_names: names of factor columns in sequence of ['SO2', 'NO2', 'PM10', 'CO', 'O3', 'O3_8H', 'PM_25']
    iAQI = 501 means it exceeds the upper limit
    '''
    # Daily AQI uses the 24-hour limit table of standard HJ663-2012.
    return calculate_aqi(data, column_names, version='HJ663-2012')
def calculate_hourly_aqi(data, column_names):
    ''' calculate_hourly_aqi function
    data: a pandas dataframe
    column_names: names of factor columns in sequence of ['SO2', 'NO2', 'PM10', 'CO', 'O3', 'PM_25']
    iAQI = 501 means it exceeds the upper limit
    '''
    # The '@H' suffix selects the hourly (1h) limit tables in STANDARD_LIMITS.
    return calculate_aqi(data, column_names, version='HJ663-2012@H')
def calculate_aqi(data, column_names, version='HJ663-2012'):
    ''' calculate_aqi function
    data: a pandas dataframe
    column_names: names of factor columns in sequence of ['SO2', 'NO2', 'PM10', 'CO', 'O3', 'O3_8H', 'PM_25']
    version: key into STANDARD_LIMITS; a trailing '@H' selects the hourly
        tables and the hourly factor set
    iAQI = 501 means it exceeds the upper limit; -1 marks invalid data
    '''
    # NOTE(review): DataFrame.convert_objects is deprecated pandas API.
    tmp_data = data[column_names].convert_objects(convert_numeric=True)
    if version[-2:] == '@H':
        factors = AIR_POLLUTANTS_H
        key_factors = AIR_POLLUTANTS_H
    else:
        factors = AIR_POLLUTANTS_7
        key_factors = AIR_POLLUTANTS
    tmp_data.columns = factors
    tmp_iaqi = pd.DataFrame(dtype=np.float, index = tmp_data.index, columns = factors)
    # calculate iaqi: pair consecutive iAQI break-points [low, high] from the
    # limit table's index and interpolate band by band (highest band first).
    gaps = np.concatenate([[STANDARD_LIMITS[version].index[1:].values, STANDARD_LIMITS[version].index[:-1].values]]).T
    for [high, low] in gaps[::-1]:
        #print(high, low)
        _set_iaqi(tmp_iaqi, tmp_data, high, low, version)
    #print(tmp_iaqi.head())
    # Concentrations above the top limit: flag iAQI as 501 (over range).
    tmp_iaqi[tmp_data > 500] = 501
    # Invalid data: non-positive or missing concentrations -> -1.
    tmp_iaqi[tmp_data <= 0] = -1
    tmp_iaqi[tmp_data.isnull()] = -1
    if version[-2:] == '@H':
        # TODO: when 1h SO2 exceeds 800, compute its iAQI from the 24h SO2 table
        index = tmp_iaqi.SO2 > 200
        # tmp_iaqi.loc[index, 'SO2'] = tmp_iaqi.loc[index, 'SO2_24H']
    else:
        # When the 8h O3 iAQI exceeds 300, fall back to the 1h O3 iAQI.
        index = tmp_iaqi.O3_8H > 300
        tmp_iaqi.loc[index, 'O3_8H'] = tmp_iaqi.loc[index, 'O3']
    # Round half to even (see functions.sci_round).
    tmp_iaqi = sci_round(tmp_iaqi, 0)
    # Rows containing any invalid factor invalidate the overall AQI.
    index = tmp_iaqi <= 0
    index = index.sum(axis=1)>0
    # calculate AQI: the maximum iAQI over the key factor set.
    tmp_iaqi['AQI'] = tmp_iaqi[key_factors].max(axis=1)
    tmp_iaqi.loc[index, 'AQI'] = -1
    return tmp_iaqi.astype(np.int)
def _set_iaqi(tmp_iaqi, tmp_data, high, low, version='HJ663-2012'):
    """Write iAQI values for concentrations in the (low, high] break-point band.

    Mutates ``tmp_iaqi`` in place: for every cell of ``tmp_data`` whose
    concentration falls between this band's per-factor limits, the iAQI is
    linearly interpolated between the ``low`` and ``high`` break-points.
    """
    if version[-2:] == '@H':
        factors = AIR_POLLUTANTS_H
    else:
        factors = AIR_POLLUTANTS_7
    # Per-factor concentration limits at each break-point, broadcast to the
    # full shape of tmp_data so the comparison below is element-wise.
    low_end = _standards_v2m(STANDARD_LIMITS[version][factors].loc[low].values, len(tmp_iaqi), tmp_data)
    high_end = _standards_v2m(STANDARD_LIMITS[version][factors].loc[high].values, len(tmp_iaqi), tmp_data)
    #index = (tmp_data.as_matrix() <= high_end) & (tmp_data.as_matrix() > low_end)
    index = (tmp_data <= high_end) & (tmp_data > low_end)
    # Standard linear interpolation between the two break-points.
    tmp = (tmp_data - low_end) / (high_end - low_end) * (high - low) + low
    tmp_iaqi[index] = tmp[index]
    return
def _standards_v2m(v, t, tmp_data, a=0):
return pd.DataFrame(np.repeat(v.reshape(1, v.size), t, axis=a), index=tmp_data.index, columns=tmp_data.columns) |
amit0902/Topic-Modeling | lda.py | <filename>lda.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 20:01:00 2021
@author: <NAME>
"""
#Data
import sys
import re, numpy as np, pandas as pd
from pprint import pprint
# Gensim
import gensim, spacy, logging, warnings
import gensim.corpora as corpora
from gensim.utils import lemmatize, simple_preprocess
from gensim.models import CoherenceModel
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come'])
warnings.filterwarnings("ignore",category=DeprecationWarning)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
#Streamlit
import streamlit as st
# Sklearn
from sklearn.decomposition import LatentDirichletAllocation, TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import GridSearchCV
html_temp = """
<div style ="background-color:orange;padding:13px">
<h1 style ="color:black;text-align:center;">Group - 1 Batch : P-60 </h1>
</div>
"""
# this line allows us to display the front end aspects we have
# defined in the above code
st.markdown(html_temp, unsafe_allow_html = True)
result =""
# giving the webpage a title
st.title("Topic Prediction")
st.header("This application helps you classify News Topic from any given article whether it is Political or Sports")
st.subheader("This model accompanies LDA (Latent Dirichlet Allocation) Library")
a = st.text_input("Enter your Text Data:","Type here...")
if(st.button('Submit')):
result = a.title()
st.success(result)
def sent_to_words(sentences):
    """Yield each input sentence as a list of gensim-tokenized words.

    Strips e-mail addresses, collapses whitespace/newlines and removes single
    quotes before tokenizing; ``deacc=True`` also removes accents.
    """
    for sent in sentences:
        sent = re.sub('\S*@\S*\s?', '', sent)  # remove emails
        sent = re.sub('\s+', ' ', sent)  # remove newline chars
        sent = re.sub("\'", "", sent)  # remove single quotes
        sent = gensim.utils.simple_preprocess(str(sent), deacc=True)
        yield(sent)
# Convert to list
df = pd.read_csv(r"C:\Users\<NAME>\Desktop\Topic Modeling\Topic_Modeling\Politics_Sports_News_Cluster.csv")
data = df.Headlines.values.tolist()
data_words = list(sent_to_words(data))
print(data_words[:1])
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
    """Lemmatize tokenized texts, keeping only the given POS tags.

    Uses the module-level spaCy pipeline ``nlp``; '-PRON-' lemmas (spaCy's
    pronoun placeholder) are replaced by empty strings.
    NOTE(review): the mutable list default is shared across calls — it is
    never mutated here, but treat it as read-only.
    """
    texts_out1 = []
    for sent in texts:
        doc = nlp(" ".join(sent))
        texts_out1.append(" ".join([token.lemma_ if token.lemma_ not in ['-PRON-'] else '' for token in doc if token.pos_ in allowed_postags]))
    return texts_out1
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# Do lemmatization keeping only Noun, Adj, Verb, Adverb
data_lemmatized = lemmatization(data_words, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:2])
vectorizer = CountVectorizer(analyzer='word',
min_df=10, # minimum reqd occurences of a word
stop_words='english', # remove stop words
lowercase=True, # convert all words to lowercase
token_pattern='[a-zA-Z0-9]{3,}', # num chars > 3
# max_features=50000, # max number of uniq words
)
data_vectorized = vectorizer.fit_transform(data_lemmatized)
# Materialize the sparse data
data_dense = data_vectorized.todense()
# Compute Sparsicity = Percentage of Non-Zero cells
print("Sparsicity: ", ((data_dense > 0).sum()/data_dense.size)*100, "%")
# Build LDA Model
lda_model = LatentDirichletAllocation(n_components=4, # Number of topics
max_iter=10, # Max learning iterations
learning_method='online',
random_state=100, # Random state
batch_size=20, # n docs in each learning iter
evaluate_every = -1, # compute perplexity every n iters, default: Don't
n_jobs = -1, # Use all available CPUs
)
lda_output = lda_model.fit_transform(data_vectorized)
print(lda_model) # Model attributes
# Log Likelyhood: Higher the better
print("Log Likelihood: ", lda_model.score(data_vectorized))
# Perplexity: Lower the better. Perplexity = exp(-1. * log-likelihood per word)
print("Perplexity: ", lda_model.perplexity(data_vectorized))
# See model parameters
pprint(lda_model.get_params())
# Define Search Param
search_params = {'n_components': [2,4,6,8,10,12,14,16,18,20], 'learning_decay': [.5, .7, .9]}
# Init the Model
lda = LatentDirichletAllocation()
# Init Grid Search Class
model = GridSearchCV(lda, param_grid=search_params)
# Do the Grid Search
model.fit(data_vectorized)
# Best Model
best_lda_model = model.best_estimator_
# Model Parameters
print("Best Model's Params: ", model.best_params_)
# Log Likelihood Score
print("Best Log Likelihood Score: ", model.best_score_)
# Perplexity
print("Model Perplexity: ", best_lda_model.perplexity(data_vectorized))
# Create Document - Topic Matrix
lda_output = best_lda_model.transform(data_vectorized)
# column names
topicnames = ["Topic" + str(i) for i in range(best_lda_model.n_components)]
# index names
docnames = ["Doc" + str(i) for i in range(len(data))]
# Make the pandas dataframe
df_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames, index=docnames)
# Get dominant topic for each document
dominant_topic = np.argmax(df_document_topic.values, axis=1)
df_document_topic['dominant_topic'] = dominant_topic
df_topic_distribution = df_document_topic['dominant_topic'].value_counts().reset_index(name="Num Documents")
df_topic_distribution.columns = ['Topic Num', 'Num Documents']
df_topic_distribution
# Topic-Keyword Matrix
df_topic_keywords = pd.DataFrame(best_lda_model.components_)
# Assign Column and Index
df_topic_keywords.columns = vectorizer.get_feature_names()
df_topic_keywords.index = topicnames
# View
df_topic_keywords.head()
# Show top n keywords for each topic
def show_topics(vectorizer=vectorizer, lda_model=lda_model, n_words=20):
    """Return, for every LDA topic, its n_words highest-weighted vocabulary terms."""
    vocab = np.array(vectorizer.get_feature_names())
    # argsort of the negated weights == descending sort by weight
    return [
        vocab.take(np.argsort(-weights)[:n_words])
        for weights in lda_model.components_
    ]
# Build a human-readable topic -> top-15-keywords table from the best model.
topic_keywords = show_topics(vectorizer=vectorizer, lda_model=best_lda_model, n_words=15)
# Topic-keywords dataframe: rows are topics, columns are keyword ranks.
df_topic_keywords = pd.DataFrame(topic_keywords)
df_topic_keywords.columns = ['Word '+str(i) for i in range(df_topic_keywords.shape[1])]
df_topic_keywords.index = ['Topic '+str(i) for i in range(df_topic_keywords.shape[0])]
df_topic_keywords
# Define function to predict the topic for a given text document.
# spaCy model used only for lemmatization (parser/NER disabled for speed).
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
def predict_topic(text, nlp=nlp):
    """Run the preprocess -> lemmatize -> vectorize -> LDA pipeline on *text*.

    Returns a ``(keywords, scores)`` pair: the keyword row of the most
    probable topic and the raw topic-probability matrix.
    """
    global sent_to_words
    global lemmatization
    # Clean/tokenize with the upstream helper, then lemmatize keeping only
    # content parts of speech.
    tokens = list(sent_to_words(text))
    lemmas = lemmatization(tokens, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
    # Bag-of-words vector, then topic probabilities from the tuned model.
    bow = vectorizer.transform(lemmas)
    scores = best_lda_model.transform(bow)
    # Keywords of the single highest-scoring topic across the whole matrix.
    keywords = df_topic_keywords.iloc[np.argmax(scores), :].values.tolist()
    return keywords, scores
# 'a' presumably holds the user's input text captured earlier in the
# (truncated) script -- TODO confirm; wrap it in a list for the pipeline.
a = [a]
topic, topic_probability_scores = predict_topic(text = a)
b = topic_probability_scores
#c = topic_probability_scores[1]
# Render the prediction in the Streamlit UI.
st.subheader('Topic KeyWords:')
st.write(topic)
st.subheader('Topic Probability')
st.write(b)
st.subheader('Topic Identified:')
# Binary call: topic 0 probability > 0.5 is treated as "Political".
st.write('This is Political' if pd.Series(b[0][0]>0.5).item() else 'This is Sports')
# # Predict the topic
# mytext = ["This week in US politics: Biden takes on Facebook, cosies up to Fox News, to battle vaccine hesitancy"]
# topic, prob_scores = predict_topic(text = mytext)
# print(topic)
# prob_scores[0][0]
# print('Political' if prob_scores[0][0]>0.5 else 'Sports')
# # Construct the k-means clusters
# from sklearn.cluster import KMeans
# clusters = KMeans(n_clusters=15, random_state=100).fit_predict(lda_output)
# from sklearn.metrics.pairwise import euclidean_distances
# nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# def similar_documents(text, doc_topic_probs, documents = data, nlp=nlp, top_n=5, verbose=False):
# topic, x = predict_topic(text)
# dists = euclidean_distances(x.reshape(1, -1), doc_topic_probs)[0]
# doc_ids = np.argsort(dists)[:top_n]
# if verbose:
# print("Topic KeyWords: ", topic)
# print("Topic Prob Scores of text: ", np.round(x, 1))
# print("Most Similar Doc's Probs: ", np.round(doc_topic_probs[doc_ids], 1))
# return doc_ids, np.take(documents, doc_ids)
# html_temp = """
# <div style ="background-color:orange;padding:13px">
# <h1 style ="color:black;text-align:center;">Group - 1 Batch - P60 </h1>
# </div>
# """
# # this line allows us to display the front end aspects we have
# # defined in the above code
# st.markdown(html_temp, unsafe_allow_html = True)
# result =""
# a = st.subheader("Enter your Text Data:")
# # Get similar documents
# mytext = ["How Politics Changed in 30 Years of Reforms: CMs became powerful, women voted more, west & south marched ahead of north & east"]
# doc_ids, docs = similar_documents(text=mytext, doc_topic_probs=lda_output, documents = data, top_n=1, verbose=True)
# print('\n', docs[0][:500])
# st.subheader("This model accompanies LDA (Latent Dirichlet Allocation) Library")
# # giving the webpage a title
# st.title("Topic Prediction")
# st.header("This application helps you classify News Topic from any given article whether is Political or Sports") |
dimamelnik22/drawfulru | game/migrations/0011_onlinegame_playersready.py | # Generated by Django 3.0 on 2019-12-23 06:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0010_auto_20191223_0818'),
]
operations = [
migrations.AddField(
model_name='onlinegame',
name='playersready',
field=models.IntegerField(default=0),
),
]
|
dimamelnik22/drawfulru | game/models.py | from django.db import models
import datetime
from user_profile.models import Profile
import random
class Phrase(models.Model):
    """A single drawable phrase.

    `rating` is a running quality score and `gamesCount` the number of
    games it has been scored in (views use gamesCount == -1 to mark
    throwaway in-game copies).
    """
    name = models.CharField(max_length=100)
    author = models.ForeignKey(Profile, on_delete=models.SET_NULL, null=True)
    gamesCount = models.IntegerField(default=0)
    rating = models.DecimalField(max_digits=3, decimal_places=2, default=0.0)

    def __str__(self):
        return self.name
class PhrasePack(models.Model):
    """A titled set of phrases used for one game (the views draw 8 per game)."""
    title = models.CharField(max_length=100)
    phrases = models.ManyToManyField(Phrase)
    rating = models.DecimalField(max_digits=3, decimal_places=2, default=0.0)
    gamesCount = models.IntegerField(default=0)

    def __str__(self):
        return self.title

    def randomPack(self, numOfObjects=8):
        """Return `numOfObjects` distinct random phrases from the whole pool.

        Bug fix: the original body referenced an unimported `randint` and an
        undefined `numOfObjects`, so every call raised NameError; the computed
        `random_index` was never used. `numOfObjects` is now a parameter
        (default 8, matching the pack size used throughout the views).
        """
        return random.sample(list(Phrase.objects.all()), numOfObjects)
class PlayedGame(models.Model):
    """Historical record of a finished game: date, participants, pack used."""
    playDate = models.DateField(("Date"), default=datetime.date.today)
    players = models.ManyToManyField(Profile)
    phrasePack = models.ForeignKey(PhrasePack, on_delete=models.SET_NULL, null=True)

    def __str__(self):
        return str(self.playDate)
class Image(models.Model):
    """A player's drawing, stored as text.

    NOTE(review): presumably serialized/base64 canvas payloads posted by the
    drawing front-end (see lobby_view's 'save_cdata'/'save_image') -- confirm.
    """
    image = models.TextField()
    canvas_image = models.TextField()
class Round(models.Model):
    """One drawing round: the drawer, their image, and the candidate phrases
    (the true phrase plus player-suggested decoys)."""
    image = models.ForeignKey(Image, on_delete=models.SET_NULL, null=True)
    player = models.ForeignKey(Profile, on_delete=models.SET_NULL, null=True)
    # Set by result_view once the round has been scored.
    finished = models.BooleanField(default=False)
    phrases = models.ManyToManyField(Phrase)
class PlayersChoice(models.Model):
    """A player's guess: which phrase they picked for a given round."""
    player = models.ForeignKey(Profile, on_delete=models.SET_NULL, null=True)
    chosenPhrase = models.ForeignKey(Phrase, on_delete=models.CASCADE)
    # "ground" is this codebase's name for the Round being played.
    ground = models.ForeignKey(Round, on_delete=models.CASCADE)
class PlayersNewPhrase(models.Model):
    """A decoy phrase a player submitted for a round; consumed (deleted)
    by guessing_view when the host merges decoys into the round."""
    player = models.ForeignKey(Profile, on_delete=models.SET_NULL, null=True)
    newPhrase = models.ForeignKey(Phrase, on_delete=models.CASCADE)
    # "ground" is this codebase's name for the Round being played.
    ground = models.ForeignKey(Round, on_delete=models.CASCADE)
class OnlineGame(models.Model):
    """A live game session/lobby."""
    players = models.ManyToManyField(Profile)
    phrasePack = models.ForeignKey(PhrasePack, on_delete=models.SET_NULL, null=True)
    # Set by roomjoin_view once at least 2 players have joined.
    isStarted = models.BooleanField(default=False)
    rounds = models.ManyToManyField(Round)
    # Index of the round currently being played.
    curround = models.IntegerField(default = 0)
    # NOTE(review): migration 0011 created `playersready` as
    # IntegerField(default=0); the model now declares a BooleanField.
    # Verify a later migration altered the column, otherwise the model
    # and the database schema are out of sync.
    playersready = models.BooleanField(default = False)
|
dimamelnik22/drawfulru | user_profile/migrations/0007_auto_20191226_0128.py | # Generated by Django 3.0 on 2019-12-25 22:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0006_profile_number'),
]
operations = [
migrations.AddField(
model_name='profile',
name='isReady',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='profile',
name='rating',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=3),
),
]
|
dimamelnik22/drawfulru | game/views.py | from django.shortcuts import render, redirect
from django.http import HttpResponse,HttpResponseRedirect
from game.models import PhrasePack, Phrase, PlayedGame, Image, OnlineGame, Round, PlayersChoice, PlayersNewPhrase
from user_profile.models import Profile
from .forms import RoundResultFull, RoundResultScoreOnly, LoginForm, UserRegisterForm, ChooseAnswer
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialAccount
import random
import datetime
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
import requests
# Create your views here.
def home_view(request):
    """Landing page: lists open games and available phrase packs.

    POST creates a throwaway pack of 8 random phrases and reloads the page.
    """
    games = OnlineGame.objects.all()
    if request.method == 'POST':
        newPack = PhrasePack()
        newPack.title = "randomaddedbybutton" + str(datetime.datetime.now())
        newPack.save()
        # order_by('?') draws 8 random phrases for the new pack.
        newPack.phrases.set(Phrase.objects.order_by('?')[:8])
        newPack.save()
        return redirect('game-home')
    # Resolve the display name; fall back to "unnamed" for anonymous users
    # or users without a Profile. Bug fix: the original used
    # Profile.objects.get(...), which raises Profile.DoesNotExist (a 500)
    # for an active user with no profile -- its `if not player` check was
    # unreachable dead code.
    player = "unnamed"
    if request.user.is_active:
        profile = Profile.objects.filter(user=request.user).first()
        if profile is not None:
            player = profile.name
    return render(request, 'game/home.html', context={'games': games, 'data': PhrasePack.objects.all(), 'name': player})
def auth_view(request):
    """Static page offering the login/social-auth options."""
    return render(request,'game/auth.html')
def number_view(request):
    """Ask the user for a phone number once; skip if one is already stored.

    Cleanup: removed the unused `profiles = Profile.objects.all()` query
    (a full-table fetch whose result was never read) and replaced
    `!= None` with the idiomatic `is not None`.
    """
    form = LoginForm(request.POST or None)
    p = request.user.profile
    if p.number is not None:
        # Number already captured -- nothing to do here.
        return redirect('game-home')
    if form.is_valid():
        # The generic LoginForm exposes its single value under 'name'.
        p.number = form.cleaned_data['name']
        p.save()
        return redirect('game-home')
    return render(request, 'game/number.html', context={'form': form})
def nick_view(request):
    """Ask a first-time user for a nickname and create their Profile.

    Perf fix: the original fetched every Profile and compared user ids in
    Python (one extra query per profile via `p.user`); a single EXISTS
    query answers the same question.
    """
    if Profile.objects.filter(user=request.user).exists():
        # Already has a profile -> go straight to the phone-number step.
        return redirect('game-number')
    form = LoginForm(request.POST or None)
    if form.is_valid():
        profile = Profile()
        profile.name = form.cleaned_data['name']
        profile.user = request.user
        profile.save()
        return redirect('game-number')
    return render(request, 'game/nick.html', context={'form': form})
def login_view(request):
    """Register a new account and attach an empty Profile to it."""
    form = UserRegisterForm(request.POST or None)
    if not form.is_valid():
        # GET, or invalid POST: (re)render the registration form.
        return render(request, 'game/login.html', {'form': form})
    new_user = form.save()
    new_profile = Profile()
    new_profile.user = new_user
    new_profile.save()
    return redirect('game-home')
def room_view(request,packid):
    """Create a new OnlineGame for the chosen phrase pack, join it as host,
    then hand off to the join page."""
    game = OnlineGame()
    game.save()
    game.players.add(Profile.objects.get(user = request.user))
    game.phrasePack = PhrasePack.objects.get(id = packid)
    game.save()
    # Disabled SMS "new game" broadcast to every profile with a 12-char
    # phone number (kept for reference).
    # pl = request.user.profile.name
    # pp = game.phrasePack.title
    # rate = game.phrasePack.rating
    # for p in Profile.objects.all():
    #     phone = p.number
    #     if len(phone) == 12:
    #         data = {
    #             'phoneNumber': phone,
    #             'message': f'New game started! Host: {pl}. Phrase pack: {pp}. Rating: {rate}'
    #         }
    #         response = requests.post('https://ixl8j3vad0.execute-api.us-east-1.amazonaws.com/myNewStage/userinfo', json=data, headers={'Content-type': 'application/json'})
    return redirect("game-roomjoin", gameid=game.id)
    #return render(request, 'game/room.html', context={'phrases':game.phrasePack.phrases.all(), 'players':game.players, 'name':Profile.objects.get(user = request.user).name})
def roomjoin_view(request,gameid):
    """Join an existing game; a visit by the host with >=2 players present
    bootstraps one Round per player.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    verify loop/branch nesting against the original file.
    """
    game = OnlineGame.objects.get(id = gameid)
    if not game.players.filter(user = request.user):
        game.players.add(Profile.objects.get(user = request.user))
        game.save()
    # Host + at least 2 players + not yet started => create the rounds.
    # Each round is seeded with a private copy of one pack phrase
    # (gamesCount == -1 marks such in-game copies).
    if (game.players.all().count() >= 2) and game.players.all()[0].user == request.user and not game.isStarted:
        game.isStarted = True
        for index,pl in enumerate(game.players.all()):
            ground = Round()
            ground.player = pl
            ground.save()
            nphrase = Phrase()
            nphrase.gamesCount = -1
            nphrase.name = game.phrasePack.phrases.all()[index].name
            nphrase.author = pl
            nphrase.save()
            ground.phrases.add(nphrase)
            ground.save()
            game.rounds.add(ground)
        game.save()
    return render(request, 'game/room.html', context={'game':game,'phrases':game.phrasePack.phrases.all(), 'players':game.players.all(), 'name':Profile.objects.get(user = request.user).name})
@csrf_exempt
def lobby_view(request,gameid):
    """Drawing stage.

    GET renders the canvas page with the player's secret phrase; POST stores
    the submitted drawing on the player's Round and advances to suggesting.
    """
    # Reset this player's stage counter at the start of the game cycle
    # (wait_view synchronises players by comparing waitingFor values).
    cplayer = request.user.profile
    cplayer.waitingFor = 0
    cplayer.save()
    game = OnlineGame.objects.get(id = gameid)
    roundid = game.rounds.all()[0].id
    if request.method == 'GET':
        player = Profile.objects.get(user = request.user)
        ground = game.rounds.get(player = player)
        phrase = ground.phrases.all()[0]
        return render(request, 'game/lobby.html', context={'roundid':roundid,'phrase':phrase,'game':game,'rounds':game.rounds.all(),'players':game.players.all(), 'name':Profile.objects.get(user = request.user).name})
    elif request.method == 'POST':
        data = request.POST['save_cdata']
        image = request.POST['save_image']
        file_data = Image( image=data, canvas_image=image)
        file_data.save()
        player = Profile.objects.get(user = request.user)
        ground = game.rounds.get(player = player)
        ground.image = file_data
        ground.save()
        # Bug fix: the old redirect target '/room/<id>/suggest/' matches no
        # URL pattern -- the 'game-suggesting' route is
        # 'room/<gameid>/suggest/<roundid>' and requires a roundid, so the
        # POST always 404'd. Redirect to the first round explicitly.
        return redirect('game-suggesting', gameid=game.id, roundid=roundid)
def suggesting_view(request,gameid,roundid):
    """Decoy-suggestion stage: every non-drawer submits a fake phrase for the
    round's drawing; the drawer just sees their image.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    verify branch nesting against the original file.
    """
    game = OnlineGame.objects.get(id = gameid)
    player = Profile.objects.get(user = request.user)
    if request.method == 'GET':
        # Advance this player's stage counter for wait_view synchronisation.
        player.waitingFor +=1
        player.save()
    # All rounds played -> show the final results page.
    if game.curround >= game.rounds.count():
        return render(request,'game/allresult.html', context={'game':game})
    ground = game.rounds.get(id = roundid)
    if ground.player == player:
        # The drawer does not suggest a decoy for their own drawing.
        return render(request, 'game/suggesting.html', context={'game':game, 'image':ground.image.image, 'roundid':roundid})
    form = LoginForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # Store the decoy as a Phrase (gamesCount == -1 marks in-game
        # copies) wrapped in a PlayersNewPhrase, merged later by the host.
        phrase = Phrase()
        phrase.name = form.cleaned_data['name']
        phrase.author = Profile.objects.get(user = request.user)
        phrase.gamesCount = -1
        newPhrase = PlayersNewPhrase()
        phrase.save()
        newPhrase.player = phrase.author
        newPhrase.ground = ground
        newPhrase.newPhrase = phrase
        newPhrase.save()
        return redirect('/room/'+str(game.id)+'/wait/'+str(roundid)+'/guess')
    #return wait_view(request,game.id,roundid,'guess')
    #return render(request, 'game/suggesting.html', context={'game':game, 'image':ground.image.image, 'roundid':roundid})
    return render(request, 'game/suggesting.html', context={'game':game, 'image':ground.image.image, 'roundid':roundid,'form':form})
def guessing_view(request,gameid,roundid):
    """Guessing stage: players pick which of the round's phrases is real.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    verify branch nesting against the original file.
    """
    game = OnlineGame.objects.get(id = gameid)
    player = Profile.objects.get(user = request.user)
    ground = game.rounds.get(id = roundid)
    form = ChooseAnswer(request.POST or None)
    # Only the host (first player) consolidates the round: merge every
    # suggested decoy into the round's phrase set, then shuffle and keep 8.
    if request.user.profile == game.players.all()[0]:
        for np in PlayersNewPhrase.objects.filter(ground = ground):
            ground.phrases.add(np.newPhrase)
            ground.save()
            np.delete()
        phrases = ground.phrases.order_by('?')[:8]
        ground.phrases.set(phrases)
        ground.save()
    phrases = ground.phrases.all()
    if request.method == 'GET':
        # Advance this player's stage counter for wait_view synchronisation.
        player.waitingFor +=1
        player.save()
    if ground.player == player:
        # The drawer does not guess on their own drawing (no form passed).
        return render(request, 'game/guessing.html', context={'game':game, 'image':ground.image.image,'phrases':ground.phrases.all(), 'roundid':roundid})
    if request.method == 'POST' and form.is_valid():
        # Form choice is 1-based; record the picked phrase for scoring.
        index = form.cleaned_data['Your_Choice']-1
        phrase = ground.phrases.all()[index]
        choice = PlayersChoice()
        choice.player = request.user.profile
        choice.chosenPhrase = phrase
        choice.ground = ground
        choice.save()
        return redirect('/room/'+str(game.id)+'/wait/'+str(roundid)+'/result')
    #return wait_view(request,game.id,roundid,'result')
    #return render(request, 'game/guessing.html', context={'game':game, 'image':ground.image.image,'phrases':ground.phrases.all(), 'roundid':roundid})
    return render(request, 'game/guessing.html', context={'form':form,'game':game, 'image':ground.image.image,'phrases':phrases, 'roundid':roundid})
def result_view(request,gameid,roundid):
    """Round-result stage: score the guesses once, then show the reveal page.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    verify branch nesting against the original file.
    """
    game = OnlineGame.objects.get(id = gameid)
    player = Profile.objects.get(user = request.user)
    if request.method == 'GET':
        # Advance this player's stage counter for wait_view synchronisation.
        player.waitingFor+=1
        player.save()
    # The true phrase for the current round comes from the pack by index.
    tphrase = game.phrasePack.phrases.all()[game.curround]
    ground = game.rounds.get(id = roundid)
    # Score exactly once per round (`finished` guards repeated visits):
    # +1 rating per vote a phrase received, then credit each phrase's
    # author with its rating and advance the round counter.
    if not ground.finished:
        for pc in PlayersChoice.objects.filter(ground=ground):
            phrase = pc.chosenPhrase
            phrase.rating = float(phrase.rating) + 1.0
            phrase.save()
            pc.delete()
        for p in ground.phrases.all():
            p.author.score += p.rating
            p.author.save()
        game.curround +=1
        game.save()
        ground.finished = True
        ground.save()
    # Point the template at the next round, if any remain.
    if game.curround < game.rounds.count():
        roundid = game.rounds.all()[game.curround].id
    return render(request, 'game/result.html', context={'tphrase':tphrase,'phrases':ground.phrases.all(), 'image':ground.image.image, 'roundid':roundid, 'game':game})
def wait_view(request, gameid, roundid, nextstage):
    """Waiting screen between stages; 'ready' tells the template to advance
    to *nextstage* for the given round."""
    game = OnlineGame.objects.get(id=gameid)
    player = Profile.objects.get(user=request.user)
    # Ready once somebody is further along than this player...
    ready = any(p.waitingFor > player.waitingFor for p in game.players.all())
    # ...or, for the host (first player), once everybody is at the same stage.
    if request.user.profile == game.players.all()[0]:
        host_stage = request.user.profile.waitingFor
        if all(p.waitingFor == host_stage for p in game.players.all()):
            ready = True
    return render(request, 'game/wait.html', context={'ready': ready, 'game': game, 'next': nextstage, 'roundid': roundid, 'player': player})
def end_view(request,gameid):
    """End of game: the host folds the results back into persistent data,
    builds a follow-up phrase pack from the best player decoys, and deletes
    the finished game.

    NOTE(review): indentation reconstructed from a whitespace-stripped dump;
    verify loop/branch nesting against the original file.
    """
    if OnlineGame.objects.filter(id = gameid):
        game = OnlineGame.objects.get(id = gameid)
        # Only the host (first player) performs the teardown.
        if request.user.profile == game.players.all()[0]:
            # Disabled SMS results notification (kept for reference).
            # text = ""
            # sum = 0
            # for r in game.rounds.all():
            #     text+=str(r.phrases.get(author = request.user.profile).rating)+" , "
            #     sum+=r.phrases.get(author = request.user.profile).rating
            # phone = request.user.profile.number
            # if len(phone) == 12:
            #     data = {
            #         'phoneNumber': phone,
            #         'message': f'Nice Game! Your rezults: {text} Total: {sum}'
            #     }
            #     response = requests.post('https://ixl8j3vad0.execute-api.us-east-1.amazonaws.com/myNewStage/userinfo', json=data, headers={'Content-type': 'application/json'})
            #     print(response)
            newPack = []
            for r in game.rounds.all():
                if len(r.phrases.all()) > 0:
                    # Top-rated phrase of the round: if it is a copy of a
                    # pack phrase, fold its in-game rating back into the
                    # pack phrase (EMA once gamesCount >= 10, plain running
                    # average before that -- same scheme as anot()).
                    phrase = r.phrases.order_by('-rating')[0]
                    if game.phrasePack.phrases.filter(name = phrase.name):
                        sphrase = game.phrasePack.phrases.get(name = phrase.name)
                        if sphrase.gamesCount >= 10 :
                            sphrase.rating = float(sphrase.rating)*0.9+float(phrase.rating)*0.1
                        else:
                            sphrase.rating = (sphrase.rating*sphrase.gamesCount+float(phrase.rating))/(sphrase.gamesCount+1)
                        sphrase.gamesCount += 1
                        sphrase.save()
                if len(r.phrases.all())>1:
                    # Second-best phrase (the best decoy): promote it to a
                    # persistent Phrase if it fooled at least one player.
                    if r.phrases.order_by('-rating')[1].rating > 0:
                        phrase = r.phrases.order_by('-rating')[1]
                        name = phrase.name
                        author = phrase.author
                        rating = phrase.rating
                        # Delete the round's throwaway phrase copies first.
                        for p in r.phrases.all():
                            p.delete()
                        if not Phrase.objects.filter(name = name):
                            phrase = Phrase()
                            phrase.name = name
                            phrase.author = author
                            phrase.save()
                            author.numOfAuthoredPhrases+=1
                            author.save()
                        else:
                            phrase = Phrase.objects.get(name = name)
                        newPack.append(phrase)
                r.image.delete()
                r.delete()
            # Reset per-game scores.
            for p in game.players.all():
                p.score = 0
                p.save()
            # Pad the new pack to 8 phrases with originals from the old pack.
            for i in range(len(newPack),8):
                newPack.append(game.phrasePack.phrases.all()[i])
            pp = PhrasePack()
            pp.title = 'fromgame'+str(datetime.datetime.now())
            pp.save()
            pp.phrases.set(newPack)
            pp.save()
            game.phrasePack.gamesCount+=1
            game.phrasePack.save()
            game.delete()
    return redirect('game-home')
def anot():
    """Offline (paper-game) result entry, kept from an earlier version.

    NOTE(review): dead code -- `request` and `packid` are not parameters and
    are undefined in this scope, so any call raises NameError immediately.
    Kept verbatim (indentation reconstructed from a whitespace-stripped dump)
    pending a decision to delete or revive it.
    """
    rounds = 8
    players = 8
    # Build one form row per round: round label, a score-only form for the
    # true phrase, and a full (player/phrase/score) form per other player.
    forms = []
    for i in range(rounds):
        forms.append([])
        forms[i].append("Фраза раунда: " + PhrasePack.objects.get(id = packid).phrases.all()[i].name)
        forms[i].append(RoundResultScoreOnly(request.POST or None, prefix = str(i)))
        for j in range(players-1):
            forms[i].append(RoundResultFull(request.POST or None, prefix = str(i)+str(j)))
    if request.method == 'POST':
        newPackPhrases = []
        phraseSuccess = 0
        packRating = 0
        game = PlayedGame()
        game.save()
        playerslist = Profile.objects.all()
        # Hard-coded exclusion of the admin account "dimme".
        game.players.set(playerslist.exclude(user = User.objects.get(username = "dimme")))
        game.phrasePack = PhrasePack.objects.get(id = packid)
        game.save()
        for i in range(rounds):
            sphrase = []
            if forms[i][1].is_valid():
                # Fold the round score into the pack phrase's rating:
                # EMA once gamesCount >= 10, running average before that.
                packRating += forms[i][1].cleaned_data['score']
                sphrase = PhrasePack.objects.get(id = packid).phrases.all()[i]
                if sphrase.gamesCount >= 10 :
                    sphrase.rating = float(sphrase.rating)*0.9+forms[i][1].cleaned_data['score']*0.1
                else:
                    sphrase.rating = (sphrase.rating*sphrase.gamesCount+forms[i][1].cleaned_data['score'])/(sphrase.gamesCount+1)
                sphrase.gamesCount += 1
                sphrase.save()
            bestPhrase = Phrase()
            for j in range(2,players+1):
                if forms[i][j].is_valid():
                    phrase = Phrase.objects.filter(name = forms[i][j].cleaned_data['phrase'])
                    if not phrase:
                        # New decoy phrase: persist it with its first score.
                        phrase = Phrase()
                        #phrase.author = Profile.objects.get(user = User.objects.get(username = forms[i][j].cleaned_data['player']))
                        phrase.name = forms[i][j].cleaned_data['phrase']
                        phrase.rating = forms[i][j].cleaned_data['score']
                        phrase.save()
                        phraseSuccess += 1
                    elif forms[i][j].cleaned_data['score'] > 0 :
                        # Existing phrase: update its rating and credit
                        # the author.
                        phrase = Phrase.objects.get(name = forms[i][j].cleaned_data['phrase'])
                        phrase.gamesCount += 1
                        if phrase.gamesCount >= 10 :
                            phrase.rating = phrase.rating*0.9+forms[i][j].cleaned_data['score']*0.1
                        else:
                            phrase.rating = (phrase.rating*phrase.gamesCount+forms[i][j].cleaned_data['score'])/(phrase.gamesCount+1)
                        phrase.save()
                        phrase.author.numOfAuthoredPhrases += 1
                        phrase.author.score += forms[i][j].cleaned_data['score']
                        phrase.author.save()
                    if phrase.rating > bestPhrase.rating:
                        bestPhrase = phrase
            # Keep the round's best decoy for the follow-up pack, or fall
            # back to the original pack phrase (note: `sphrase` may still
            # be the initial [] if the score form was invalid).
            if bestPhrase.rating < 1:
                newPackPhrases.append(sphrase)
            else:
                newPackPhrases.append(bestPhrase)
        currentPack = PhrasePack.objects.get(id = packid)
        # Normalise the summed round scores to a 0-5 pack rating.
        packRating /= 8
        if packRating > 3.5:
            packRating = 7 - packRating
        packRating = packRating/3.5*5
        currentPack.rating = packRating
        currentPack.gamesCount += 1
        currentPack.save()
        # Enough fresh decoys -> spin them off into a new pack.
        if phraseSuccess > 4:
            newPack = PhrasePack()
            newPack.title = "random"
            newPack.save()
            newPack.phrases.set(newPackPhrases)
            newPack.save()
    return redirect('game-home')
dimamelnik22/drawfulru | user_profile/models.py | from django.db import models
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialAccount
# Create your models here.
class Profile(models.Model):
    """Per-user game state, attached 1:1 to the auth User."""
    status = models.CharField(default="user",max_length=100)
    # Running score accumulated during the current game.
    score = models.IntegerField(default=0)
    numOfAuthoredPhrases = models.IntegerField(default=0)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=100,default="unnamed")
    # Phone number (12 chars expected by the disabled SMS code in
    # game.views) -- presumably '+' plus 11 digits; TODO confirm format.
    number = models.CharField(max_length=12,null=True)
    rating = models.DecimalField(max_digits=3, decimal_places=2, default=0.0)
    waitingFor = models.IntegerField(default=0)
    # Stage counter used by wait_view to synchronise players.
    # waitingFor % 3:
    # 0 -suggest
    # 1 -guess
    # 2 -result
    def __str__(self):
        return self.name
|
dimamelnik22/drawfulru | user_profile/migrations/0003_auto_20191222_1829.py | <gh_stars>0
# Generated by Django 3.0 on 2019-12-22 15:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('socialaccount', '0003_extra_data_default_dict'),
('user_profile', '0002_auto_20191217_1342'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='socialaccount.SocialAccount'),
),
]
|
dimamelnik22/drawfulru | game/migrations/0012_playerschoice_playersnewphrase.py | <filename>game/migrations/0012_playerschoice_playersnewphrase.py
# Generated by Django 3.0 on 2019-12-25 22:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0007_auto_20191226_0128'),
('game', '0011_onlinegame_playersready'),
]
operations = [
migrations.CreateModel(
name='PlayersNewPhrase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Round')),
('newPhrase', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Phrase')),
('player', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_profile.Profile')),
],
),
migrations.CreateModel(
name='PlayersChoice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chosenPhrase', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Phrase')),
('ground', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Round')),
('player', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_profile.Profile')),
],
),
]
|
dimamelnik22/drawfulru | user_profile/migrations/0008_auto_20191226_0231.py | # Generated by Django 3.0 on 2019-12-25 23:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0007_auto_20191226_0128'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='isReady',
),
migrations.AddField(
model_name='profile',
name='waitingFor',
field=models.IntegerField(default=0),
),
]
|
dimamelnik22/drawfulru | user_profile/migrations/0002_auto_20191217_1342.py | # Generated by Django 3.0 on 2019-12-17 10:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='status',
field=models.CharField(default='user', max_length=100),
),
]
|
dimamelnik22/drawfulru | game/migrations/0010_auto_20191223_0818.py | <gh_stars>0
# Generated by Django 3.0 on 2019-12-23 05:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('game', '0009_onlinegame_curround'),
]
operations = [
migrations.RemoveField(
model_name='round',
name='mainscore',
),
migrations.RemoveField(
model_name='round',
name='score1',
),
migrations.RemoveField(
model_name='round',
name='score2',
),
migrations.RemoveField(
model_name='round',
name='score3',
),
migrations.RemoveField(
model_name='round',
name='score4',
),
migrations.RemoveField(
model_name='round',
name='score5',
),
migrations.RemoveField(
model_name='round',
name='score6',
),
migrations.RemoveField(
model_name='round',
name='score7',
),
]
|
dimamelnik22/drawfulru | game/urls.py | <reponame>dimamelnik22/drawfulru<gh_stars>0
from django.conf.urls import url
from . import views
from django.urls import path, include
urlpatterns = [
path('', views.home_view, name="game-home"),
path('auth/', views.auth_view, name="game-auth"),
path('nick/', views.nick_view, name="game-nick"),
path('number/', views.number_view, name="game-number"),
path('room/<packid>/', views.room_view, name="game-room"),
path('room/<gameid>/join', views.roomjoin_view, name="game-roomjoin"),
path('room/<gameid>/draw/', views.lobby_view, name="game-lobby"),
path('room/<gameid>/wait/<roundid>/<nextstage>', views.wait_view, name="game-wait"),
path('room/<gameid>/suggest/<roundid>', views.suggesting_view, name="game-suggesting"),
path('room/<gameid>/guess/<roundid>', views.guessing_view, name="game-guessing"),
path('room/<gameid>/result/<roundid>', views.result_view, name="game-result"),
path('room/<gameid>/end', views.end_view, name="game-end"),
path('login', views.login_view, name="game-login"),
path('accounts/', include('allauth.urls'))
] |
dimamelnik22/drawfulru | game/migrations/0016_auto_20191226_0326.py | # Generated by Django 3.0 on 2019-12-26 00:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('game', '0015_auto_20191226_0320'),
]
operations = [
migrations.AlterField(
model_name='playerschoice',
name='chosenPhrase',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Phrase'),
),
migrations.AlterField(
model_name='playersnewphrase',
name='newPhrase',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game.Phrase'),
),
]
|
dimamelnik22/drawfulru | game/migrations/0002_phrase_author.py | # Generated by Django 3.0 on 2019-12-17 02:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0001_initial'),
('game', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='phrase',
name='author',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_profile.Profile'),
),
]
|
dimamelnik22/drawfulru | game/forms.py | <reponame>dimamelnik22/drawfulru
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class UserRegisterForm(UserCreationForm):
    """Signup form: Django's UserCreationForm plus a required email field."""
    email = forms.EmailField()

    class Meta:
        model = User
        # Bug fix: the field list contained two literal '<PASSWORD>'
        # placeholder strings (no such fields exist, and duplicates are
        # invalid); restore UserCreationForm's real password fields.
        fields = ['username', 'email', 'password1', 'password2']
class RoundResultFull(forms.Form):
    """Offline score entry for one player's decoy: player name, the phrase
    they suggested, and the score it earned (used by views.anot)."""
    player = forms.CharField()
    phrase = forms.CharField()
    score = forms.IntegerField()
class RoundResultScoreOnly(forms.Form):
    """Score for the round's true pack phrase (no player/phrase text needed)."""
    score = forms.IntegerField()
class LoginForm(forms.Form):
    """Single free-text field; reused across views for nicknames, phone
    numbers and decoy-phrase suggestions (always under the key 'name')."""
    name = forms.CharField()
class ChooseAnswer(forms.Form):
    """Pick one of the round's 8 candidate phrases (1-based, coerced to int)."""
    Your_Choice = forms.TypedChoiceField(choices=[(x, x) for x in range(1, 9)], coerce=int)
dimamelnik22/drawfulru | game/migrations/0014_auto_20191226_0320.py | # Generated by Django 3.0 on 2019-12-26 00:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('game', '0013_auto_20191226_0150'),
]
operations = [
migrations.AlterField(
model_name='playersnewphrase',
name='newPhrase',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='game.Phrase'),
),
]
|
dimamelnik22/drawfulru | game/migrations/0008_remove_onlinegame_password.py | # Generated by Django 3.0 on 2019-12-22 23:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('game', '0007_auto_20191223_0240'),
]
operations = [
migrations.RemoveField(
model_name='onlinegame',
name='password',
),
]
|
dimamelnik22/drawfulru | user_profile/migrations/0006_profile_number.py | # Generated by Django 3.0 on 2019-12-23 07:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0005_auto_20191222_1911'),
]
operations = [
migrations.AddField(
model_name='profile',
name='number',
field=models.CharField(max_length=12, null=True),
),
]
|
dimamelnik22/drawfulru | game/admin.py | <gh_stars>0
from django.contrib import admin
from .models import Phrase, PhrasePack, PlayedGame, OnlineGame, Image, Round, PlayersChoice, PlayersNewPhrase
# Register your models here.
# Register every game model with the Django admin site.
for _model in (
    Phrase,
    PhrasePack,
    PlayedGame,
    OnlineGame,
    Round,
    Image,
    PlayersChoice,
    PlayersNewPhrase,
):
    admin.site.register(_model)
dimamelnik22/drawfulru | game/migrations/0003_auto_20191217_0708.py | <gh_stars>0
# Generated by Django 3.0 on 2019-12-17 04:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0002_phrase_author'),
]
operations = [
migrations.AlterField(
model_name='phrase',
name='rating',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=3),
),
migrations.AlterField(
model_name='phrasepack',
name='rating',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=3),
),
]
|
dimamelnik22/drawfulru | game/migrations/0004_onlinegame.py | <reponame>dimamelnik22/drawfulru<filename>game/migrations/0004_onlinegame.py
# Generated by Django 3.0 on 2019-12-22 01:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0002_auto_20191217_1342'),
('game', '0003_auto_20191217_0708'),
]
operations = [
migrations.CreateModel(
name='OnlineGame',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('isStarted', models.BooleanField(default=False)),
('password', models.CharField(max_length=20)),
('curround', models.IntegerField(default=0)),
('phrasePack', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='game.PhrasePack')),
('players', models.ManyToManyField(to='user_profile.Profile')),
],
),
]
|
dimamelnik22/drawfulru | game/migrations/0007_auto_20191223_0240.py | # Generated by Django 3.0 on 2019-12-22 23:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user_profile', '0005_auto_20191222_1911'),
('game', '0006_remove_image_name'),
]
operations = [
migrations.RemoveField(
model_name='onlinegame',
name='curround',
),
migrations.CreateModel(
name='Round',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('finished', models.BooleanField(default=False)),
('mainscore', models.IntegerField(default=0)),
('score1', models.IntegerField(default=0)),
('score2', models.IntegerField(default=0)),
('score3', models.IntegerField(default=0)),
('score4', models.IntegerField(default=0)),
('score5', models.IntegerField(default=0)),
('score6', models.IntegerField(default=0)),
('score7', models.IntegerField(default=0)),
('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='game.Image')),
('phrases', models.ManyToManyField(to='game.Phrase')),
('player', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='user_profile.Profile')),
],
),
migrations.AddField(
model_name='onlinegame',
name='rounds',
field=models.ManyToManyField(to='game.Round'),
),
]
|
dimamelnik22/drawfulru | game/migrations/0001_initial.py | # Generated by Django 3.0 on 2019-12-16 20:56
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the `game` app.

    Creates the Phrase, PhrasePack and PlayedGame models.
    """

    initial = True

    dependencies = [
        ('user_profile', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Phrase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('gamesCount', models.IntegerField(default=0)),
                ('rating', models.FloatField(default=0.0)),
            ],
        ),
        migrations.CreateModel(
            name='PhrasePack',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('rating', models.FloatField(default=0.0)),
                ('gamesCount', models.IntegerField(default=0)),
                ('phrases', models.ManyToManyField(to='game.Phrase')),
            ],
        ),
        migrations.CreateModel(
            name='PlayedGame',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Defaults to the creation date of the record.
                ('playDate', models.DateField(default=datetime.date.today, verbose_name='Date')),
                ('phrasePack', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='game.PhrasePack')),
                ('players', models.ManyToManyField(to='user_profile.Profile')),
            ],
        ),
    ]
|
dimamelnik22/drawfulru | user_profile/migrations/0005_auto_20191222_1911.py | # Generated by Django 3.0 on 2019-12-22 16:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration for `user_profile`: redefine Profile.user as
    a OneToOneField to the configured auth user model, cascading on delete."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('user_profile', '0004_profile_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
10sr/suniq | sample/gen_data.py | #!/usr/bin/env python3
"""Generate a test file of pseudo-random characters ('a'-'d' plus newline)."""
from sys import argv
from random import choice

# Same alphabet as before: four letters and a newline, chosen uniformly.
CHARS = "abcd\n"
DEFAULT_COUNT = 10000 * 1000


def generate_data(path, count=DEFAULT_COUNT):
    """Write `count` characters drawn uniformly from CHARS to `path`.

    The original issued one print() call per character (10 million calls);
    building the payload with join() and writing once is far faster while
    producing the same distribution of output.
    """
    with open(path, mode="w") as f:
        f.write("".join(choice(CHARS) for _ in range(count)))


if __name__ == "__main__":
    # Usage: gen_data.py <output-file>
    generate_data(argv[1])
|
blech/Python-Calendar-Converter | calendar.py | <reponame>blech/Python-Calendar-Converter<filename>calendar.py
# This Python file uses the following encoding: utf-8
# MIT Licensed 2013 <NAME>
from datetime import datetime
import roman
class Calendar(object):
    """Convert Gregorian dates to the French Revolutionary (Republican) calendar.

    The conversion uses a simplified model in which every French year starts
    on 22 September of the Gregorian year.

    Public methods return a formatted French date string on success or a
    negative integer error code:
      -1  input has the wrong type
      -2  date string does not have exactly three fields
      -3  day is not an integer
      -4  month name is not a valid English month
      -5  year is not an integer
      -6  date precedes the calendar epoch (22 September 1792)
      -7  French year exceeds 4999 (limit of roman.toRoman)
      -8  computed day-of-year exceeds 366
    """

    # Upper-case English month name -> Gregorian month number.
    _MONTHS = {
        "JANUARY": 1, "FEBRUARY": 2, "MARCH": 3, "APRIL": 4,
        "MAY": 5, "JUNE": 6, "JULY": 7, "AUGUST": 8,
        "SEPTEMBER": 9, "OCTOBER": 10, "NOVEMBER": 11, "DECEMBER": 12,
    }

    # Republican month names, indexed by French month number 1..12.
    _FRENCH_MONTHS = {
        1: "Vendémiaire", 2: "Brumaire", 3: "Frimaire", 4: "Nivôse",
        5: "Pluviôse", 6: "Ventôse", 7: "Germinal", 8: "Floréal",
        9: "Prairial", 10: "Messidor", 11: "Thermidor", 12: "Fructidor",
    }

    # Names of the ten days of a décade, indexed 1..10.
    _FRENCH_DAYS = {
        1: "Primidi", 2: "Duodi", 3: "Tridi", 4: "Quartidi", 5: "Quintidi",
        6: "Sextidi", 7: "Septidi", 8: "Octidi", 9: "Nonidi", 10: "Décadi",
    }

    # Complementary festival days (days 361-366 of the French year).
    # Fix: days 361 and 362 previously emitted a duplicated "de l'Année".
    _COMPLEMENTARY = {
        361: "La Fête de la Vertu",
        362: "La Fête du Génie",
        363: "La Fête du Travail",
        364: "La Fête de l'Opinion",
        365: "La Fête des Récompenses",
        366: "La Fête de la Révolution",
    }

    # Correct input "<number in month> <month> <year>"
    def to_french_revolutionary(self, date):
        """Convert a '<day> <month name> <year>' string."""
        if not isinstance(date, str):
            return -1
        fields = date.split()
        if len(fields) != 3:
            return -2
        date_dict = {"day": fields[0], "month": fields[1], "year": fields[2]}
        # Month matching is case-insensitive; unknown names yield False.
        date_dict["month"] = self.__is_month__(date_dict["month"].upper())
        if not date_dict["month"]:
            return -4
        return self.__to_french_from_dict(date_dict)

    def __to_french_from_dict(self, date_dict):
        """Convert a {'day','month','year'} dict (month already numeric)."""
        try:
            date_dict["day"] = int(date_dict["day"])
        except (TypeError, ValueError):
            return -3
        try:
            date_dict["year"] = int(date_dict["year"])
        except (TypeError, ValueError):
            return -5
        day = date_dict["day"]
        month = date_dict["month"]
        year = date_dict["year"]
        # Dates before the epoch, 22 September 1792, are invalid.
        # Fix: years before 1792 previously fell through and crashed inside
        # roman.toRoman with a non-positive year instead of returning -6.
        before_new_year = month < 9 or (month == 9 and day < 22)
        if year < 1792 or (year == 1792 and before_new_year):
            return -6
        # Republican year N runs from 22 Sep (N+1791) to 21 Sep (N+1792).
        if before_new_year:
            french_year = year - 1792
        else:
            french_year = year - 1791
        if french_year > 4999:
            return -7
        leap = self.__is_gregorian_leap_year__(year)
        days_passed = self.__days_passed__(day, month, leap)
        return self.__to_french__(days_passed, french_year)

    def to_french_revolutionary_datetime(self, dt):
        """Convert a datetime.datetime instance."""
        if not isinstance(dt, datetime):
            return -1
        return self.__to_french_from_dict(
            {"day": dt.day, "month": dt.month, "year": dt.year})

    def __to_french__(self, days_passed, year):
        """Format a French day-of-year and French year as a date string."""
        if days_passed > 366:
            return -8
        roman_year = roman.toRoman(year)
        # Days 361-366 are the jours complémentaires (festival days).
        if days_passed in self._COMPLEMENTARY:
            return (self._COMPLEMENTARY[days_passed]
                    + " de l'Année " + roman_year + " de la Revolution")
        # Twelve 30-day months, each split into three 10-day décades.
        # Integer division keeps the lookups and roman.toRoman on ints.
        days_in_month = days_passed % 30
        month_number = (days_passed - days_in_month) // 30 + 1
        if days_in_month == 0:
            # Day 30 of a month, not day 0 of the next one.
            month_number -= 1
            days_in_month = 30
        days_in_decade = days_in_month % 10
        decade = (days_in_month - days_in_decade) // 10 + 1
        if days_in_decade == 0:
            decade -= 1
            days_in_decade = 10
        return ("Décade " + roman.toRoman(decade) + ", "
                + self._FRENCH_DAYS[days_in_decade] + " de "
                + self._FRENCH_MONTHS[month_number] + " de l'Année "
                + roman_year + " de la Revolution.")

    def __days_passed__(self, day, month, leap):
        """Return the day of the French year (day 1 = 22 September)."""
        if month < 9 or (month == 9 and day < 22):
            # Jan..21 Sep: full Gregorian months of the current year plus the
            # 101 days from 22 Sep to 31 Dec (9 + 31 + 30 + 31).
            # Fix: the original added only 21 days for a full August instead
            # of 31, shifting every date from 1-21 September by ten days.
            month_lengths = (31, 29 if leap else 28, 31, 30, 31, 30, 31, 31)
            return day + sum(month_lengths[:month - 1]) + 101
        if month == 9:
            return day - 21
        if month == 10:
            return 9 + day
        if month == 11:
            return 9 + 31 + day
        return 9 + 31 + 30 + day

    def __is_gregorian_leap_year__(self, year):
        """Full Gregorian rule.

        Fix: the original used only `year % 4 == 0`, misclassifying century
        years such as 1800 and 1900 (not leap) within the calendar's range.
        """
        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

    def __is_month__(self, month):
        """Return 1..12 for an upper-case English month name, else False."""
        return self._MONTHS.get(month, False)
FrancescoFabiano/E-PDDL | EPDDL.py | <gh_stars>1-10
#!/usr/bin/env python
# Four spaces as indentation [no tabs]
import re
import itertools
import warnings
import copy
from pathlib import Path
from action import Action
class EPDDL_Parser:
SUPPORTED_REQUIREMENTS = [':strips', ':negative-preconditions', ':typing', ':no-duplicates', ':mep']
#-----------------------------------------------
# Tokens
#-----------------------------------------------
    def scan_tokens(self, filename):
        """Tokenize an E-PDDL file into nested lists.

        Parenthesised groups become nested Python lists; `[ag1 ag2 ...]`
        belief annotations are collapsed into single strings of the form
        'B(ag,' (one agent) or 'C(ag1,ag2,...,' (several agents).

        NOTE(review): shadows the builtins `str` and `list`; any failure is
        printed and swallowed by the broad except, so callers receive None.
        """
        try:
            with open(filename,'r') as f:
                # Remove single line comments
                str = re.sub(r';.*$', '', f.read(), flags=re.MULTILINE).lower()
            # Drop the '-agent' type marker inside belief brackets.
            str = re.sub(r'\[([^[]+)-agent(\s+|)\]', r'[\1]',str,flags=re.MULTILINE)
            # Repeatedly unwrap redundant double parentheses until stable.
            nb_rep = 1
            while (nb_rep):
                (str, nb_rep) = re.subn(r'\((\s|)+\(([^()]+)\)(\s|)+\)', r'\2',str,flags=re.MULTILINE)
            # Strip parentheses that immediately follow a belief bracket.
            nb_rep = 1
            while (nb_rep):
                (str, nb_rep) = re.subn(r'(\[[^[]+\])\(([^(]+)\)', r'\1\2',str,flags=re.MULTILINE)
            # Tokenize
            stack = []
            list = []
            isBF = 0
            insideBF = 0      # 1 while between '[' and ']'
            firstAg = 1       # 1 until the first agent of a bracket is seen
            countSqPa = 0
            multi_ag = 0      # 1 when the bracket listed more than one agent
            Bf_string = ''
            for t in re.findall(r'[()\[\]]|[^\s()\[\]]+', str):
                if t == '(':
                    stack.append(list)
                    list = []
                elif t == ')':
                    if stack:
                        l = list
                        list = stack.pop()
                        list.append(l)
                    else:
                        raise Exception('Missing open parentheses')
                elif t == '[':
                    firstAg = 1
                    insideBF = 1
                    Bf_string = 'B('
                elif t == ']':
                    insideBF = 0
                    Bf_string += ','
                    # More than one agent means common knowledge: B( -> C(.
                    if multi_ag == 1:
                        Bf_string = Bf_string.replace('B(', 'C(')
                    list.append(Bf_string)
                    multi_ag = 0
                elif insideBF == 1:
                    if firstAg == 0:
                        multi_ag = 1
                        Bf_string +=','
                    Bf_string +=t
                    firstAg = 0
                else:
                    list.append(t)
            if stack:
                raise Exception('Missing close parentheses')
            if len(list) != 1:
                raise Exception('Malformed expression')
            return list[0]
        except Exception as e: print(e)
#-----------------------------------------------
# Parse domain
#-----------------------------------------------
    def parse_domain(self, domain_filename):
        """Parse an E-PDDL domain file.

        Populates self.domain_name, requirements, types, objects
        (constants), predicates and actions. Raises on unsupported
        requirements or a file that is not a '(define ...)' form.
        """
        tokens = self.scan_tokens(domain_filename)
        if type(tokens) is list and tokens.pop(0) == 'define':
            self.domain_name = 'unknown'
            self.requirements = []
            self.types = {}
            self.objects = {}
            self.actions = []
            self.predicates = {}
            # Dispatch each top-level group on its leading keyword.
            while tokens:
                group = tokens.pop(0)
                t = group.pop(0)
                if t == 'domain':
                    self.domain_name = group[0]
                elif t == ':requirements':
                    for req in group:
                        if not req in self.SUPPORTED_REQUIREMENTS:
                            raise Exception('Requirement ' + req + ' not supported')
                    self.requirements = group
                elif t == ':constants':
                    self.parse_objects(group, t)
                elif t == ':predicates':
                    self.parse_predicates(group)
                elif t == ':types':
                    self.parse_types(group)
                elif t == ':action':
                    self.parse_action(group)
                # Unknown tags go to the subclass extension hook.
                else: self.parse_domain_extended(t, group)
        else:
            raise Exception('File ' + domain_filename + ' does not match domain pattern')
def parse_domain_extended(self, t, group):
print(str(t) + ' is not recognized in domain')
#-----------------------------------------------
# Parse hierarchy
#-----------------------------------------------
def parse_hierarchy(self, group, structure, name, redefine):
list = []
while group:
if redefine and group[0] in structure:
raise Exception('Redefined supertype of ' + group[0])
elif group[0] == '-':
if not list:
raise Exception('Unexpected hyphen in ' + name)
group.pop(0)
type = group.pop(0)
if not type in structure:
structure[type] = []
structure[type] += list
list = []
else:
list.append(group.pop(0))
if list:
if not 'object' in structure:
structure['object'] = []
structure['object'] += list
def parse_hierarchy_ag(self, group, structure, name, redefine):
list = []
while group:
if redefine and group[0] in structure:
raise Exception('Redefined supertype of ' + group[0])
elif group[0] == '-':
raise Exception('Unexpected hyphen in ' + name)
else:
list.append(group.pop(0))
if list:
if not 'agent' in structure:
structure['agent'] = []
structure['agent'] += list
#-----------------------------------------------
# Parse objects
#-----------------------------------------------
    def parse_objects(self, group, name):
        """Parse a typed object/constant list into self.objects."""
        self.parse_hierarchy(group, self.objects, name, False)
    def parse_agents(self, group, name):
        """Parse an (untyped) agent list into self.objects['agent']."""
        self.parse_hierarchy_ag(group, self.objects, name, False)
# -----------------------------------------------
# Parse types
# -----------------------------------------------
    def parse_types(self, group):
        """Parse the (:types ...) hierarchy into self.types (redefinition raises)."""
        self.parse_hierarchy(group, self.types, 'types', True)
#-----------------------------------------------
# Parse predicates
#-----------------------------------------------
def parse_predicates(self, group):
for pred in group:
predicate_name = pred.pop(0)
if predicate_name in self.predicates:
raise Exception('Predicate ' + predicate_name + ' redefined')
arguments = {}
untyped_variables = []
while pred:
t = pred.pop(0)
if t == '-':
if not untyped_variables:
raise Exception('Unexpected hyphen in predicates')
type = pred.pop(0)
while untyped_variables:
arguments[untyped_variables.pop(0)] = type
else:
untyped_variables.append(t)
while untyped_variables:
arguments[untyped_variables.pop(0)] = 'object'
self.predicates[predicate_name] = arguments
#-----------------------------------------------
# Parse action
#-----------------------------------------------
    def parse_action(self, group):
        """Parse one (:action ...) block and append an Action to self.actions.

        Consumes the token group in order, dispatching on section keywords
        (:parameters, :act_type, :precondition, :effect, :observers,
        :p_observers, :derive, :exp_effect); anything else goes to the
        parse_action_extended hook. Duplicate action names raise.
        """
        name = group.pop(0)
        if not type(name) is str:
            raise Exception('Action without name definition')
        for act in self.actions:
            if act.name == name:
                raise Exception('Action ' + name + ' redefined')
        parameters = []
        act_type = 'ontic'            # default when no :act_type is given
        positive_preconditions = []
        negative_preconditions = []
        add_effects = []
        del_effects = []
        f_obs = []                    # fully observant agents
        p_obs = []                    # partially observant agents
        derive_cond = []
        explicit_eff = []
        extensions = None
        while group:
            t = group.pop(0)
            if t == ':parameters':
                if not type(group) is list:
                    raise Exception('Error with ' + name + ' parameters')
                parameters = []
                untyped_parameters = []
                p = group.pop(0)
                # Same typed-list convention as parse_hierarchy:
                # names before '- type' get that type, else 'object'.
                while p:
                    t = p.pop(0)
                    if t == '-':
                        if not untyped_parameters:
                            raise Exception('Unexpected hyphen in ' + name + ' parameters')
                        ptype = p.pop(0)
                        while untyped_parameters:
                            parameters.append([untyped_parameters.pop(0), ptype])
                    else:
                        untyped_parameters.append(t)
                while untyped_parameters:
                    parameters.append([untyped_parameters.pop(0), 'object'])
            elif t == ':act_type':
                act_type = self.assign_act_type(group.pop(0))
            elif t == ':precondition':
                self.split_predicates(group.pop(0), positive_preconditions, negative_preconditions, name, ' preconditions')
            elif t == ':effect':
                # Effects and observers share the recursive reader, which
                # handles 'and', 'when' and 'forall' constructs.
                self.recoursive_reading(group.pop(0), [['']], [['']], [['']], 0, add_effects, del_effects, name, ' effects')
            elif t == ':observers':
                self.recoursive_reading(group.pop(0), [['']], [['']], [['']], 0, f_obs, [], name, ' agents')
            elif t == ':p_observers':
                self.recoursive_reading(group.pop(0), [['']], [['']], [['']], 0, p_obs, [], name, ' agents')
            elif t == ":derive":
                derive_cond = group.pop(0)
            elif t == ":exp_effect":
                explicit_eff = group.pop(0)
            else: extensions = self.parse_action_extended(t, group)
        self.actions.append(Action(name, act_type, parameters, positive_preconditions, negative_preconditions, add_effects, del_effects, f_obs, p_obs, derive_cond, explicit_eff, extensions))
def parse_action_extended(self, t, group):
print(str(t) + ' is not recognized in action')
#-----------------------------------------------
# Parse problem
#-----------------------------------------------
    def parse_problem(self, problem_filename):
        """Parse an E-PDDL problem file.

        Populates self.problem_name, objects, agents, depth (default 2),
        initial state (self.state) and positive/negative goals. Raises if
        the problem references a different domain or is not '(define ...)'.
        """
        # Default depth value
        self.depth = 2
        def frozenset_of_tuples(data):
            # NOTE(review): helper is currently unused inside this method.
            return frozenset([tuple(t) for t in data])
        tokens = self.scan_tokens(problem_filename)
        if type(tokens) is list and tokens.pop(0) == 'define':
            self.problem_name = 'unknown'
            self.state = frozenset()
            self.positive_goals = frozenset()
            self.negative_goals = frozenset()
            while tokens:
                group = tokens.pop(0)
                t = group.pop(0)
                if t == 'problem':
                    self.problem_name = group[0]
                elif t == ':domain':
                    if self.domain_name != group[0]:
                        raise Exception('Different domain specified in problem file')
                elif t == ':requirements':
                    pass # Ignore requirements in problem, parse them in the domain
                elif t == ':objects':
                    self.parse_objects(group, t)
                elif t == ':agents':
                    self.parse_agents(group, t)
                elif t == ':depth':
                    self.depth = group[0]
                elif t == ':init':
                    init = []
                    # Prepend 'and' so split_predicates treats the whole
                    # group as one conjunction.
                    group.insert(0,'and')
                    self.split_predicates(group, init, [], '', 'init')
                    self.state = init
                elif t == ':goal':
                    positive_goals = []
                    negative_goals = []
                    group.insert(0,'and')
                    self.split_predicates(group, positive_goals, negative_goals, '', 'goals')
                    self.positive_goals = positive_goals
                    self.negative_goals = negative_goals
                else: self.parse_problem_extended(t, group)
        else:
            raise Exception('File ' + problem_filename + ' does not match problem pattern')
def parse_problem_extended(self, t, group):
print(str(t) + ' is not recognized in problem')
#-----------------------------------------------
# Split predicates
#-----------------------------------------------
    def split_predicates(self, group, positive, negative, name, part):
        """Split a (possibly 'and'-wrapped) predicate group into the given
        `positive` and `negative` output lists.

        Belief/common-knowledge predicates ('B(' / 'C(' prefix) wrapping a
        'not' are normalised by prefixing the inner fluent with '-' and kept
        positive; a plain top-level 'not' sends the predicate to `negative`.
        """
        if not type(group) is list:
            raise Exception('Error with ' + name + part)
        if group[0] == 'and':
            group.pop(0)
        else:
            # A single predicate: treat it as a one-element conjunction.
            group = [group]
        for predicate in group:
            if 'B(' in predicate[0] or 'C(' in predicate[0]:
                if type(predicate[1]) is list:
                    if predicate[1][0] == 'not':
                        if len(predicate[1][1]) > 0:
                            # Rebuild [B(..., '-fluent', args...] from
                            # [B(..., ['not', [fluent, args...]]].
                            i = 0
                            tmp_predicate=[]
                            tmp_predicate.insert(0,predicate[0])
                            while i < len(predicate[1][1]):
                                if (i == 0):
                                    tmp_predicate.insert(i+1,'-'+predicate[1][1][0])
                                else:
                                    tmp_predicate.insert(i+1,predicate[1][1][i])
                                i = i+1
                            predicate = tmp_predicate
                        else:
                            raise Exception('Expected predicate after a \'not\'')
            if predicate[0] == 'not':
                if len(predicate) != 2:
                    raise Exception('Unexpected not in ' + name + part)
                negative.append(predicate[-1])
            else:
                positive.append(predicate)
    def recoursive_reading(self, body, head_positive, head_negative, diff, subProcedure, positive, negative, name, part):
        """Recursively read an effect/observer expression tree.

        Handles 'and' (recurse over conjuncts), 'when' (parse the condition,
        then attach it to the body's entries), 'forall' (agents only: mark
        the bound variable with FASTART/FASTOP sentinels for later grounding)
        and 'not'. With subProcedure == 0 results are appended to
        `positive`/`negative` as (item, pos_conditions, neg_conditions, diff)
        tuples; with subProcedure == 1 the parsed value is only returned to
        the caller (used when reading a 'when' condition).

        NOTE(review): mutates `body` in place via pop(0); [['']] is the
        conventional "no condition" placeholder throughout this class.
        """
        if not type(body) is list:
            raise Exception('Error with ' + name + part)
        if body[0] == 'and':
            body.pop(0)
            and_count = 0
            total_body = []
            while and_count < len(body):
                total_body.append(self.recoursive_reading(body[and_count], head_positive, head_negative, diff, subProcedure, positive, negative, name, part))
                and_count = and_count + 1
            # Partition the conjuncts into (positives, negatives) by the
            # 0/1 flag each recursive call returned.
            ret = ([],[])
            for elem in total_body:
                if elem:
                    if elem[1] == 0:
                        ret[0].append(elem[0])
                    else:
                        ret[1].append(elem[0])
            return ret
        elif body[0] == 'when':
            body.pop(0)
            condition = body[0]
            body.pop(0)
            if (condition[0] == 'when' or condition[0] == 'forall'):
                raise Exception('Error with ' + name + part + ' you cannot embed other keywords, other than \'and\', in the \'when\' condition')
            elif condition[0] == 'and':
                # Parse the conjunction as a sub-procedure (no appending).
                condition = self.recoursive_reading(condition, [['']], [['']], [['']], 1, positive, negative, name, part)
                pos_condition = condition[0]
                neg_condition = condition[1]
            elif condition[0] == 'not':
                condition.pop(0)
                neg_condition = condition
                pos_condition = [['']]
            else:
                pos_condition = [condition]
                neg_condition = [['']]
            rule = body[0]
            body.pop(0)
            if (rule[0] == 'when' or rule[0] == 'forall'):
                raise Exception('Error with ' + name + part + ' you cannot embed other keywords, other than \'and\', in the \'when\' body')
            # Re-read the body with the parsed condition attached.
            self.recoursive_reading(rule,pos_condition,neg_condition, diff, subProcedure, positive, negative, name, part)
            return(rule,pos_condition,neg_condition)
        elif body[0] == 'forall':
            if part != ' agents':
                raise Exception('\'Forall\' keyword only implemented for agents')
            else:
                body.pop(0)
                head = body[0]
                body.pop(0)
                # Optional (diff ?v ag): exclude one agent when grounding.
                if head[0] == 'diff':
                    head.pop(0)
                    if len(head) != 2:
                        raise Exception('Bad \'diff\' construction')
                    else:
                        diff = [head[1]]
                        head = head[0]
                if (head[0] == 'when' or head[0] == 'forall' or head[0] == 'and' or head[0] == 'not'):
                    raise Exception('Error with ' + name + part + ' you cannot embed other keywords in the \'forall\' condition')
                else:
                    # Sentinels mark the quantified variable so the EFP
                    # printer can substitute each concrete agent later.
                    fa_start = "FASTART"
                    fa_stop = "FASTOP"
                    rule = body[0]
                    body.pop(0)
                    for v in head:
                        if '?' in v:
                            if v in rule:
                                rule[rule.index(v)] = fa_start + rule[rule.index(v)] + fa_stop
                                self.recoursive_reading(rule,[['']], [['']],[['']], subProcedure, positive, negative, name, part)
                            elif rule[0] == 'when':
                                parsed_rule = self.recoursive_reading(rule,[['']], [['']],[['']], 1, positive, negative, name, part)
                                # Wrap every occurrence of v in the rule and
                                # in both condition lists with the sentinels.
                                i = 0
                                while i < 3:
                                    if i > 0:
                                        j = 0
                                        while j < len(parsed_rule[i]):
                                            if v in parsed_rule[i][j]:
                                                parsed_rule[i][j][parsed_rule[i][j].index(v)] = fa_start + parsed_rule[i][j][parsed_rule[i][j].index(v)] + fa_stop
                                            j = j+1
                                    else:
                                        if v in parsed_rule[i]:
                                            parsed_rule[i][parsed_rule[i].index(v)] = fa_start + parsed_rule[i][parsed_rule[i].index(v)] + fa_stop
                                    i = i+1
                                self.recoursive_reading(parsed_rule[0],parsed_rule[1],parsed_rule[2], diff, subProcedure, positive, negative, name, part)
                            else:
                                raise Exception('To many nested command in the agents\' observability')
        elif body[0] == 'not':
            if len(body) != 2:
                raise Exception('Unexpected not in ' + name + part)
            if subProcedure == 0:
                negative.append((body[-1], head_positive, head_negative,diff))
            return (body[-1], 1)
        else:
            if subProcedure == 0:
                positive.append((body, head_positive, head_negative,diff))
            return (body, 0)
def assign_act_type(self, name):
name = name.lower()
if name == 'ontic' or name == 'announcement' or name == 'sensing':
return name.lower()
else:
raise Exception('Error with the action type definition. Please select one of the following: \'ontic\', \'sensing\', \'announcement\'')
#-----------------------------------------------
# Print EFP
#-----------------------------------------------
    def print_EFP(self):
        """Ground the parsed specification and write it in EFP (mAp) syntax.

        Produces out/efp/<domain>_<problem>.txt containing fluents, action
        and agent names, grounded action specifications, the initial state
        and the goals.

        NOTE(review): reads the module-level global `parser` for actions,
        objects and types rather than `self` -- correct only when this
        instance is bound to that global. Also assumes the 'out/' directory
        already exists (mkdir is not called with parents=True).
        """
        #########File NAME
        output_folder = "out/efp"
        Path(output_folder).mkdir(exist_ok=True)
        file_name = self.domain_name + '_' + self.problem_name
        out = open(output_folder + "/" + file_name+".txt", "w")
        out.write("%This file is automatically generated from an E-PDDL specification and follows the mAp syntax.\n\n")
        # Generate grounded actions and add grounded fluents
        fluents = set()
        ground_actions = []
        for action in parser.actions:
            for act in action.groundify(parser.objects, parser.types, self.requirements, fluents):
                # Grounded action names embed their grounded parameters.
                act_name = act.name
                for parameter in act.parameters:
                    act_name += '_'+parameter
                act.name = act_name
                ground_actions.append(act)
        #########FLuents
        self.generate_fluents_EFP(fluents)
        if '' in fluents:
            fluents.remove('')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%% FLUENTS %%%%%%%%%%%%%%%%%%%%%%%%\n')
        out.write('%Fluents generated from EPDDL by grounding each predicate (and cheking in :init, :goal and actions for extra predicates)\n')
        out.write('%The fluents are lexicographically sorted and printed in sets of 10\n\n')
        out.write('fluent ')
        fl_count = 0
        for fluent in sorted(fluents):
            out.write(str(fluent))
            # Break into a new 'fluent ' statement every ten entries.
            if (fl_count != len(fluents)-1):
                if((fl_count+1)%10 == 0):
                    out.write(';\nfluent ')
                else:
                    out.write(', ')
            fl_count +=1
        out.write(';\n\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n')
        #########Actions Names
        out.write('%%%%%%%%%%%%%%%%%%%%% ACTIONS\' NAMES %%%%%%%%%%%%%%%%%%%%%\n')
        out.write('%Actions\' names generated from EPDDL by adding to each action names its grounded predicates\n\n')
        out.write('action ')
        act_count = 0
        for action in ground_actions:
            out.write(action.name)
            if (act_count != len(ground_actions)-1):
                if((act_count+1)%10 == 0):
                    out.write(';\naction ')
                else:
                    out.write(', ')
            act_count +=1
        out.write(';\n\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n')
        out.write('%%%%%%%%%%%%%%%%%%%%% AGENTS\' NAMES %%%%%%%%%%%%%%%%%%%%%%\n')
        out.write('%Agents\' names generated from EPDDL by looking at the \'agent\' predicate\n\n')
        out.write('agent ')
        ag_count = 0
        for agent in self.objects['agent']:
            out.write(agent)
            if (ag_count != len(self.objects['agent'])-1):
                if((ag_count+1)%10 == 0):
                    out.write(';\nagent ')
                else:
                    out.write(', ')
            ag_count +=1
        out.write(';\n\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n')
        #########Actions Specifications
        out.write('%%%%%%%%%%%%%%%%% ACTIONS\' SPECIFICATIONS %%%%%%%%%%%%%%%%\n')
        out.write('%Actions\' specifications generated from EPDDL by grounding each action\'s definition\n\n')
        for action in ground_actions:
            out.write('%%%Action ' + action.name + '\n\n')
            out.write('executable ' + action.name)
            self.print_precondition_EFP(action, out)
            self.print_effects_EFP(action, out)
            self.print_observers_EFP(action, 1, out)
            self.print_observers_EFP(action, 0, out)
            out.write('\n%%%\n\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n')
        #########Actions Specifications
        out.write('%%%%%%%%%%%%%%%%%% INITIAL FLUENTS TRUTH %%%%%%%%%%%%%%%%%%\n')
        out.write('%Fluents are considered true when are inserted in :init; otherwise are considered false\n\n')
        out.write('%%%True fluents\n')
        out.write('initially ')
        ini_count = 0
        true_fluents = set()
        belief_ini= set()
        temp_ini = list(self.state)
        for index, ini_f in enumerate(temp_ini):
            ini_fs = self.unify_fluent_EFP(ini_f)
            # Belief formulae are collected separately and printed later.
            if 'B(' in ini_fs or 'C(' in ini_fs:
                belief_ini.add(ini_fs)
            else:
                out.write(ini_fs)
                true_fluents.add(ini_fs)
                # Emit a separator only when the next entry is also plain.
                if ( (index+1 < len(temp_ini)) and ('B(' not in temp_ini[index+1][0] and 'C(' not in temp_ini[index+1][0])):
                    out.write(', ')
        out.write(';\n')
        # Everything not listed as true in :init is initially false.
        neg_fluents = fluents - true_fluents
        out.write('%%%False fluents\n')
        out.write('initially ')
        ini_count = 0
        for ini_f in neg_fluents:
            out.write('-'+ini_f)
            if (ini_count != len(neg_fluents)-1):
                out.write(', ')
            ini_count+=1
        out.write(';\n')
        out.write('\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n')
        out.write('%%%%%%%%%%%%%%%%%% INITIAL BELIEFS TRUTH %%%%%%%%%%%%%%%%%%\n')
        out.write('%Extracted from the :init field\n\n')
        ini_count = 0
        for ini_bf in belief_ini:
            out.write('initially ')
            out.write(ini_bf)
            if (ini_count != len(belief_ini)-1):
                out.write(';\n')
            ini_count+=1
        out.write(';\n')
        out.write('\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%% GOALS %%%%%%%%%%%%%%%%%%%%%%%%%%\n')
        out.write('%The goals of the plan. Each goal is presented separately to ease the reading\n\n')
        for goal_f in self.positive_goals:
            out.write('goal ')
            goal_fs = self.unify_fluent_EFP(goal_f)
            out.write(goal_fs + ';\n')
        for goal_f in self.negative_goals:
            out.write('goal ')
            goal_fs = self.unify_fluent_EFP(goal_f)
            out.write(goal_fs + ';\n')
        out.write('\n')
        out.write('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n')
        out.close()
    def unify_fluent_EFP(self,given_list):
        # Delegate fluent formatting to the Action helper (effectively
        # static: self is unused).
        return Action.unify_fluent_EFP(given_list)
def generate_fluents_EFP(self, fluents_set):
for ini_f in self.state:
fluent = self.unify_fluent_EFP(ini_f)
if 'B(' not in fluent and 'C(' not in fluent:
fluents_set.add(fluent)
for goal_f in self.positive_goals:
fluent = self.unify_fluent_EFP(goal_f)
if 'B(' not in fluent and 'C(' not in fluent:
fluents_set.add(fluent)
for goal_f in self.negative_goals:
fluent = self.unify_fluent_EFP(goal_f)
if 'B(' not in fluent and 'C(' not in fluent:
fluents_set.add(fluent)
#duplicates = True
duplicates = False
if ':no-duplicates' in self.requirements:
duplicates = False
for predicate in self.predicates.items():
#print('original:' + str(predicate))
type_map = []
variables = []
pred_ini=[]
pred_ini.append(predicate[0])
for var in self.predicates[predicate[0]]:
type = self.predicates[predicate[0]][var]
#print ('Type: ' + str(type) + ' var: ' + var + ' predicate: ' + predicate[0])
pred_ini.append(var)
type_stack = [type]
items = []
while type_stack:
t = type_stack.pop()
if t in parser.objects:
items += parser.objects[t]
elif t in parser.types:
type_stack += parser.types[t]
else:
raise Exception('Unrecognized type ' + t)
type_map.append(items)
variables.append(var)
for assignment in itertools.product(*type_map):
if (not duplicates and len(assignment) == len(set(assignment))) or duplicates:
#pred = predicate
pred = list(pred_ini)
iv = 0
# print(str(variables))
# print(str(assignment))
for v in variables:
while v in pred:
pred[pred.index(v)] = assignment[iv]
iv += 1
fluent = self.unify_fluent_EFP(pred)
if 'B(' not in fluent and 'C(' not in fluent:
fluents_set.add(fluent)
    def print_precondition_EFP(self,action,out):
        """Write the ' if <preconditions>;' clause for an executable line,
        positives first, then negated ones; nothing if there are none."""
        if (len(action.positive_preconditions)+len(action.negative_preconditions) > 0):
            out.write(' if ' )
            self.subprint_precondition_EFP(action, 1, out)
            self.subprint_precondition_EFP(action, 0, out)
            out.write(';\n')
def reorder_bf_list(self, list):
ret = []
for elem in list:
if 'B(' in elem[0]:
ret.insert(0,elem)
else:
ret.append(elem)
return ret
    def subprint_precondition_EFP(self,action,is_postive,out):
        """Write one comma-separated precondition list (positive when
        is_postive == 1, otherwise negated with a '-' prefix)."""
        positive_pre = True
        if (is_postive == 1):
            preconditions = action.positive_preconditions
        else:
            positive_pre = False
            preconditions = action.negative_preconditions
        count = 0
        # Belief formulae are printed first (reorder_bf_list convention).
        preconditions = self.reorder_bf_list(preconditions)
        for i in preconditions:
            fluent = self.unify_fluent_EFP(i)
            if (positive_pre):
                out.write(fluent)
            else:
                out.write('-'+ fluent + '')
            # Keep a trailing separator after the positive list when
            # negative preconditions will follow it.
            if (count < len(preconditions)-1) or (positive_pre and len(action.negative_preconditions) > 0):
                out.write(', ')
            count +=1
    def print_effects_EFP(self,action,out):
        """Write one statement per (conditional) effect of the action.

        The verb depends on the action type: sensing -> 'determines',
        announcement -> 'announces', anything else (ontic) -> 'causes'.
        Each effect tuple is (fluent, pos_conditions, neg_conditions, ...).
        """
        if (action.act_type == 'sensing'):
            act_type = ' determines '
        elif (action.act_type == 'announcement'):
            act_type = ' announces '
        else:
            act_type = ' causes '
        if (len(action.add_effects) > 0):
            for i in action.add_effects:
                out.write(action.name + act_type)
                fluent = self.unify_fluent_EFP(i[0])
                out.write(fluent)
                self.print_conditions_EFP(i[1],i[2],out)
                out.write(';\n')
        if (len(action.del_effects) > 0):
            # Delete effects are the same statements with a negated fluent.
            for i in action.del_effects:
                out.write(action.name + act_type)
                fluent = self.unify_fluent_EFP(i[0])
                out.write('-'+ fluent + '')
                self.print_conditions_EFP(i[1],i[2],out)
                out.write(';\n')
    def print_observers_EFP(self,action,fully,out):
        """Write observability statements for an action.

        fully == 1 emits 'observes' lines from action.observers, otherwise
        'aware_of' lines from action.p_observers. Agents still wrapped in
        FASTART/FASTOP sentinels come from a 'forall' and are expanded to
        every declared agent (minus the 'diff' exclusion in ags[3], if any),
        substituting the sentinel inside the conditions per agent.
        """
        if fully == 1:
            obs_type = ' observes '
            observers = action.observers
        else:
            obs_type = ' aware_of '
            observers = action.p_observers
        if (len(observers) > 0):
            for ags in observers:
                for ag in ags[0]:
                    if 'FASTART' in ag:
                        for agent in self.objects['agent']:
                            # Skip the agent excluded by a (diff ...) clause.
                            notPrint = 0
                            if ags[3][0][0] != '':
                                if agent == ags[3][0][0]:
                                    notPrint = 1
                            if notPrint == 0:
                                # Work on a copy so each agent gets its own
                                # substituted conditions.
                                tmp_cond = [[]]
                                self.copy_cond_list(ags,tmp_cond)
                                out.write(agent + obs_type + action.name)
                                self.substitute_ag(tmp_cond[1],agent)
                                self.substitute_ag(tmp_cond[2],agent)
                                self.print_conditions_EFP(tmp_cond[1],tmp_cond[2],out)
                                out.write(';\n')
                    else:
                        out.write(str(ag) + obs_type + action.name)
                        self.print_conditions_EFP(ags[1],ags[2],out)
                        out.write(';\n')
    def copy_cond_list(self, agents, temp):
        """Copy an observer tuple into `temp`, deep-copying the condition
        lists (indices > 0) so substitutions do not touch the original.

        Index 0 (the agent list) is copied by reference; deeper levels are
        rebuilt element by element. Entries are inserted in front of any
        placeholder already in `temp` (callers pass [[]]).
        """
        i = 0
        while i < len(agents):
            sub_temp = []
            j = 0
            while j < len(agents[i]):
                if i > 0:
                    # Condition entry: copy its elements one by one.
                    k = 0
                    sub_sub_temp = []
                    while k < len(agents[i][j]):
                        sub_sub_temp.insert(k,agents[i][j][k])
                        k = k+1
                else:
                    # Agent-list entry: shared by reference.
                    sub_sub_temp = agents[i][j]
                sub_temp.insert(j, sub_sub_temp)
                j = j+1
            temp.insert(i, sub_temp)
            i = i+1
def substitute_ag(self, conds, agent):
for cond in conds:
for elem in cond:
if 'FASTART' in elem:
conds[conds.index(cond)][cond.index(elem)] = re.sub(r'(FASTART\S+FASTOP)', agent ,elem)
def print_conditions_EFP(self,pos_cond,neg_cond,out):
yet_to_print = 1
if self.subprint_cond_EFP(pos_cond,1,out, yet_to_print) == 1:
yet_to_print = 0;
self.subprint_cond_EFP(neg_cond,0,out, yet_to_print);
def subprint_cond_EFP(self,conditions,isPos,out, yet_to_print):
printed = 0
for condition in conditions:
if '' in condition:
condition.remove('')
for condition in conditions:
if not condition:
conditions.remove(condition)
if conditions:
count_cond = 0
if (yet_to_print == 1):
out.write( ' if ' )
printed = 1
else:
out.write(', ')
conditions = self.reorder_bf_list(conditions)
for condition in conditions:
cond = self.unify_fluent_EFP(condition)
if not isPos:
out.write('-')
out.write(cond)
if count_cond < len(conditions)-1:
out.write(', ')
count_cond = count_cond +1
return printed
#-----------------------------------------------
# Print PDKB
#-----------------------------------------------
    def print_PDKB(self):
        """Write the parsed spec in PDKB-PDDL syntax (domain + problem files).

        NOTE(review): assumes the 'out/' directory already exists — mkdir is
        not called with parents=True.
        """
        #########File NAME
        output_folder = "out/pdkb"
        Path(output_folder).mkdir(exist_ok=True)
        self.print_domain_pdkb(output_folder)
        self.print_problem_pdkb(output_folder)
def print_domain_pdkb(self, output_folder):
out = open(output_folder + "/" + self.domain_name+".pdkbpddl", "w")
out.write(";This file is automatically generated from an E-PDDL specification and follows the PDKB-PDDL syntax.\n\n")
out.write(';;;;;;;;;;;;;;;;;;;; DOMAIN\'S FEATURES ;;;;;;;;;;;;;;;;;;;\n\n')
out.write('(define (domain ' + self.domain_name +')\n\n')
count_types = 0
out.write('\t(:agents')
for elem in self.objects['agent']:
out.write(' ' + elem)
out.write(')\n\n')
out.write('\t(:constants)\n\n')
count_types = 0
out.write('\t(:types')
for elem in self.types:
for el in self.types[elem]:
out.write('\n\t ' + el)
if count_types == len(self.types[elem])-1:
out.write('\n\t')
count_types = count_types+1
out.write(')\n\n')
out.write('\t(:predicates')
for elem in self.predicates:
out.write( ' (' + elem)
for el in self.predicates[elem]:
out.write(' ' + el + ' - ',)
out.write(self.predicates[elem][el])
out.write(')')
out.write(')\n\n')
out.write(';;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n\n\n')
out.write(';;;;;;;;;;;;;;;;; ACTIONS\' SPECIFICATIONS ;;;;;;;;;;;;;;;;\n\n')
for action in parser.actions:
out.write(';;;Action ' + action.name + '\n\n')
out.write('\t(:action ' + action.name + '\n')
self.print_parameters_PDKB(action, out)
if not self.print_expl_derive_condition_PDKB(action,out):
self.print_derive_condition_PDKB(action, out)
self.print_precondition_PDKB(action, out)
if not self.print_expl_effects_PDKB(action,out):
self.print_effects_PDKB(action, out)
#self.print_observers_EFP(action, 0, out)
out.write('\t)\n;;;\n\n')
out.write(';;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;\n\n\n')
out.write(')')
def print_problem_pdkb(self, output_folder):
out = open(output_folder + "/" + self.problem_name+".pdkbpddl", "w")
out.write(";This file is automatically generated from an E-PDDL specification and follows the PDKB-PDDL syntax.\n\n")
out.write('{include:'+self.domain_name+'.pdkbpddl}\n\n')
out.write('(define (problem '+self.problem_name+')\n')
out.write('\n\t(:domain ' + self.domain_name + ')\n')
out.write('\n\t(:depth '+str(self.depth)+')\n')
if len(self.objects) > 1:
out.write('\n\t(:objects\n')
for obj in self.objects:
if obj != 'agent':
out.write('\t\t')
for elem in self.objects[obj]:
out.write(str(elem)+ ' ')
out.write('- ' + str(obj) + '\n')
out.write('\t)\n')
out.write('\n\t(:projection )\n')
out.write('\n\t(:task valid_generation)\n')
#init print
out.write('\n\t(:init-type complete)')
out.write('\n\t(:init')
t_depth = 1
while (t_depth <= int(self.depth)):
out.write("\n\n\t\t;Depth " + str(t_depth))
for ini_f in self.state:
ini_fs = self.unify_fluent_init_PDKB(ini_f,t_depth)
if ini_fs != '':
out.write('\n\n' + ini_fs)
t_depth+=1
out.write('\n\t)')
out.write('\n')
#goal print
out.write('\n\t(:goal ')
for goal_f in self.positive_goals:
goal_fs = self.unify_fluent_PDKB(goal_f)
out.write('\n\t ' + goal_fs)
for goal_f in self.negative_goals:
goal_fs = self.unify_fluent_PDKB(goal_f)
out.write('\n\t ' + goal_fs)
out.write('\n\t)')
out.write('\n)')
def print_parameters_PDKB(self, action, out):
out.write('\t\t:parameters\t\t\t (')
count_param = 0
for param in action.parameters:
out.write(param[0] + ' - ' + param[1])
count_param = count_param+1
if (count_param < len(action.parameters)):
out.write(' ')
out.write(')\n')
def print_expl_derive_condition_PDKB(self,action,out):
if (len(action.derive_cond) > 0):
out.write('\t\t:derive-condition\t (')
out.write(self.unify_fluent_PDKB(action.derive_cond, True))
out.write(')\n')
return True
return False
    def print_derive_condition_PDKB(self, action, out):
        """Derive the :derive-condition line from the action's observer groups.

        Only a single fully-observant group — an agent entry containing the
        FASTART placeholder (a FORALL over agents) — can be expressed in
        PDKB; richer specifications raise and must use the explicit field.
        """
        out.write('\t\t:derive-condition\t (')
        observers = action.observers
        visited = False
        if (len(observers) > 0):
            for ags in observers:
                for ag in ags[0]:
                    if 'FASTART' in ag:
                        # Second FORALL group: unrepresentable in PDKB.
                        if visited == True:
                            raise Exception('PDKB coversion cannot handle mutiple Fully Observants Group, make use of the explicit fields.')
                        visited = True
                        # ags[3] holds 'diff' operands; PDKB has no
                        # equivalent, so they are only warned about.
                        if len(ags[3]) >0:
                            print("\n********CONVERSION WARNING********")
                            print("The \'diff\' operator cannot be directly translated to PDKB and therefore will be ignored.")
                            print("You should make use of the more explicit fields.")
                            print("**********************************\n")
                        if len(ags[1]) == 1 and self.unify_fluent_PDKB(ags[1][0]) != '':
                            if ags[1] == ags[2]:
                                # Equal positive/negative lists are emitted
                                # as the unconditional keyword.
                                out.write('always')
                            else:
                                # Rewrite the agent placeholder into PDKB's
                                # $agent$ variable.
                                der_cond = self.unify_fluent_PDKB(ags[1][0])
                                der_cond = re.sub(r'FASTART(\S)+FASTOP', r'$agent$',der_cond)
                                out.write(der_cond)
                        elif len(ags[2]) == 1:
                            # Single negative condition: same rewrite, negated.
                            der_cond = self.unify_fluent_PDKB(ags[2][0])
                            der_cond = re.sub(r'FASTART(\S)+FASTOP', r'$agent$',der_cond)
                            out.write('!' + der_cond)
                        # raise Exception('PDKB coversion cannot handle negative condition for Fully Observants Groups, make use of the explicit fields.')
                        elif len(ags[1]) > 1 or len(ags[2]) >1:
                            raise Exception('PDKB coversion cannot handle mutiple Fully Observants Groups, make use of the explicit fields.')
                        else:
                            raise Exception('PDKB coversion cannot handle strange specification for Fully Observants Groups, make use of the explicit fields.')
                        #self.print_conditions_PDKB(ags[1],ags[2],out)
                        out.write(')\n')
        if visited == False:
            # No fully observant FORALL group found.
            out.write('never)\n')
def print_precondition_PDKB(self,action,out):
#if (len(action.positive_preconditions)+len(action.negative_preconditions) > 0):
out.write('\t\t:precondition\t\t (and' )
#+ str([list(i) for i in action.positive_preconditions]) + str([list(i) for i in action.negative_preconditions]))
self.subprint_precondition_PDKB(action, 1, out)
self.subprint_precondition_PDKB(action, 0, out)
out.write(')\n')
def subprint_precondition_PDKB(self,action,is_postive,out):
positive_pre = True
if (is_postive == 1):
preconditions = action.positive_preconditions
else:
positive_pre = False
preconditions = action.negative_preconditions
for i in preconditions:
fluent = self.unify_fluent_PDKB(i)
if not positive_pre:
fluent = '!'+ fluent
if not '[' in fluent:
fluent = '('+ fluent + ')'
out.write(' '+ fluent)
def print_expl_effects_PDKB(self,action,out):
if (len(action.explicit_eff) > 0):
out.write('\t\t:effect\t\t\t\t (')
out.write(self.unify_fluent_PDKB(action.explicit_eff, True))
out.write(')\n')
return True
return False
def print_effects_PDKB(self,action,out):
# if (len(action.p_observers) > 0):
# print("\n********CONVERSION WARNING********")
# print("Partial observability cannot be directly translated to PDKB and therefore will be ignored.")
# print("You should make use of the more explicit fields.")
# print("**********************************\n")
out.write('\t\t:effect\t\t\t\t (and' )
is_ontic = True;
if (action.act_type == 'sensing' or action.act_type == 'announcement'):
is_ontic = False
printed = False
self.subprint_effects_PDKB(action, out, is_ontic, printed, True)
self.subprint_effects_PDKB(action, out, is_ontic, printed, False)
out.write(')\n')
    def subprint_effects_PDKB(self,action,out,is_ontic,printed, is_pos):
        """Write one polarity (add or delete) of the action's effects.

        For ontic actions the bare fluent '(f)' / '(!f)' is printed; each
        explicitly named (non-FORALL) full observer additionally gets a
        '[ag](f)' belief effect, and each named partial observer gets the
        uncertain form '[p_ag][ag](or (f) (!f))'.  `count` wraps the output
        line after every third emitted effect.
        """
        ag_printed = False
        p_ag_printed = False
        count = 0
        if (is_pos):
            effects = action.add_effects
        else:
            effects = action.del_effects
        if (len(effects) > 0):
            for i in effects:
                if count == 3:
                    count = 0
                    out.write('\n\t\t\t\t\t\t\t ')
                fluent = self.unify_fluent_PDKB(i[0])
                # i[1]/i[2] are the effect's positive/negative conditions;
                # printing them opens a '(when (and (' that is closed below.
                if self.print_conditions_PDKB(i[1],i[2],out) == 1:
                    printed = True
                if (is_ontic):
                    count = count + 1
                    if (is_pos):
                        out.write(' ('+ fluent + ')')
                    else:
                        out.write(' (!'+ fluent + ')')
                if (len(action.observers) > 0):
                    for ags in action.observers:
                        for ag in ags[0]:
                            if not 'FASTART' in ag:
                                # Observer-specific conditions also open a
                                # '(when' that must be closed afterwards.
                                if self.print_conditions_PDKB(ags[1],ags[2],out) == 1:
                                    ag_printed = True
                                else:
                                    ag_printed = False
                                if count == 3:
                                    count = 0
                                    out.write('\n\t\t\t\t\t\t\t ')
                                count = count + 1
                                out.write(' [' + ag + '](')
                                if (is_pos):
                                    out.write(fluent + ')')
                                else:
                                    out.write('!'+fluent + ')')
                                if (len(action.p_observers) > 0):
                                    for p_ags in action.p_observers:
                                        for p_ag in p_ags[0]:
                                            if not 'FASTART' in p_ag:
                                                if self.print_conditions_PDKB(p_ags[1],p_ags[2],out) == 1:
                                                    p_ag_printed = True
                                                else:
                                                    p_ag_printed = False
                                                if count == 3:
                                                    count = 0
                                                    out.write('\n\t\t\t\t\t\t\t ')
                                                count = count + 1
                                                # NOTE(review): 'ag' here is
                                                # the current full observer
                                                # from the outer loop —
                                                # confirm the pairing is
                                                # intended for every p_ag.
                                                out.write(' [' + p_ag + '][' + ag + '](or')
                                                out.write(' ('+fluent + ')')
                                                out.write(' (!'+fluent + '))')
                                            else:
                                                print("\n********CONVERSION WARNING********")
                                                print("Partial observability has not fully integrated the FORALL operator.")
                                                print("You should make use of the more explicit fields if the results are not correct.")
                                                print("**********************************\n")
                                            if p_ag_printed:
                                                out.write(')')
                                                p_ag_printed = False
                            else:
                                print("\n********CONVERSION WARNING********")
                                print("Observability has not fully integrated the FORALL operator.")
                                print("Please, check the resulting conversion.\nYou should make use of the more explicit fields if the results are not correct.")
                                print("**********************************\n")
                            if ag_printed:
                                out.write(')')
                                ag_printed = False
                #print ("Obs of " + action.name + ": " + str(ags))
                #for ag in ags[0]:
                #    if not 'FASTART' in ag:
                #        if count == 3:
                #            count = 0
                #            out.write('\n\t\t\t\t\t\t\t ')
                #        count = count + 1
                #        out.write(' [' + ag + '](')
                #        if (is_pos):
                #            out.write(fluent + ')')
                #        else:
                #            out.write('!'+fluent + ')')
                elif not is_ontic:
                    raise Exception('Each action needs at least one fully observant agent.')
                # NOTE(review): `printed` is never reset, so once one effect
                # had conditions every later effect also closes a paren —
                # confirm this matches the intended nesting.
                if printed == True:
                    out.write(')')
def print_conditions_PDKB(self,pos_cond,neg_cond,out):
yet_to_print = 1
if self.subprint_cond_PDKB(pos_cond,1,out, yet_to_print) == 1:
yet_to_print = 0;
if self.subprint_cond_PDKB(neg_cond,0,out, yet_to_print) == 1 or yet_to_print == 0:
out.write(")")
return 1
return 0
def subprint_cond_PDKB(self,conditions,isPos,out, yet_to_print):
printed = 0
for condition in conditions:
if '' in condition:
condition.remove('')
for condition in conditions:
if not condition:
conditions.remove(condition)
if conditions:
count_cond = 0
if (yet_to_print == 1):
out.write( ' (when (and (' )
printed = 1
else:
out.write(' (')
for condition in conditions:
cond = self.unify_fluent_PDKB(condition)
if not isPos:
out.write('!')
out.write(cond)
if count_cond < len(conditions):
out.write(')')
count_cond = count_cond +1
return printed
    def unify_fluent_init_PDKB(self,given_list, t_depth, no_change = False):
        """Render one initial-state fluent at belief depth `t_depth`.

        A common-knowledge operator 'C(...)' ranging over the full agent set
        is expanded into `t_depth` nested '(forall ?agN - agent' belief
        wrappers around the inner fluent; a 'C' over a strict subset is
        returned verbatim.  Returns the formatted string.

        NOTE(review): for fluents without any 'C' operator the forall
        expansion still runs at every depth — confirm against the caller's
        intent (it filters only on the empty string).
        """
        ret = ''
        count = 1          # number of open parens/wrappers to close at the end
        found = False      # True once a full-agent-set 'C(' operator was seen
        to_reprint = False # emit the fluent body at this depth level
        if t_depth == 1:
            to_reprint = True
        new_list = copy.deepcopy(given_list)
        for elem in given_list:
            if 'C(' in elem:
                # Full common knowledge only when the operator lists every
                # agent (comma count == number of agents).
                if len(self.objects['agent']) == elem.count(','):
                    to_reprint = True
                    if not found:
                        found = True
                else:
                    # 'C' over a subset of agents: print verbatim once.
                    return ('\t\t('+ self.unify_fluent_PDKB(given_list)+') ')
                # Strip the 'C(' element; it is replaced by the B-wrappers.
                del new_list[new_list.index(elem)]
        while count <= t_depth:
            ag_name = '?ag'+str(count)
            # Prepend one belief modality per depth level.
            new_list.insert(0,'B('+ag_name+',')
            t_count = 0
            ret += ('\t\t(forall ' + ag_name + ' - agent\n')
            # Indent the next wrapper one level deeper.
            while t_count < count:
                ret += '\t'
                t_count += 1
            count += 1
        if to_reprint:
            ret+='\t\t'
        if not found:
            # Plain fluent: open its own paren (closed by the loop below).
            ret+= '('
            count += 1
        ret += self.unify_fluent_PDKB(new_list)
        while count > 1:
            ret += ')'
            count -= 1
        #ret+='\n'
        return ret
    def unify_fluent_PDKB(self,given_list, no_change = False):
        # Thin wrapper delegating to the Action helper; the hard-coded third
        # argument is False here (its meaning is defined on
        # Action.unify_fluent_PDKB — confirm there).
        return Action.unify_fluent_PDKB(given_list, no_change, False)
#-----------------------------------------------
# Main
#-----------------------------------------------
if __name__ == '__main__':
    import sys
    # Usage: python <this file> <domain.epddl> <problem.epddl>
    domain = sys.argv[1]
    problem = sys.argv[2]
    parser = EPDDL_Parser()
    parser.parse_domain(domain)
    parser.parse_problem(problem)
    parser.print_EFP()
    print("\nThe given files have been correctly converted to mAp.")
    # Forward slashes: the original message printed 'out\efp' / 'out\pdkb',
    # which matches neither the folders actually created nor a valid path
    # separator on POSIX systems.
    print("The resulting file, called \'" + parser.domain_name + "_" + parser.problem_name + ".txt\', is in the \'out/efp\' folder.\n")
    parser.print_PDKB()
    print("\nThe given files have been correctly converted to PDKB-PDDL.")
    # Extension fixed: the generated files end in '.pdkbpddl', not '.pdkpddl'.
    print("The resulting files, called \'" + parser.domain_name + ".pdkbpddl\' and \'" + parser.problem_name + ".pdkbpddl\', are in the \'out/pdkb\' folder.\n")
|
mkocot/fungus | pelicanconf.py | <filename>pelicanconf.py
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
# Pelican static-site configuration for the FungUs blog. Polish ('pl') is
# the default language; an English sub-site is generated by the
# i18n_subsites plugin configured at the bottom of this file.
AUTHOR = u'm'
SITENAME = u'FungUs'
SITEURL = ''
PATH = 'content'
STATIC_PATHS = ["bua", "cake"]
TIMEZONE = 'Europe/Warsaw'
TYPOGRIFY = True
THEME = "themes/pl/notmyidea"
DEFAULT_LANG = u'pl'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
#LINKS = (('Pelican', 'http://getpelican.com/'),
#         ('Python.org', 'http://python.org/'),
#         ('Jinja2', 'http://jinja.pocoo.org/'),
#         ('You can modify those links in your config file', '#'),)
# Social widget
#SOCIAL = (('You can add links in your config file', '#'),
#          ('Another social link', '#'),)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
# Enable the Jinja i18n extension so theme templates can be translated.
JINJA_ENVIRONMENT = {
    'extensions': ['jinja2.ext.i18n']
}
PLUGINS = ['i18n_subsites']
PLUGIN_PATHS = ['pelican-plugins']
# Per-language overrides: the English sub-site uses its own theme copy and
# the same static paths.
I18N_SUBSITES = {
    'en': {
        "THEME": "themes/en/notmyidea",
        "STATIC_PATHS": STATIC_PATHS,
    }
}
|
liuxb555/earthengine-py-examples | Datasets/Water/jrc_yearly_history.py | import ee
import geemap
# Visualize the JRC Global Surface Water yearly-history water classes for
# 2015 on an interactive geemap map.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.ImageCollection('JRC/GSW1_1/YearlyHistory') \
    .filter(ee.Filter.date('2015-01-01', '2015-12-31'))
waterClass = dataset.select('waterClass')
# Four class values (0-3) mapped to grey/white/light-blue/blue.
waterClassVis = {
    'min': 0.0,
    'max': 3.0,
    'palette': ['cccccc', 'ffffff', '99d9ea', '0000ff'],
}
Map.setCenter(59.414, 45.182, 7)
Map.addLayer(waterClass, waterClassVis, 'Water Class')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | ImageCollection/landsat_simple_composite.py | import ee
import geemap
# Build a cloud-free Landsat 8 simpleComposite from six months of raw
# scenes and display a B6/B5/B4 false-colour combination.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Composite 6 months of Landsat 8.
# Note that the input to simpleComposite is raw data.
l8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')
# The asFloat parameter gives floating-point TOA output instead of
# the UINT8 outputs of the default simpleComposite().
composite = ee.Algorithms.Landsat.simpleComposite(**{
    'collection': l8.filterDate('2015-1-1', '2015-7-1'),
    'asFloat': True
})
# Pick a spot with lots of clouds.
Map.setCenter(-47.6735, -0.6344, 12)
# Display a composite with a band combination chosen from:
# https:#landsat.usgs.gov/how-do-landsat-8-band-combinations-differ-landsat-7-or-landsat-5-satellite-data
Map.addLayer(composite, {'bands': ['B6', 'B5', 'B4'], 'max': [0.3, 0.4, 0.3]})
# Display the map.
Map
|
liuxb555/earthengine-py-examples | AssetManagement/export_FeatureCollection.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Preview a FeatureCollection on a map and export it to Google Drive as a
# single KML file (per-feature export kept below as commented code).
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# NOTE(review): 'ft:' Fusion Table assets are deprecated in Earth Engine;
# this ID likely needs migrating to an EE table asset.
fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP')
polys = fromFT.geometry()
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))
Map.setCenter(lng, lat, 10)
Map.addLayer(fromFT)
taskParams = {
    'driveFolder': 'image',
    'fileFormat': 'KML' # CSV, KMZ, GeoJSON
}
# export all features in a FeatureCollection as one file
task = ee.batch.Export.table(fromFT, 'export_fc', taskParams)
task.start()
# # export each feature in a FeatureCollection as an individual file
# count = fromFT.size().getInfo()
# for i in range(2, 2 + count):
#     fc = fromFT.filter(ee.Filter.eq('system:index', str(i)))
#     task = ee.batch.Export.table(fc, 'watershed-' + str(i), taskParams)
#     task.start()
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/image_stats_by_band.py | <filename>Image/image_stats_by_band.py
import ee
import geemap
# Compute per-band means of a NAIP quarter-quad over its own footprint and
# print the resulting FeatureCollection.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
image = ee.Image('USDA/NAIP/DOQQ/m_3712213_sw_10_1_20140613')
Map.setCenter(-122.466123, 37.769833, 17)
Map.addLayer(image, {'bands': ['N', 'R','G']}, 'NAIP')
geometry = image.geometry()
# forEachBand produces one mean column per band, computed at 10 m scale.
means = image.reduceRegions(geometry, ee.Reducer.mean().forEachBand(image), 10)
print(means.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Terrain/srtm_mtpi.py | <gh_stars>10-100
import ee
import geemap
# Display the global SRTM multi-scale Topographic Position Index (mTPI).
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.Image('CSP/ERGo/1_0/Global/SRTM_mTPI')
srtmMtpi = dataset.select('elevation')
# Symmetric stretch: blue for valleys (negative) to red for ridges (positive).
srtmMtpiVis = {
    'min': -200.0,
    'max': 200.0,
    'palette': ['0b1eff', '4be450', 'fffca4', 'ffa011', 'ff0000'],
}
Map.setCenter(-105.8636, 40.3439, 11)
Map.addLayer(srtmMtpi, srtmMtpiVis, 'SRTM mTPI')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/pansharpen.py | import ee
import geemap
# Pan-sharpen a Landsat 8 RGB composite by swapping the HSV value channel
# for the panchromatic band (B8), then show both layers for comparison.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load a Landsat 8 top-of-atmosphere reflectance image.
image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318')
Map.addLayer(
    image,
    {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.25, 'gamma': [1.1, 1.1, 1]},
    'rgb')
# Convert the RGB bands to the HSV color space.
hsv = image.select(['B4', 'B3', 'B2']).rgbToHsv()
# Swap in the panchromatic band and convert back to RGB.
sharpened = ee.Image.cat([
    hsv.select('hue'), hsv.select('saturation'), image.select('B8')
]).hsvToRgb()
# Display the pan-sharpened result.
Map.setCenter(-122.44829, 37.76664, 13)
Map.addLayer(sharpened,
    {'min': 0, 'max': 0.25, 'gamma': [1.3, 1.3, 1.3]},
    'pan-sharpened')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Vectors/global_land_ice_measurements.py | <filename>Datasets/Vectors/global_land_ice_measurements.py
import ee
import geemap
# Paint GLIMS glacier outlines coloured by their 'area' property.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.FeatureCollection('GLIMS/current')
visParams = {
    'palette': ['gray', 'cyan', 'blue'],
    'min': 0.0,
    'max': 10.0,
    'opacity': 0.8,
}
# paint() burns each feature's 'area' value into a float image for styling.
image = ee.Image().float().paint(dataset, 'area')
Map.setCenter(-35.618, 66.743, 7)
Map.addLayer(image, visParams, 'GLIMS/current')
# Map.addLayer(dataset, {}, 'for Inspector', False)
# Display the map.
Map
|
liuxb555/earthengine-py-examples | FeatureCollection/add_new_attribute.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Add a computed 'sum' property to every Feature in a small collection and
# print the results.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# This function creates a new property that is the sum of two existing properties.
def addField(feature):
    # Renamed from 'sum', which shadowed the Python builtin sum().
    total = ee.Number(feature.get('property1')).add(feature.get('property2'))
    return feature.set({'sum': total})
# Create a FeatureCollection from a list of Features.
features = ee.FeatureCollection([
    ee.Feature(ee.Geometry.Point(-122.4536, 37.7403),
               {'property1': 100, 'property2': 100}),
    ee.Feature(ee.Geometry.Point(-118.2294, 34.039),
               {'property1': 200, 'property2': 300}),
])
# Map the function over the collection.
featureCollection = features.map(addField)
# Print the entire FeatureCollection.
print(featureCollection.getInfo())
# Print a selected property of one Feature.
print(featureCollection.first().get('sum').getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Terrain/us_ned_mtpi.py | <filename>Datasets/Terrain/us_ned_mtpi.py
import ee
import geemap
# Display the US NED multi-scale Topographic Position Index (mTPI).
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.Image('CSP/ERGo/1_0/US/mTPI')
usMtpi = dataset.select('elevation')
# Symmetric stretch: blue for valleys (negative) to red for ridges (positive).
usMtpiVis = {
    'min': -200.0,
    'max': 200.0,
    'palette': ['0b1eff', '4be450', 'fffca4', 'ffa011', 'ff0000'],
}
Map.setCenter(-105.8636, 40.3439, 11)
Map.addLayer(usMtpi, usMtpiVis, 'US mTPI')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/get_band_name_and_type.py | <filename>Image/get_band_name_and_type.py
import ee
import geemap
# List June Landsat 8 TOA scenes at a point and print the first scene's
# band names and band types.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
roi = ee.Geometry.Point([-99.2182, 46.7824])
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
    .filterBounds(roi) \
    .filter(ee.Filter.calendarRange(6, 6, 'month')) \
    .sort('DATE_ACQUIRED')
print(collection.size().getInfo())
first = ee.Image(collection.first())
print(first.bandNames().getInfo())
print(first.bandTypes().getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | AssetManagement/export_TimeSeries.py | # from IPython.display import Image
import ee
# Export cloud-free 2015 Landsat 8 SR scenes over a fixed polygon to Google
# Drive, starting one Drive export task per scene that has any valid
# (non-cloud) pixels.
# define the geometry
geometry = ee.Geometry.Polygon([[116.49078369140625, 39.82219623803342],
                                [116.49456024169922, 39.763105626443306],
                                [116.57455444335938, 39.76336953661037],
                                [116.57421112060547, 39.8211414937017],
                                [116.49078369140625, 39.82219623803342]])
geometry = geometry.bounds()
# mask out cloud covered regions
def maskBadData(image):
    # cfmask == 0 marks clear pixels in the SR product's QA band.
    valid = image.select('cfmask').eq(0)
    clean = image.mask(valid)
    return clean
# get the image collection
LC8 = ee.ImageCollection("LANDSAT/LC8_SR")
LC8_clean = LC8.filterDate("2015-01-01", "2015-12-31").filterBounds(geometry).map(maskBadData)
# get image information
count = LC8_clean.size().getInfo()
sceneList = LC8_clean.aggregate_array('system:index').getInfo()
print(count)
print(sceneList)
# Loop to output each image
for i in range(0, count):
    scenename = 'LANDSAT/LC8_SR/' + sceneList[i]
    valid = ee.Image(scenename).select('cfmask').lt(2).clip(geometry)
    # Mean of the validity mask > 0 means at least one usable pixel remains.
    meanStat = valid.reduceRegion(reducer=ee.Reducer.mean(), maxPixels=1e9).getInfo()
    print(scenename, meanStat)
    if meanStat['cfmask'] > 0:
        print(scenename, " is valid")
        layer = ee.Image(scenename).mask(valid).select(
            ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'], ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'])
        layerClip = layer.clip(geometry)
        # visualize
        # Image(url=layer.getThumbUrl())
        # export
        exportname = 'segID_0_' + sceneList[i]
        task = ee.batch.Export.image.toDrive(image=layerClip, description=exportname, scale=30)
        task.start()
        # ee.batch.Task.list()
    else:
        print(scenename, " is invalid")
    # print(exportname)
# The original script ended with a bare reference to 'Map', which is never
# defined here (no geemap map is created) and raised a NameError; removed.
|
liuxb555/earthengine-py-examples | Datasets/Vectors/us_census_counties.py | <reponame>liuxb555/earthengine-py-examples<filename>Datasets/Vectors/us_census_counties.py
#!/usr/bin/env python
"""Display US Counties.

Paints TIGER/2018 state boundaries coloured by numeric STATEFP and overlays
county outlines from TIGER/2016/Counties.
"""
# import datetime
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
Map.setCenter(-110, 40, 5)
states = ee.FeatureCollection('TIGER/2018/States')
# .filter(ee.Filter.eq('STUSPS', 'MN'))
# // Turn the strings into numbers
states = states.map(lambda f: f.set('STATEFP', ee.Number.parse(f.get('STATEFP'))))
state_image = ee.Image().float().paint(states, 'STATEFP')
visParams = {
    'palette': ['purple', 'blue', 'green', 'yellow', 'orange', 'red'],
    'min': 0,
    'max': 50,
    'opacity': 0.8,
};
counties = ee.FeatureCollection('TIGER/2016/Counties')
# State outlines (value 0, width 2) used for the coloured layer below.
image = ee.Image().paint(states, 0, 2)
Map.setCenter(-99.844, 37.649, 5)
# Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
Map.addLayer(image, visParams, 'TIGER/2016/States');
Map.addLayer(ee.Image().paint(counties, 0, 1), {}, 'TIGER/2016/Counties')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/from_name.py | #!/usr/bin/env python
"""Display an image given its ID."""
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
image = ee.Image('srtm90_v4')
vis_params = {'min': 0, 'max': 3000}
Map.addLayer(image, vis_params,"SRTM")
Map.setCenter(0,0, 2)
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Water/jrc_global_surface_water.py | <reponame>liuxb555/earthengine-py-examples<gh_stars>10-100
import ee
import geemap
# Show the JRC Global Surface Water 'occurrence' band (0-100).
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.Image('JRC/GSW1_1/GlobalSurfaceWater')
occurrence = dataset.select('occurrence');
occurrenceVis = {'min': 0.0, 'max': 100.0, 'palette': ['ffffff', 'ffbbbb', '0000ff']}
Map.setCenter(59.414, 45.182, 6)
Map.addLayer(occurrence, occurrenceVis, 'Occurrence')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/naip_imagery.py | <filename>Datasets/naip_imagery.py
import ee
import geemap
# Display a single NAIP quarter-quad as an N/R/G false-colour layer.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
image = ee.Image('USDA/NAIP/DOQQ/m_4609915_sw_14_1_20100629')
Map.addLayer(image, {'bands': ['N', 'R', 'G']}, 'NAIP')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | MachineLearning/svm_classifier.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Train an RBF-kernel SVM on hand-drawn forest / non-forest rectangles over
# a cloud-free Landsat 8 composite, then map the two-class prediction.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Input imagery is a cloud-free Landsat 8 composite.
l8 = ee.ImageCollection('LANDSAT/LC08/C01/T1')
image = ee.Algorithms.Landsat.simpleComposite(**{
    'collection': l8.filterDate('2018-01-01', '2018-12-31'),
    'asFloat': True
})
# Use these bands for prediction.
bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'B10', 'B11']
# Manually created polygons.
forest1 = ee.Geometry.Rectangle(-63.0187, -9.3958, -62.9793, -9.3443)
forest2 = ee.Geometry.Rectangle(-62.8145, -9.206, -62.7688, -9.1735)
nonForest1 = ee.Geometry.Rectangle(-62.8161, -9.5001, -62.7921, -9.4486)
nonForest2 = ee.Geometry.Rectangle(-62.6788, -9.044, -62.6459, -8.9986)
# Make a FeatureCollection from the hand-made geometries.
polygons = ee.FeatureCollection([
    ee.Feature(nonForest1, {'class': 0}),
    ee.Feature(nonForest2, {'class': 0}),
    ee.Feature(forest1, {'class': 1}),
    ee.Feature(forest2, {'class': 1}),
])
# Get the values for all pixels in each polygon in the training.
training = image.sampleRegions(**{
    # Get the sample from the polygons FeatureCollection.
    'collection': polygons,
    # Keep this list of properties from the polygons.
    'properties': ['class'],
    # Set the scale to get Landsat pixels in the polygons.
    'scale': 30
})
# Create an SVM classifier with custom parameters.
classifier = ee.Classifier.svm(**{
    'kernelType': 'RBF',
    'gamma': 0.5,
    'cost': 10
})
# Train the classifier.
trained = classifier.train(training, 'class', bands)
# Classify the image.
classified = image.classify(trained)
# Display the classification result and the input image.
Map.setCenter(-62.836, -9.2399, 9)
Map.addLayer(image, {'bands': ['B4', 'B3', 'B2'], 'max': 0.5, 'gamma': 2})
Map.addLayer(polygons, {}, 'training polygons')
Map.addLayer(classified,
    {'min': 0, 'max': 1, 'palette': ['red', 'green']},
    'deforestation')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Terrain/us_ned_topo_diversity.py | import ee
import geemap
# Display the CSP ERGo US topographic diversity layer (values 0-1).
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.Image('CSP/ERGo/1_0/US/topoDiversity')
usTopographicDiversity = dataset.select('constant')
usTopographicDiversityVis = {
    'min': 0.0,
    'max': 1.0,
}
Map.setCenter(-111.313, 39.724, 6)
Map.addLayer(
    usTopographicDiversity, usTopographicDiversityVis,
    'US Topographic Diversity')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Filter/filter_string_ends_with.py | <filename>Filter/filter_string_ends_with.py
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
states = ee.FeatureCollection('TIGER/2018/States')
# Select states with its name ending with 'ia'
selected = states.filter(ee.Filter.stringEndsWith('NAME', 'ia'))
Map.centerObject(selected, 6)
# Outline the matches (value 0, stroke width 2) in yellow.
Map.addLayer(ee.Image().paint(selected, 0, 2), {'palette': 'yellow'}, 'Selected')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Vectors/us_census_states.py | <gh_stars>10-100
#!/usr/bin/env python
"""Display US States.

Outlines every TIGER/2018 state boundary in red.
"""
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
fc = ee.FeatureCollection('TIGER/2018/States')
# .filter(ee.Filter.eq('STUSPS', 'MN'))
image = ee.Image().paint(fc, 0, 2)
Map.setCenter(-99.844, 37.649, 5)
Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
# Map.addLayer(fc, {}, 'US States')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/get_image_resolution.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Print the nominal pixel resolution of a NAIP image and a Landsat 8 scene.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
naip = ee.Image('USDA/NAIP/DOQQ/m_3712213_sw_10_1_20140613')
Map.setCenter(-122.466123, 37.769833, 17)
Map.addLayer(naip, {'bands': ['N', 'R','G']}, 'NAIP')
# nominalScale() reports the projection's pixel size in metres.
naip_resolution =naip.select('N').projection().nominalScale()
print("NAIP resolution: ", naip_resolution.getInfo())
landsat = ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318')
landsat_resolution =landsat.select('B1').projection().nominalScale()
print("Landsat resolution: ", landsat_resolution.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Visualization/random_color_visualizer.py |
import ee
import geemap
# Render NLCD 2016 land cover with randomVisualizer(), which assigns an
# arbitrary distinct colour to each class value.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
dataset = ee.Image('USGS/NLCD/NLCD2016')
landcover = ee.Image(dataset.select('landcover'))
Map.setCenter(-95, 38, 5)
Map.addLayer(landcover.randomVisualizer(), {}, 'Landcover')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/landcover_cleanup.py | <filename>Image/landcover_cleanup.py<gh_stars>10-100
#!/usr/bin/env python
"""Landcover cleanup.

Display the MODIS land cover classification image with appropriate colors.
Applies reprojection plus focal mode/max/min smoothing and shows each stage
as its own layer.
"""
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
Map.setCenter(-113.41842, 40.055489, 6)
# Force projection of 500 meters/pixel, which is the native MODIS resolution.
VECTORIZATION_SCALE = 500
image1 = ee.Image('MCD12Q1/MCD12Q1_005_2001_01_01')
image2 = image1.select(['Land_Cover_Type_1'])
image3 = image2.reproject('EPSG:4326', None, 500)
image4 = image3.focal_mode()
image5 = image4.focal_max(3).focal_min(5).focal_max(3)
image6 = image5.reproject('EPSG:4326', None, 500)
PALETTE = [
    'aec3d4', # water
    '152106', '225129', '369b47', '30eb5b', '387242', # forest
    '6a2325', 'c3aa69', 'b76031', 'd9903d', '91af40', # shrub, grass, savannah
    '111149', # wetlands
    'cdb33b', # croplands
    'cc0013', # urban
    '33280d', # crop mosaic
    'd7cdcc', # snow and ice
    'f7e084', # barren
    '6f6f6f' # tundra
]
vis_params = {'min': 0, 'max': 17, 'palette': PALETTE}
Map.addLayer(image2, vis_params, 'IGBP classification')
Map.addLayer(image3, vis_params, 'Reprojected')
Map.addLayer(image4, vis_params, 'Mode')
Map.addLayer(image5, vis_params, 'Smooth')
# NOTE(review): this layer reuses the label 'Smooth' (looks like a
# copy-paste slip); it is actually the re-reprojected smoothed result.
Map.addLayer(image6, vis_params, 'Smooth')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Gena/map_set_center.py | import ee
import geemap
# Demonstrate Map.setCenter using the ALOS AW3D30 DEM as a backdrop.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Add some data to the Map
dem = ee.Image("JAXA/ALOS/AW3D30_V1_1").select('MED')
Map.addLayer(dem, {'min': 0, 'max': 5000, 'palette': ['000000', 'ffffff'] }, 'DEM', True)
# TEST Map.setCenter
Map.setCenter(0, 28, 2.5)
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/canny_edge_detector.py | import ee
import geemap
# Run the Canny edge detector on a Landsat 5 NDVI image and overlay the
# detected edges in red.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Canny Edge Detector example.
# Load an image and compute NDVI from it.
image = ee.Image('LANDSAT/LT05/C01/T1_TOA/LT05_031034_20110619')
ndvi = image.normalizedDifference(['B4','B3'])
# Detect edges in the composite.
canny = ee.Algorithms.CannyEdgeDetector(ndvi, 0.7)
# Mask the image with itself to get rid of areas with no edges.
canny = canny.updateMask(canny)
Map.setCenter(-101.05259, 37.93418, 13)
Map.addLayer(ndvi, {'min': 0, 'max': 1}, 'Landsat NDVI')
Map.addLayer(canny, {'min': 0, 'max': 1, 'palette': 'FF0000'}, 'Canny Edges')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | ImageCollection/filtering_by_band_names.py | <filename>ImageCollection/filtering_by_band_names.py<gh_stars>10-100
import ee
import geemap
# Keep only NAIP images that actually contain an 'N' band, then display
# the first match in N/R/G false colour.
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
roi = ee.Geometry.Point([-99.2182, 46.7824])
collection = ee.ImageCollection('USDA/NAIP/DOQQ') \
    .filterBounds(roi) \
    .filter(ee.Filter.listContains("system:band_names", "N"))
print(collection.size().getInfo())
first = collection.first()
Map.centerObject(first, 13)
Map.addLayer(first, {'bands': ['N', 'R', 'G']}, 'NAIP')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Join/save_best_joins.py | <gh_stars>10-100
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load a primary 'collection': Landsat imagery.
primary = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
    .filterDate('2014-04-01', '2014-06-01') \
    .filterBounds(ee.Geometry.Point(-122.092, 37.42))
# Load a secondary 'collection': GRIDMET meteorological data
gridmet = ee.ImageCollection('IDAHO_EPSCOR/GRIDMET')
# Define a max difference filter to compare timestamps.
# 2 * 24 * 60 * 60 * 1000 ms = two days.
maxDiffFilter = ee.Filter.maxDifference(**{
    'difference': 2 * 24 * 60 * 60 * 1000,
    'leftField': 'system:time_start',
    'rightField': 'system:time_start'
})
# Define the join.
# saveBest attaches, to each primary image, the single best-matching
# secondary image ('bestImage') and the match measure ('timeDiff').
saveBestJoin = ee.Join.saveBest(**{
    'matchKey': 'bestImage',
    'measureKey': 'timeDiff'
})
# Apply the join.
landsatMet = saveBestJoin.apply(primary, gridmet, maxDiffFilter)
# Print the result.
print(landsatMet.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Join/save_all_joins.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load a primary 'collection': Landsat imagery.
primary = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
    .filterDate('2014-04-01', '2014-06-01') \
    .filterBounds(ee.Geometry.Point(-122.092, 37.42))
# Load a secondary 'collection': MODIS imagery.
modSecondary = ee.ImageCollection('MODIS/006/MOD09GA') \
    .filterDate('2014-03-01', '2014-07-01')
# Define an allowable time difference: two days in milliseconds.
twoDaysMillis = 2 * 24 * 60 * 60 * 1000
# Create a time filter to define a match as overlapping timestamps.
# Two images match when either endpoint of one time range lies within
# twoDaysMillis of the other (the Or of the two maxDifference conditions).
timeFilter = ee.Filter.Or(
    ee.Filter.maxDifference(**{
        'difference': twoDaysMillis,
        'leftField': 'system:time_start',
        'rightField': 'system:time_end'
    }),
    ee.Filter.maxDifference(**{
        'difference': twoDaysMillis,
        'leftField': 'system:time_end',
        'rightField': 'system:time_start'
    })
)
# Define the join.
# saveAll stores ALL matching secondary images on each primary image in a
# 'terra' property, ordered ascending by acquisition time.
saveAllJoin = ee.Join.saveAll(**{
    'matchesKey': 'terra',
    'ordering': 'system:time_start',
    'ascending': True
})
# Apply the join.
landsatModis = saveAllJoin.apply(primary, modSecondary, timeFilter)
# Display the result.
print('Join.saveAll:', landsatModis.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Reducer/stats_by_group.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load a collection of US census blocks.
blocks = ee.FeatureCollection('TIGER/2010/Blocks')
# Compute sums of the specified properties, grouped by state code.
# NOTE(review): ee.Filter.neq(prop, {}) appears to be a port of the JS idiom
# neq(prop, null) for dropping features with missing values — confirm that
# {} serializes to null here.
sums = blocks \
    .filter(ee.Filter.And(
        ee.Filter.neq('pop10', {}),
        ee.Filter.neq('housing10', {}))) \
    .reduceColumns(**{
        'selectors': ['pop10', 'housing10', 'statefp10'],
        # repeat(2) applies the sum reducer to the first two selector columns;
        # group(...) partitions rows by selector index 2 (statefp10).
        'reducer': ee.Reducer.sum().repeat(2).group(**{
            'groupField': 2,
            'groupName': 'state-code',
        })
    })
# Print the resultant Dictionary.
print(sums.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | HowEarthEngineWorks/Projections.py | import ee
# Inspect the native projection of a Landsat band.
image = ee.Image('LANDSAT/LC8_L1T/LC80440342014077LGN00').select(0)
print('Projection, crs, and crs_transform:', image.projection().getInfo())
print('Scale in meters:', image.projection().nominalScale().getInfo())
# Bug fix: the original script evaluated `Map` without ever defining it,
# which raises NameError. Create the map that the final expression displays,
# matching the pattern used by the sibling example scripts.
import geemap
Map = geemap.Map(center=[40, -100], zoom=4)
# Display the map.
Map
|
liuxb555/earthengine-py-examples | NAIP/ndwi_map.py | <reponame>liuxb555/earthengine-py-examples<filename>NAIP/ndwi_map.py
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
# Study-area polygons loaded from a (legacy 'ft:') Fusion Table.
fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP')
polys = fromFT.geometry()
# polys = ee.Geometry.Polygon(
#         [[[-99.29615020751953, 46.725459351792374],
#           [-99.2116928100586, 46.72404725733022],
#           [-99.21443939208984, 46.772037733479884],
#           [-99.30267333984375, 46.77321343419932]]])
# Center the view on the study area.
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))
lng_lat = ee.Geometry.Point(lng, lat)
naip = collection.filterBounds(polys)
naip_2015 = naip.filterDate('2015-01-01', '2015-12-31')
# Mosaic all 2015 quarter-quads into one image.
ppr = naip_2015.mosaic()
count = naip_2015.size().getInfo()
print("Count: ", count)
# print(naip_2015.size().getInfo())
# False-color display: NIR/Red/Green.
vis = {'bands': ['N', 'R', 'G']}
Map.setCenter(lng, lat, 12)
Map.addLayer(ppr,vis)
# Map.addLayer(polys)
def NDWI(image):
    """Compute an opened water mask from a NAIP image.

    Takes the normalized difference of the 'G' and 'N' bands, keeps pixels
    with NDWI >= 0.05, retains only connected patches whose pixel count
    reaches the 500-pixel cap, and applies a morphological opening
    (focal_min then focal_max) to clean the result.
    """
    ndwi = image.normalizedDifference(['G', 'N'])
    # Visualization parameters — defined but not used inside this function.
    ndwiViz = {'min': 0, 'max': 1, 'palette': ['00FFFF', '0000FF']}
    ndwiMasked = ndwi.updateMask(ndwi.gte(0.05))
    ndwi_bin = ndwiMasked.gt(0)
    # Count connected pixels per patch, capped at 500 (second arg enables
    # eight-connectivity — matches the eightConnected usage below).
    patch_size = ndwi_bin.connectedPixelCount(500, True)
    # Keep only patches whose count reached the 500-pixel cap.
    large_patches = patch_size.eq(500)
    large_patches = large_patches.updateMask(large_patches)
    # Morphological opening: erode then dilate by 1 pixel.
    opened = large_patches.focal_min(1).focal_max(1)
    return opened
ndwi_collection = naip_2015.map(NDWI)
# Map.addLayer(ndwi_collection)
# print(ndwi_collection.getInfo())
# downConfig = {'scale': 10, "maxPixels": 1.0E13, 'driveFolder': 'image'} # scale means resolution.
# img_lst = ndwi_collection.toList(100)
#
# taskParams = {
#     'driveFolder': 'image',
#     'driveFileNamePrefix': 'ndwi',
#     'fileFormat': 'KML'
# }
#
# for i in range(0, count):
#     image = ee.Image(img_lst.get(i))
#     name = image.get('system:index').getInfo()
#     print(name)
#     # task = ee.batch.Export.image(image, "ndwi2-" + name, downConfig)
#     # task.start()
# Mosaic the per-image water masks and clip to the study area.
mosaic = ndwi_collection.mosaic().clip(polys)
# Vectorize the water mask at scale 1.
fc = mosaic.reduceToVectors(eightConnected=True, maxPixels=59568116121, crs=mosaic.projection(), scale=1)
# Map.addLayer(fc)
taskParams = {
    'driveFolder': 'image',
    'driveFileNamePrefix': 'water',
    'fileFormat': 'KML'
}
count = fromFT.size().getInfo()
Map.setCenter(lng, lat, 10)
# Export one KML of water polygons per watershed feature.
# NOTE(review): assumes the features carry 'system:index' values '2'..'2+count-1'
# — confirm against the source table.
for i in range(2, 2 + count):
    watershed = fromFT.filter(ee.Filter.eq('system:index', str(i)))
    # NOTE(review): local name 're' shadows the stdlib `re` module.
    re = fc.filterBounds(watershed)
    task = ee.batch.Export.table(re, 'watershed-' + str(i), taskParams)
    task.start()
# Map.addLayer(fc)
# lpc = fromFT.filter(ee.Filter.eq('name', 'Little Pipestem Creek'))
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Gena/test_sentinel2.py | import ee
import geemap

# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)

# Median Sentinel-2 composite for a single day, scaled from DN to
# reflectance (divide by 10000) and rendered as SWIR/NIR/Red.
image = ee.ImageCollection('COPERNICUS/S2') \
    .filterDate('2017-01-01', '2017-01-02').median() \
    .divide(10000).visualize(**{'bands': ['B12', 'B8', 'B4'], 'min': 0.05, 'max': 0.5})

Map.setCenter(35.2, 31, 13)
# Label fixed: the collection is filtered to January 2017, not 2018.
Map.addLayer(image, {}, 'Sentinel-2 images January, 2017')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | FeatureCollection/simplify_polygons.py |
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# JRC Global Surface Water dataset; 'transition' encodes water-change classes.
waterSurface = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')
waterChange = waterSurface.select('transition')
# Select Permanent Water Only:
Permanent_Water = 1 # value 1 represents pixels of permanent water, no change
waterMask = waterChange.eq(Permanent_Water) # Water mask boolean = 1 to detect water bodies
# Map.setCenter(24.43874, 61.58173, 10)
# Map.addLayer(waterMask, {}, 'Water Mask')
# Map.centerObject(masked)
# Keep only water pixels (mask out everything else).
OnlyLakes = waterMask.updateMask(waterMask)
# Region of interest polygon.
roi = ee.Geometry.Polygon(
    [[[22.049560546875, 61.171214253920965],
      [22.0330810546875, 60.833021871926185],
      [22.57415771484375, 60.83168327936567],
      [22.5714111328125, 61.171214253920965]]])
# Vectorize the lake mask within the ROI at 30 m scale.
classes = OnlyLakes.reduceToVectors(**{
    'reducer': ee.Reducer.countEvery(),
    'geometry': roi,
    'scale': 30,
    'maxPixels': 1e10
})
# Simplify polygon boundaries with a maxError tolerance of 50.
simpleClasses = classes.geometry().simplify(50)
# Compare original (red) and simplified (blue) outlines.
Map.centerObject(ee.FeatureCollection(roi), 10)
Map.addLayer(ee.Image().paint(classes, 0, 2),{'palette': 'red'}, "original")
Map.addLayer(ee.Image().paint(simpleClasses, 0, 2),{'palette': 'blue'}, "simplified")
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Reducer/weighted_reductions.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load a Landsat 8 input image.
image = ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318')
# Create an arbitrary region.
geometry = ee.Geometry.Rectangle(-122.496, 37.532, -121.554, 37.538)
# Make an NDWI image. It will have one band named 'nd'.
ndwi = image.normalizedDifference(['B3', 'B5'])
# Compute the weighted sum of the NDWI image clipped to the region.
# (Comment fixed: the reducer is ee.Reducer.sum(), not a mean.)
weighted = ndwi.clip(geometry) \
    .reduceRegion(**{
        'reducer': ee.Reducer.sum(),
        'geometry': geometry,
        'scale': 30}) \
    .get('nd')
# Compute the UN-weighted sum of the NDWI image clipped to the region.
unweighted = ndwi.clip(geometry) \
    .reduceRegion(**{
        'reducer': ee.Reducer.sum().unweighted(),
        'geometry': geometry,
        'scale': 30}) \
    .get('nd')
# Observe the difference between weighted and unweighted reductions.
print('weighted:', weighted.getInfo())
print('unweighted', unweighted.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Terrain/srtm.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# SRTM 90 m digital elevation model.
image = ee.Image('srtm90_v4')
# path = image.getDownloadUrl({
#     'scale': 30,
#     'crs': 'EPSG:4326',
#     'region': '[[-120, 35], [-119, 35], [-119, 34], [-120, 34]]'
# })
# Stretch elevations 0-3000 for display.
vis_params = {'min': 0, 'max': 3000}
Map.addLayer(image, vis_params, 'SRTM')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Datasets/Terrain/alos_chili.py | import ee
import geemap

# Interactive map for display.
Map = geemap.Map(center=[40, -100], zoom=4)

# ALOS CHILI (Continuous Heat-Insolation Load Index) single-band image.
chili_band = ee.Image('CSP/ERGo/1_0/Global/ALOS_CHILI').select('constant')

# Stretch the full 0-255 value range and zoom to the Colorado Rockies.
Map.setCenter(-105.8636, 40.3439, 11)
Map.addLayer(chili_band, {'min': 0.0, 'max': 255.0}, 'ALOS CHILI')

# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/image_smoothing.py |
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
image = ee.Image('srtm90_v4')
# Smooth the DEM by averaging each pixel over a square kernel of radius 3.
smoothed = image.reduceNeighborhood(**{
    'reducer': ee.Reducer.mean(),
    'kernel': ee.Kernel.square(3),
})
# vis_params = {'min': 0, 'max': 3000}
# Map.addLayer(image, vis_params, 'SRTM original')
# Map.addLayer(smooth, vis_params, 'SRTM smoothed')
# Compare hillshades of the raw and smoothed terrain.
Map.setCenter(-112.40, 42.53, 12)
Map.addLayer(ee.Terrain.hillshade(image), {}, 'Original hillshade')
Map.addLayer(ee.Terrain.hillshade(smoothed), {}, 'Smoothed hillshade')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | GetStarted/03_finding_images.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# NOTE(review): this unfiltered handle is never used; the collection is
# re-created with filters below.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1')
point = ee.Geometry.Point(-122.262, 37.8719)
start = ee.Date('2014-06-01')
finish = ee.Date('2014-10-01')
# Filter by location and date, then sort ascending by cloud cover so the
# least-cloudy scene comes first.
filteredCollection = ee.ImageCollection('LANDSAT/LC08/C01/T1') \
    .filterBounds(point) \
    .filterDate(start, finish) \
    .sort('CLOUD_COVER', True)
first = filteredCollection.first()
# Define visualization parameters in an object literal.
vizParams = {'bands': ['B5', 'B4', 'B3'],
             'min': 5000, 'max': 15000, 'gamma': 1.3}
Map.addLayer(first, vizParams, 'Landsat 8 image')
# Load a feature collection.
featureCollection = ee.FeatureCollection('TIGER/2016/States')
# Filter the collection.
filteredFC = featureCollection.filter(ee.Filter.eq('NAME', 'California'))
# Display the collection.
# paint() draws only the outlines (value 0, width 2) onto an empty image.
Map.addLayer(ee.Image().paint(filteredFC, 0, 2),
             {'palette': 'red'}, 'California')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | ImageCollection/filtered_composite.py | #!/usr/bin/env python
"""Filter an image collection by date and region to make a median composite.
See also: Clipped composite, which crops the output image
instead of filtering the input collection.
"""
import datetime
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
Map.setCenter(-110, 40, 7)
# Filter to only include images within the colorado and utah boundaries.
polygon = ee.Geometry.Polygon([[
[-109.05, 37.0], [-102.05, 37.0], [-102.05, 41.0], # colorado
[-109.05, 41.0], [-111.05, 41.0], [-111.05, 42.0], # utah
[-114.05, 42.0], [-114.05, 37.0], [-109.05, 37.0]]])
# Create a Landsat 7 composite for Spring of 2000, and filter by
# the bounds of the FeatureCollection.
collection = (ee.ImageCollection('LE7_L1T')
.filterDate("2000-04-01", "2000-07-01")
.filterBounds(polygon))
# Select the median pixel.
image1 = collection.median()
# Select the red, green and blue bands.
image = image1.select('B3', 'B2', 'B1')
Map.addLayer(image, {'gain': [1.4, 1.4, 1.1]})
# Display the map.
Map
|
liuxb555/earthengine-py-examples | FeatureCollection/buffer.py | #!/usr/bin/env python
"""Buffer Example.
Display the area within 2 kilometers of any San Francisco BART station.
"""
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
Map.setCenter(-122.4, 37.7, 11)
bart_stations = ee.FeatureCollection('GOOGLE/EE/DEMOS/bart-locations')
buffered = bart_stations.map(lambda f: f.buffer(2000))
unioned = buffered.union()
Map.addLayer(unioned, {'color': '800080'}, "BART stations")
# Display the map.
Map
|
liuxb555/earthengine-py-examples | FeatureCollection/reducing_feature_collection.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
def areaDiff(feature):
    """Return `feature` with 'diff' set to the squared error between the
    geometry-derived area (sq. km) and the stored 'areasqkm' property."""
    area = feature.geometry().area().divide(1000 * 1000)
    # Compute the difference between computed area and the area property.
    diff = area.subtract(ee.Number.parse(feature.get('areasqkm')))
    # Return the feature with the squared difference set to the 'diff' property.
    return feature.set('diff', diff.pow(2))
# Load watersheds from a data table and filter to the continental US.
sheds = ee.FeatureCollection('USGS/WBD/2017/HUC06') \
    .filterBounds(ee.Geometry.Rectangle(-127.18, 19.39, -62.75, 51.29))
# This function computes the squared difference between an area property
# and area computed directly from the feature's geometry.
# areaDiff = function(feature) {
#     # Compute area in sq. km directly from the geometry.
#     area = feature.geometry().area().divide(1000 * 1000)
#     # Compute the differece between computed area and the area property.
#     diff = area.subtract(ee.Number.parse(feature.get('areasqkm')))
#     # Return the feature with the squared difference set to the 'diff' property.
#     return feature.set('diff', diff.pow(2))
# }
# Calculate RMSE for population of difference pairs.
rmse = ee.Number(
    # Map the difference function over the collection.
    sheds.map(areaDiff)
    # Reduce to get the mean squared difference. \
    .reduceColumns(ee.Reducer.mean(), ['diff']) \
    .get('mean')
) \
    .sqrt()
# Print the result.
print('RMSE=', rmse.getInfo())
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Visualization/visualizing_feature_collection.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load a FeatureCollection from a table dataset: 'RESOLVE' ecoregions.
ecoregions = ee.FeatureCollection('RESOLVE/ECOREGIONS/2017')
# Display as default and with a custom color.
Map.addLayer(ecoregions, {}, 'default display')
Map.addLayer(ecoregions, {'color': 'FF0000'}, 'colored')
Map.addLayer(ecoregions.draw(**{'color': '006600', 'strokeWidth': 5}), {}, 'drawn')
# Create an empty image into which to paint the features, cast to byte.
empty = ee.Image().byte()
# Paint all the polygon edges with the same number and 'width', display.
outline = empty.paint(**{
'featureCollection': ecoregions,
'color': 1,
'width': 3
})
Map.addLayer(outline, {'palette': 'FF0000'}, 'edges')
# Paint the edges with different colors, display.
outlines = empty.paint(**{
'featureCollection': ecoregions,
'color': 'BIOME_NUM',
'width': 4
})
palette = ['FF0000', '00FF00', '0000FF']
Map.addLayer(outlines, {'palette': palette, 'max': 14}, 'different color edges')
# Paint the edges with different colors and 'width's.
outlines = empty.paint(**{
'featureCollection': ecoregions,
'color': 'BIOME_NUM',
'width': 'NNH'
})
Map.addLayer(outlines, {'palette': palette, 'max': 14}, 'different color, width edges')
# Paint the interior of the polygons with different colors.
fills = empty.paint(**{
'featureCollection': ecoregions,
'color': 'BIOME_NUM',
})
Map.addLayer(fills, {'palette': palette, 'max': 14}, 'colored fills')
# Paint both the fill and the edges.
filledOutlines = empty.paint(ecoregions, 'BIOME_NUM').paint(ecoregions, 0, 2)
Map.addLayer(filledOutlines, {'palette': ['000000'] + palette, 'max': 14}, 'edges and fills')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | JavaScripts/FromName.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap

# Interactive map for display.
Map = geemap.Map(center=[40, -100], zoom=4)

# Load the SRTM 90 m DEM by asset ID and show it over the western US,
# stretching elevations from 0 to 3000 for display.
srtm = ee.Image('CGIAR/SRTM90_V4')
Map.setCenter(-110, 40, 5)
Map.addLayer(srtm, {'min': 0, 'max': 3000}, 'SRTM')

# Display the map.
Map
|
liuxb555/earthengine-py-examples | Image/hsv_pan_sharpen.py | <filename>Image/hsv_pan_sharpen.py<gh_stars>10-100
#!/usr/bin/env python
"""HSV-based Pan-Sharpening example."""
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# There are many fine places to look here is one. Comment
# this out if you want to twiddle knobs while panning around.
Map.setCenter(-61.61625, -11.64273, 14)
# Grab a sample L7 image and pull out the RGB and pan bands
# in the range (0, 1). (The range of the pan band values was
# chosen to roughly match the other bands.)
image1 = ee.Image('LANDSAT/LE7/LE72300681999227EDC00')
rgb = image1.select('B3', 'B2', 'B1').unitScale(0, 255)
gray = image1.select('B8').unitScale(0, 155)
# Convert to HSV, swap in the pan band, and convert back to RGB.
huesat = rgb.rgbToHsv().select('hue', 'saturation')
upres = ee.Image.cat(huesat, gray).hsvToRgb()
# Display before and after layers using the same vis parameters.
visparams = {'min': [.15, .15, .25], 'max': [1, .9, .9], 'gamma': 1.6}
# Layer-name typo fixed: 'Orignal' -> 'Original'.
Map.addLayer(rgb, visparams, 'Original')
Map.addLayer(upres, visparams, 'Pansharpened')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | Visualization/image_color_ramp.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
# Load SRTM Digital Elevation Model data.
image = ee.Image('CGIAR/SRTM90_V4')
# Define an SLD style of discrete intervals to apply to the image.
# Adjacent string literals concatenate into the same single XML string the
# original built with '+' and line continuations; the stray JavaScript-style
# statement semicolons were also dropped.
sld_intervals = (
    '<RasterSymbolizer>'
    '<ColorMap type="intervals" extended="false" >'
    '<ColorMapEntry color="#0000ff" quantity="0" label="0"/>'
    '<ColorMapEntry color="#00ff00" quantity="100" label="1-100" />'
    '<ColorMapEntry color="#007f30" quantity="200" label="110-200" />'
    '<ColorMapEntry color="#30b855" quantity="300" label="210-300" />'
    '<ColorMapEntry color="#ff0000" quantity="400" label="310-400" />'
    '<ColorMapEntry color="#ffff00" quantity="1000" label="410-1000" />'
    '</ColorMap>'
    '</RasterSymbolizer>'
)
# Define an sld style color ramp to apply to the image.
sld_ramp = (
    '<RasterSymbolizer>'
    '<ColorMap type="ramp" extended="false" >'
    '<ColorMapEntry color="#0000ff" quantity="0" label="0"/>'
    '<ColorMapEntry color="#00ff00" quantity="100" label="100" />'
    '<ColorMapEntry color="#007f30" quantity="200" label="200" />'
    '<ColorMapEntry color="#30b855" quantity="300" label="300" />'
    '<ColorMapEntry color="#ff0000" quantity="400" label="400" />'
    '<ColorMapEntry color="#ffff00" quantity="500" label="500" />'
    '</ColorMap>'
    '</RasterSymbolizer>'
)
# Add the image to the map using both the color ramp and interval schemes.
Map.setCenter(-76.8054, 42.0289, 8)
Map.addLayer(image.sldStyle(sld_intervals), {}, 'SLD intervals')
Map.addLayer(image.sldStyle(sld_ramp), {}, 'SLD ramp')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | ImageCollection/reducing_collection.py | import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
def addTime(image):
    """Add a band holding the image timestamp, converted from milliseconds
    since epoch to (approximate) fractional years."""
    return image.addBands(image.metadata('system:time_start').divide(1000 * 60 * 60 * 24 * 365))
# Load a Landsat 8 collection for a single path-row.
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
    .filter(ee.Filter.eq('WRS_PATH', 44)) \
    .filter(ee.Filter.eq('WRS_ROW', 34)) \
    .filterDate('2014-01-01', '2015-01-01')
# Compute a median image and display.
median = collection.median()
Map.setCenter(-122.3578, 37.7726, 12)
Map.addLayer(median, {'bands': ['B4', 'B3', 'B2'], 'max': 0.3}, 'median')
# Reduce the collection with a median reducer.
# (Same result as collection.median(), but output bands get a '_median'
# suffix, as the band names used below show.)
median = collection.reduce(ee.Reducer.median())
# Display the median image.
Map.addLayer(median,
             {'bands': ['B4_median', 'B3_median', 'B2_median'], 'max': 0.3},
             'also median')
# # This function adds a band representing the image timestamp.
# addTime = function(image) {
#     return image.addBands(image.metadata('system:time_start')
#     # Convert milliseconds from epoch to years to aid in
#     # interpretation of the following trend calculation. \
#     .divide(1000 * 60 * 60 * 24 * 365))
# }
# Load a MODIS collection, filter to several years of 16 day mosaics,
# and map the time band function over it.
collection = ee.ImageCollection('MODIS/006/MYD13A1') \
    .filterDate('2004-01-01', '2010-10-31') \
    .map(addTime)
# Select the bands to model with the independent variable first.
trend = collection.select(['system:time_start', 'EVI']) \
    .reduce(ee.Reducer.linearFit())
# Display the trend with increasing slopes in green, decreasing in red.
Map.setCenter(-96.943, 39.436, 5)
Map.addLayer(
    trend,
    {'min': 0, 'max': [-100, 100, 10000], 'bands': ['scale', 'scale', 'offset']},
    'EVI trend')
# Display the map.
Map
|
liuxb555/earthengine-py-examples | NAIP/filtering.py | <reponame>liuxb555/earthengine-py-examples
import ee
import geemap
# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)
collection = ee.ImageCollection('USDA/NAIP/DOQQ')
# Study-area polygons loaded from a (legacy 'ft:') Fusion Table.
fromFT = ee.FeatureCollection('ft:1CLldB-ULPyULBT2mxoRNv7enckVF0gCQoD2oH7XP')
polys = fromFT.geometry()
# Center the view on the study area.
centroid = polys.centroid()
lng, lat = centroid.getInfo()['coordinates']
print("lng = {}, lat = {}".format(lng, lat))
# lat = 46.80514
# lng = -99.22023
lng_lat = ee.Geometry.Point(lng, lat)
# naip = collection.filterBounds(lng_lat)
naip = collection.filterBounds(polys)
naip_2015 = naip.filterDate('2015-01-01', '2015-12-31')
# Mosaic the 2015 quarter-quads and clip to the study area.
ppr = naip_2015.mosaic().clip(polys)
# print(naip_2015.size().getInfo())
# False-color display: NIR/Red/Green.
vis = {'bands': ['N', 'R', 'G']}
Map.setCenter(lng, lat, 10)
# Map.addLayer(naip_2015,vis)
Map.addLayer(ppr,vis)
# Map.addLayer(fromFT)
# image = ee.Image('USDA/NAIP/DOQQ/m_4609915_sw_14_1_20100629')
# Map.setCenter(lng, lat, 12)
# Map.addLayer(image,vis)
# Display the map.
Map
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.