import random
# The random module is used to choose a random word from the list of words
# The user can enter their name
name = input("What's your Name? ")
print("Good Luck!", name)
words = ['education', 'rainbow', 'computer', 'science', 'programming',
         'python', 'mathematics', 'player', 'condition',
         'reverse', 'water', 'board', 'hacktoberfest']
# random.choice picks a random word from the given list
word = random.choice(words)
print("Guess the characters")
guesses = ''
# You can reduce/increase the number of turns
turns = 10
while turns > 0:
    # This counts how many characters are still unguessed
    failed = 0
    # Go through the secret word one character at a time
    for char in word:
        # Compare that character with the guesses so far
        if char in guesses:
            print(char)
        else:
            print("_")
            # Every unguessed character adds 1 to the failed count
            failed += 1
    if failed == 0:
        # The user wins the game if no characters are missing
        print("You Win")
        # This prints the correct word
        print("The word is: ", word)
        break
    # Otherwise the user is given the next chance to guess
    guess = input("guess a character:")
    # Every input character is stored in guesses
    guesses += guess
    # Check the input against the characters in the word
    if guess not in word:
        turns -= 1
        # If the character doesn't appear in the word, print "Wrong"
        print("Wrong")
        # This prints the number of turns left
        print("You have", turns, 'more guesses')
        if turns == 0:
            print("You Lose")
|
import discord
import asyncio
from stem_server_roles import HOUSING_ROLE_IDS, MAJOR_ROLE_IDS, CLASS_ROLE_IDS, GRAD_YEAR_ROLE_IDS, SPECIAL_ROLE_IDS, PRONOUN_ROLE_IDS
from discord.utils import get
from bot_helper import del_convo, get_mhom, contains_role
def merge_dict(dicts): # merges dictionaries together
z = dicts[0].copy()
for i in range(1, len(dicts)):
z.update(dicts[i])
return z
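# Illustrative example (the sample dicts below are made up, not from the bot):
# later dictionaries in the list overwrite earlier ones on key collisions.
#   merge_dict([{1: 'a', 2: 'b'}, {2: 'c'}])  ->  {1: 'a', 2: 'c'}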
def capitalize_all_words(in_str, separator):
words = in_str.split(separator)
output = ''
for word in words:
output += word.capitalize() + separator
return output[:-1]
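# Illustrative example (hypothetical inputs): each separated word is
# capitalized and rejoined with the same separator.
#   capitalize_all_words('computer science', ' ')  ->  'Computer Science'
#   capitalize_all_words('he/him', '/')            ->  'He/Him'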
async def list_roles(ctx, client):
getlist = discord.Embed(color=discord.Color.blue())
getlist.set_author(
name='Roles | Use $get [role] in #role-assignment to add a role',
icon_url='https://cdn.discordapp.com/attachments/501594682820788224/558396074868342785/UMass_Stem_discord_logo.png')
housing_role_list = ''
for role in HOUSING_ROLE_IDS.values():
housing_role_list += capitalize_all_words(role[0], ' ') + '\n'
getlist.add_field(name = 'Housing Roles', value=housing_role_list, inline=False)
major_role_list = ''
for role in MAJOR_ROLE_IDS.values():
major_role_list += capitalize_all_words(role[0], ' ') + '\n'
getlist.add_field(name = 'Major Roles', value=major_role_list, inline=False)
grad_role_list = ''
for role in GRAD_YEAR_ROLE_IDS.values():
grad_role_list += capitalize_all_words(role[0], ' ') + '\n'
getlist.add_field(name = 'Graduation Year Roles', value=grad_role_list, inline=False)
pronoun_role_list = ''
for role in PRONOUN_ROLE_IDS.values():
pronoun_role_list += capitalize_all_words(role[0], '/') + '\n'
getlist.add_field(name = 'Pronoun Roles', value=pronoun_role_list, inline=False)
class_role_list = ''
for role in CLASS_ROLE_IDS.values():
name = role[0].upper()
if class_role_list == '':
class_role_list += '__Computer Science__\n'
if name.startswith('CS') or name.startswith('CICS'):
class_role_list += name + ', '
continue
if name.endswith('127'):
class_role_list = class_role_list[:len(class_role_list)-2] # trim last ', '
class_role_list += '\n__Mathematics__\n'
if name.startswith('MATH') or name.startswith('STATS'):
class_role_list += name + ', '
continue
if name.endswith('499'):
class_role_list = class_role_list[:len(class_role_list)-2] # trim last ', '
class_role_list += '\n__Other__\n'
class_role_list += name + ', '
class_role_list = class_role_list[:len(class_role_list)-2] # trim last ', '
getlist.add_field(name = 'Class Specific Roles', value=class_role_list, inline=False)
getlist.set_footer(text='If you want a role added to the server @Caleb or suggest it in #suggestions')
await ctx.message.author.send(embed=getlist)
async def list_my_roles(ctx, client, member):
housing_roles, major_roles, graduation_year, pronoun, class_specific, special = '', '', '', '', '', ''
class_specific_roles, special_roles = [], []
for role in sorted(member.roles, key=lambda x: x.name):
if role.id in PRONOUN_ROLE_IDS:
pronoun = capitalize_all_words(role.name, '/') + '\n'
if role.id in CLASS_ROLE_IDS:
class_specific += role.name.upper() + '\n'
name = capitalize_all_words(role.name, ' ')
if role.id in HOUSING_ROLE_IDS:
housing_roles += name + '\n'
if role.id in MAJOR_ROLE_IDS:
major_roles += name + '\n'
if role.id in GRAD_YEAR_ROLE_IDS:
graduation_year = name + '\n'
if role.id in SPECIAL_ROLE_IDS:
special += name + '\n'
if special:
mylist = discord.Embed(color=0xb5a2c8, description= '**' + special + '**')
else:
mylist = discord.Embed(color=0xb5a2c8)
mylist.set_author(name = '' + member.name + '\'s roles', icon_url = member.avatar_url)
if not housing_roles:
mylist.add_field(
name = 'Housing Roles',
value='Missing housing role, set your residential area role in #role-assignment',
inline=False)
else:
mylist.add_field(name = 'Housing Roles', value=housing_roles, inline=False)
if not major_roles:
mylist.add_field(
name = 'Major Roles',
value='Missing major role, set at least one major/minor role in #role-assignment',
inline=False)
else:
mylist.add_field(name = 'Major Roles', value=major_roles, inline=False)
if class_specific:
mylist.add_field(name = 'Class Roles', value=class_specific, inline=False)
if graduation_year:
mylist.add_field(name = 'Graduation Year', value=graduation_year, inline=False)
if pronoun:
mylist.add_field(name = 'Pronoun', value=pronoun, inline=False)
message = await ctx.channel.send(embed=mylist)
await del_convo(ctx.message, message, 30)
async def stats(ctx):
contents = ctx.message.content[6:].strip().lower()
if len(contents) == 0:
message = await ctx.send(embed=discord.Embed(
description="You must specify a valid role, for example: $stats Computer Science\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.red()))
await del_convo(ctx.message, message, 15)
return
    possible_roles = merge_dict([HOUSING_ROLE_IDS, MAJOR_ROLE_IDS, CLASS_ROLE_IDS, GRAD_YEAR_ROLE_IDS, PRONOUN_ROLE_IDS, SPECIAL_ROLE_IDS])
found = False
for role_id, role_names in possible_roles.items():
for alias in role_names:
if contents == alias: # valid role
found = True
role = get(ctx.guild.roles, id=role_id)
break
if not found:
message = await ctx.send(embed=discord.Embed(
description="Invalid role specified. You must specify a valid role, for example: $stats Computer Science\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.red()))
await del_convo(ctx.message, message, 15)
return
message = await ctx.send(embed=discord.Embed(
title=role.name + " Role Statistics",
description = "Count: " + str(len(role.members)) + "\n" \
"Percentage of Members: {:.3f}%".format((len(role.members)/ctx.message.guild.member_count)*100),
color=discord.Color.greyple()))
await del_convo(ctx.message, message, 30)
async def block_multiple_restricted_roles(member, channel, ctx, id_dict, role_name, str_role_type):
for roles in id_dict.values():
if role_name in roles:
message = await channel.send(embed=discord.Embed(
description="I'm sorry, " + member.name + ", you already have a " + str_role_type +" role!\n" \
"Use the $remove [role] command to remove it in order to add a different one!\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.gold()))
await del_convo(ctx.message, message, 15)
return True
return False
async def contains_role(roles, name):
"""
Checks if the role list contains a role with the name 'name'
Parameters:
- roles: list of role objects
- name: name of role
Returns:
- role: role object that has the name 'name'
"""
for r in roles:
if r.name.lower() == name:
return r
async def stem_add_role(ctx, member, client):
channel = ctx.channel
available_roles = merge_dict([HOUSING_ROLE_IDS, MAJOR_ROLE_IDS, CLASS_ROLE_IDS, GRAD_YEAR_ROLE_IDS, PRONOUN_ROLE_IDS])
role_lower = ctx.message.content[5:].lower().strip().replace('[', '').replace(']', '')
is_grad_role, is_pronoun_role = False, False
#check if user already has a graduation role or a pronoun role
for role in member.roles:
for grad_years in GRAD_YEAR_ROLE_IDS.values():
if role.name.lower() in grad_years:
is_grad_role = True
for pronouns in PRONOUN_ROLE_IDS.values():
if role.name.lower() in pronouns:
is_pronoun_role = True
role_to_add = None # role requested by the user to be added
for role_names in available_roles.values():
if role_lower in role_names:
# check if member already has the requested role
member_role = await contains_role(member.roles, role_names[0])
if member_role is not None:
message = await channel.send(embed=discord.Embed(
description="I'm sorry, " + member.name + ", you already have this role!\n" \
"Use the `$remove " + member_role.name + "` command to remove it!\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.gold()))
await del_convo(ctx.message, message, 15)
return
            # if the member doesn't already have the requested role, get the role from the guild roles
role_to_add = await contains_role(ctx.message.guild.roles, role_names[0])
# make sure member isn't trying to add a second grad year role, they should only be allowed to have one
if is_grad_role and await block_multiple_restricted_roles(member,
channel,
ctx,
GRAD_YEAR_ROLE_IDS,
role_to_add.name.lower(),
'graduation year'): return
# make sure member isn't trying to add a second pronoun role
if is_pronoun_role and await block_multiple_restricted_roles(member,
channel,
ctx,
PRONOUN_ROLE_IDS,
role_to_add.name.lower(),
'pronoun'): return
await member.add_roles(role_to_add)
await check_major_housing_role(member, client, role_to_add, True)
message = await channel.send(embed=discord.Embed(
description="Added " + role_to_add.name + " to " + member.name + "\n" \
"Use the `$remove " + role_to_add.name + "` command to remove it!\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.green()))
await del_convo(ctx.message, message, 15)
return
message = await channel.send(embed=discord.Embed(
description="I'm sorry, " + member.name + ", there is no role with that name!\n" \
"Use the `$getlist` command to see the available roles\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.red()))
await del_convo(ctx.message, message, 15)
    # recheck (after the 15 second auto-delete) whether the user should still
    # have the missing housing or major (mhom) role
    await check_major_housing_role(member, client, role_to_add, True)
async def check_major_housing_role(member, client, role, is_add):
member_has_hr = False
member_has_m = False
curr_roles = member.roles
    if is_add:
        if role is not None and role not in curr_roles:
            curr_roles.append(role)
    else:
        if role in curr_roles:
            curr_roles.remove(role)
    # check the adjusted role list rather than the (possibly stale) member.roles
    for role in curr_roles:
if role.id in HOUSING_ROLE_IDS:
member_has_hr = True
if role.id in MAJOR_ROLE_IDS:
member_has_m = True
mhom = await get_mhom(member.guild.roles)
if mhom in member.roles: # check if the member has the missing housing or major role
if member_has_hr and member_has_m:
await member.remove_roles(mhom) #removes missing housing or major role
else: # if not then add it to them if they need it
if not member_has_hr or not member_has_m:
await member.add_roles(mhom) #adds missing housing or major role if they dont have the roles
async def stem_remove_role(ctx, member, client):
channel = ctx.channel
removable_roles = merge_dict([HOUSING_ROLE_IDS, MAJOR_ROLE_IDS, CLASS_ROLE_IDS, GRAD_YEAR_ROLE_IDS, PRONOUN_ROLE_IDS])
role_lower = ctx.message.content[8:].lower().strip().replace('[', '').replace(']', '') # requested role
rid = -1 # requested role id
# get the requested role's role id
for role_id, r_aliases in removable_roles.items():
if role_lower in r_aliases:
rid = role_id
break
# role doesn't exist or it's a role that shouldn't be removed
if rid == -1:
message = await channel.send(embed=discord.Embed(
description="I'm sorry, " + member.name + ", that is not a valid removable role.\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.red()))
await del_convo(ctx.message, message, 15)
return
# check to see if the user has the requested role
for role in member.roles:
if role.id == rid:
await member.remove_roles(role)
await check_major_housing_role(member, client, role, False)
message = await channel.send(embed=discord.Embed(
description="Removed " + role.name + " from " + member.name + "\n" \
"Use the `$get " + role.name + "` command to add it back!\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.green()))
await del_convo(ctx.message, message, 15)
return
message = await channel.send(embed=discord.Embed(
description="I'm sorry, " + member.name + ", you don't have a role with that name\n" \
"This message will auto-delete in 15 seconds",
color=discord.Color.red()))
await del_convo(ctx.message, message, 15)
    # recheck (after the 15 second auto-delete) whether the user should still
    # have the missing housing or major (mhom) role; nothing was removed here
    await check_major_housing_role(member, client, None, False)
|
#!/usr/bin/env python3
"""
This module provides classes used to define a periodic structure.
"""
# adapted from the original version in pymatgen
import math
import collections
import itertools
from abc import ABCMeta, abstractmethod, abstractproperty
import warnings
import numpy as np
from math import gcd
from functools import reduce
from .lattice import Lattice
from .util.periodic_table import Element, Specie, get_el_sp
from .sites import PeriodicSite
from .composition import Composition
from .coord_utils import get_angle, all_distances
from .util.units import Mass, Length
from .coord_utils import supercell_latticepoints
from .util.tool import non_1to1
class SiteCollection(collections.Sequence):
"""
Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
This serves as a base class for Molecule (a collection of Site, i.e., no
periodicity) and Structure (a collection of PeriodicSites, i.e.,
periodicity). Not meant to be instantiated directly.
"""
__metaclass__ = ABCMeta
#Tolerance in Angstrom for determining if sites are too close.
DISTANCE_TOLERANCE = 0.01
@abstractproperty
def sites(self):
"""
Returns a tuple of sites.
"""
return
@abstractmethod
def get_distance(self, i, j):
"""
Returns distance between sites at index i and j.
Args:
i (int): Index of first site
j (int): Index of second site
Returns:
(float) Distance between sites at index i and index j.
"""
return
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this is overwritten to return the nearest image
distance.
"""
return all_distances(self.cart_coords, self.cart_coords)
@property
def species(self):
"""
Only works for ordered structures.
Disordered structures will raise an AttributeError.
Returns:
([Specie]) List of species at each site of the structure.
"""
return [site.specie for site in self]
@property
def elements(self):
return [site.specie.__str__() for site in self]
@property
def species_and_occu(self):
"""
List of species and occupancies at each site of the structure.
"""
return [site.species_and_occu for site in self]
@property
def ntypesp(self):
"""Number of types of atoms."""
return len(self.types_of_specie)
@property
def types_of_specie(self):
"""
List of types of specie. Only works for ordered structures.
Disordered structures will raise an AttributeError.
"""
# Cannot use set since we want a deterministic algorithm.
types = []
for site in self:
if site.specie not in types:
types.append(site.specie)
return types
@property
def types_of_elements(self):
return [i.symbol for i in self.types_of_specie]
def group_by_types(self):
"""Iterate over species grouped by type"""
for t in self.types_of_specie:
for site in self:
if site.specie == t:
yield site
def indices_from_symbol(self, symbol):
"""
Returns a tuple with the sequential indices of the sites
that contain an element with the given chemical symbol.
"""
indices = []
for i, specie in enumerate(self.species):
if specie.symbol == symbol:
indices.append(i)
return tuple(indices)
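    # e.g. for a structure whose species are ["Fe", "O", "Fe"] (hypothetical),
    # indices_from_symbol("Fe") returns (0, 2).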
@property
def symbol_set(self):
"""
Tuple with the set of chemical symbols.
Note that len(symbol_set) == len(types_of_specie)
"""
return tuple([specie.symbol for specie in self.types_of_specie])
@property
def atomic_masses(self):
"""List of atomic masses."""
return [site.specie.atomic_mass for site in self]
@property
def atomic_numbers(self):
"""List of atomic numbers."""
return [site.specie.number for site in self]
@property
def site_properties(self):
"""
Returns the site properties as a dict of sequences. E.g.,
{"magmom": (5,-5), "charge": (-4,4)}.
"""
props = collections.defaultdict(list)
for site in self:
for k, v in site.properties.items():
props[k].append(v)
return props
def __contains__(self, site):
return site in self.sites
def __iter__(self):
return self.sites.__iter__()
def __getitem__(self, ind):
return self.sites[ind]
def __len__(self):
return len(self.sites)
def __hash__(self):
#for now, just use the composition hash code.
return self.composition.__hash__()
@property
def num_sites(self):
"""
Number of sites.
"""
return len(self)
@property
def cart_coords(self):
"""
Returns a list of the cartesian coordinates of sites in the structure.
"""
return np.array([site.coords for site in self])
@property
def formula(self):
"""
(str) Returns the formula.
"""
return self.composition.formula
@property
def composition(self):
"""
(Composition) Returns the composition
"""
elmap = collections.defaultdict(float)
for site in self:
for species, occu in site.species_and_occu.items():
elmap[species] += occu
return Composition(elmap)
@property
def charge(self):
"""
Returns the net charge of the structure based on oxidation states. If
Elements are found, a charge of 0 is assumed.
"""
charge = 0
for site in self:
for specie, amt in site.species_and_occu.items():
charge += getattr(specie, "oxi_state", 0) * amt
return charge
@property
def is_ordered(self):
"""
Checks if structure is ordered, meaning no partial occupancies in any
of the sites.
"""
return all((site.is_ordered for site in self))
def get_angle(self, i, j, k):
"""
Returns angle specified by three sites.
Args:
i (int): Index of first site.
j (int): Index of second site.
k (int): Index of third site.
Returns:
(float) Angle in degrees.
"""
v1 = self[i].coords - self[j].coords
v2 = self[k].coords - self[j].coords
return get_angle(v1, v2, units="degrees")
def get_dihedral(self, i, j, k, l):
"""
Returns dihedral angle specified by four sites.
Args:
i (int): Index of first site
j (int): Index of second site
k (int): Index of third site
l (int): Index of fourth site
Returns:
(float) Dihedral angle in degrees.
"""
v1 = self[k].coords - self[l].coords
v2 = self[j].coords - self[k].coords
v3 = self[i].coords - self[j].coords
v23 = np.cross(v2, v3)
v12 = np.cross(v1, v2)
return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),
np.dot(v12, v23)))
def is_valid(self, tol=DISTANCE_TOLERANCE):
"""
True if SiteCollection does not contain atoms that are too close
together. Note that the distance definition is based on type of
SiteCollection. Cartesian distances are used for non-periodic
Molecules, while PBC is taken into account for periodic structures.
Args:
tol (float): Distance tolerance. Default is 0.01A.
Returns:
(bool) True if SiteCollection does not contain atoms that are too
close together.
"""
if len(self.sites) == 1:
return True
all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
return bool(np.min(all_dists) > tol)
class IStructure(SiteCollection):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
def __init__(self, lattice, species, coords, validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None,site_properties_T=None,
intensive_properties={}, extensive_properties={}):
"""
Create a periodic structure.
Args:
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
            coords (Nx3 array): list of fractional coordinates of each
                species.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
site_properties_T (list): alternative way to specify site_properties
essentially site_properties transposed
"""
if len(species) != len(coords):
raise StructureError("The list of atomic species must be of the"
"same length as the list of fractional"
" coordinates.")
if isinstance(lattice, Lattice):
self._lattice = lattice
else:
self._lattice = Lattice(lattice)
sites = []
for i in range(len(species)):
prop = None
if site_properties:
prop = {k: v[i] for k, v in site_properties.items()}
elif site_properties_T:
prop = site_properties_T[i]
sites.append(
PeriodicSite(species[i], coords[i], self._lattice,
to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
properties=prop))
self._sites = tuple(sites)
if validate_proximity and not self.is_valid():
raise StructureError(("Structure contains sites that are ",
"less than 0.01 Angstrom apart!"))
self.intensive_properties=intensive_properties
self.extensive_properties=extensive_properties
@classmethod
def from_sites(cls, sites, validate_proximity=False,
to_unit_cell=False):
"""
Convenience constructor to make a Structure from a list of sites.
Args:
sites: Sequence of PeriodicSites. Sites must have the same
lattice.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
to_unit_cell (bool): Whether to translate sites into the unit
cell.
Returns:
            (Structure) Note that missing properties are set as None.
        """
"""
prop_keys = []
props = {}
lattice = None
for i, site in enumerate(sites):
if not lattice:
lattice = site.lattice
elif site.lattice != lattice:
raise ValueError("Sites must belong to the same lattice")
for k, v in site.properties.items():
if k not in prop_keys:
prop_keys.append(k)
props[k] = [None] * len(sites)
props[k][i] = v
for k, v in props.items():
if any((vv is None for vv in v)):
warnings.warn("Not all sites have property %s. Missing values "
"are set to None." % k)
return cls(lattice, [site.species_and_occu for site in sites],
[site.frac_coords for site in sites],
site_properties=props,
validate_proximity=validate_proximity,
to_unit_cell=to_unit_cell)
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords,
self.frac_coords)
@property
def distance_matrix_noself(self):
return self.lattice.get_all_distances(self.frac_coords,
self.frac_coords, nogamma=True)
@property
def sites(self):
"""
Returns an iterator for the sites in the Structure.
"""
return self._sites
@property
def lattice(self):
"""
Lattice of the structure.
"""
return self._lattice
@property
def reciprocal_lattice(self):
"""
Reciprocal lattice of the structure.
"""
return self._lattice.reciprocal_lattice
def lattice_vectors(self, space="r"):
"""
Returns the vectors of the unit cell in Angstrom.
Args:
space: "r" for real space vectors, "g" for reciprocal space basis
vectors.
"""
if space.lower() == "r":
return self.lattice.matrix
if space.lower() == "g":
return self.lattice.reciprocal_lattice.matrix
raise ValueError("Wrong value for space: %s " % space)
@property
def density(self):
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self._lattice != other._lattice:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
# For now, just use the composition hash code.
return self.composition.__hash__()
@property
def frac_coords(self):
"""
Fractional coordinates as a Nx3 numpy array.
"""
return np.array([site.frac_coords for site in self._sites])
@property
def volume(self):
"""
Returns the volume of the structure.
"""
return self._lattice.volume
def get_distance(self, i, j, jimage=None):
"""
        Get the distance between sites i and j assuming periodic boundary
        conditions. If jimage (the periodic image of atom j) is not specified,
        the image nearest to atom i is used and the distance to that image is
        returned. If jimage is specified, the distance between atom i and that
        particular image of atom j is returned.
Args:
i (int): Index of first site
j (int): Index of second site
jimage: Number of lattice translations in each lattice direction.
Default is None for nearest image.
Returns:
distance
"""
return self[i].distance(self[j], jimage)
def distance_and_image(self, i, j, jimage=None):
return self[i].distance_and_image(self[j], jimage)
def get_sites_in_sphere(self, pt, r, include_index=False):
"""
Find all sites within a sphere from the point. This includes sites
in other periodic images.
Algorithm:
        1. Place a sphere of radius r in the crystal and determine the minimum
        supercell (parallelepiped) that would contain it. For this we need the
        projection of a_1 on a unit vector perpendicular to a_2 and a_3
        (i.e. the unit vector in the direction of b_1) to determine how many
        multiples of a_1 it takes to contain the sphere.
Nxmax = r * length_of_b_1 / (2 Pi)
2. keep points falling within r.
Args:
pt (3x1 array): cartesian coordinates of center of sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
site_fcoords = np.mod(self.frac_coords, 1)
neighbors = []
for fcoord, dist, i in self._lattice.get_points_in_sphere(
site_fcoords, pt, r):
nnsite = PeriodicSite(self[i].species_and_occu,
fcoord, self._lattice,
properties=self[i].properties)
neighbors.append((nnsite, dist) if not include_index
else (nnsite, dist, i))
return neighbors
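    # Example usage (a sketch; `struct` is assumed to be an existing structure):
    #   for site, dist in struct.get_sites_in_sphere([0.0, 0.0, 0.0], 3.0):
    #       print(site.species_string, dist)
    # With include_index=True each entry is (site, dist, index) instead.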
def get_neighbors(self, site, r, include_index=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site:
site, which is the center of the sphere.
r:
radius of sphere.
include_index:
boolean that determines whether the non-supercell site index
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
nn = self.get_sites_in_sphere(site.coords, r,
include_index=include_index)
return [d for d in nn if site != d[0]]
def get_all_neighbors(self, r, include_index=False):
"""
Get neighbors for each atom in the unit cell, out to a distance r
Returns a list of list of neighbors for each site in structure.
Use this method if you are planning on looping over all sites in the
crystal. If you only want neighbors for a particular site, use the
method get_neighbors as it may not have to build such a large supercell
However if you are looping over all sites in the crystal, this method
is more efficient since it only performs one pass over a large enough
supercell to contain all possible atoms out to a distance r.
The return type is a [(site, dist) ...] since most of the time,
subsequent processing requires the distance.
Args:
r (float): Radius of sphere.
include_index (bool): Whether to include the non-supercell site
in the returned data
Returns:
A list of a list of nearest neighbors for each site, i.e.,
[[(site, dist, index) ...], ..]
Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
"""
# Use same algorithm as get_sites_in_sphere to determine supercell but
# loop over all atoms in crystal
recp_len = self.lattice.reciprocal_lattice.abc
sr = r + 0.15
nmax = [sr * l / (2 * math.pi) for l in recp_len]
site_nminmax = []
floor = math.floor
inds = (0, 1, 2)
for site in self:
pcoords = site.frac_coords
inmax = [int(floor(pcoords[i] + nmax[i])) for i in inds]
inmin = [int(floor(pcoords[i] - nmax[i])) for i in inds]
site_nminmax.append(zip(inmin, inmax))
nmin = [min([i[j][0] for i in site_nminmax]) for j in inds]
nmax = [max([i[j][1] for i in site_nminmax]) for j in inds]
all_ranges = [range(nmin[i], nmax[i] + 1) for i in inds]
neighbors = [list() for i in range(len(self._sites))]
all_fcoords = np.mod(self.frac_coords, 1)
site_coords = np.array(self.cart_coords)
latt = self._lattice
frac_2_cart = latt.get_cartesian_coords
n = len(self)
indices = np.array(range(n))
for image in itertools.product(*all_ranges):
for (j, fcoord) in enumerate(all_fcoords):
fcoords = fcoord + image
coords = frac_2_cart(fcoords)
submat = np.tile(coords, (n, 1))
dists = np.power(site_coords - submat, 2)
dists = np.sqrt(dists.sum(axis=1))
withindists = (dists <= r) * (dists > 1e-8)
sp = self[j].species_and_occu
props = self[j].properties
for i in indices[withindists]:
nnsite = PeriodicSite(sp, fcoords, latt,
properties=props)
item = (nnsite, dists[i], j) if include_index else (
nnsite, dists[i])
neighbors[i].append(item)
return neighbors
def get_neighbors_in_shell(self, origin, r, dr):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
outer = self.get_sites_in_sphere(origin, r + dr)
inner = r - dr
return [(site, dist) for (site, dist) in outer if dist > inner]
def get_sorted_structure(self):
"""
Get a sorted copy of the structure.
Sites are sorted by the electronegativity of the species.
"""
# sites = sorted(self)
# WARNING: Sorting by electronegativity was NOT implemented?????
sites= self.sites
return self.__class__.from_sites(sites)
def get_reduced_structure(self, reduction_algo="niggli"):
"""
Get a reduced structure.
Args:
reduction_algo (str): The lattice reduction algorithm to use.
Currently supported options are "niggli" or "LLL".
"""
if reduction_algo == "niggli":
reduced_latt = self._lattice.get_niggli_reduced_lattice()
elif reduction_algo == "LLL":
reduced_latt = self._lattice.get_lll_reduced_lattice()
else:
raise ValueError("Invalid reduction algo : {}"
.format(reduction_algo))
return self.__class__(reduced_latt, self.species_and_occu,
self.cart_coords,
coords_are_cartesian=True, to_unit_cell=True)
def copy(self, site_properties=None, sanitize=False):
"""
Convenience method to get a copy of the structure, with options to add
site properties.
Args:
site_properties (dict): Properties to add or override. The
properties are specified in the same way as the constructor,
i.e., as a dict of the form {property: [values]}. The
properties should be in the order of the *original* structure
if you are performing sanitization.
sanitize (bool): If True, this method will return a sanitized
structure. Sanitization performs a few things: (i) The sites are
sorted by electronegativity, (ii) a LLL lattice reduction is
carried out to obtain a relatively orthogonalized cell,
(iii) all fractional coords for sites are mapped into the
unit cell.
Returns:
A copy of the Structure, with optionally new site_properties and
optionally sanitized.
"""
props = self.site_properties
if site_properties:
props.update(site_properties)
if not sanitize:
return self.__class__(self._lattice,
self.species_and_occu,
self.frac_coords,
site_properties=props)
else:
reduced_latt = self._lattice.get_lll_reduced_lattice()
new_sites = []
for i, site in enumerate(self):
frac_coords = reduced_latt.get_fractional_coords(site.coords)
site_props = {}
for p in props:
site_props[p] = props[p][i]
new_sites.append(PeriodicSite(site.species_and_occu,
frac_coords, reduced_latt,
to_unit_cell=True,
properties=site_props))
new_sites = sorted(new_sites)
return self.__class__.from_sites(new_sites)
def interpolate(self, end_structure, nimages=10,
interpolate_lattices=False, pbc=True):
"""
Interpolate between this structure and end_structure. Useful for
construction of NEB inputs.
Args:
end_structure (Structure): structure to interpolate between this
structure and end.
nimages (int): No. of interpolation images. Defaults to 10 images.
interpolate_lattices (bool): Whether to interpolate the lattices.
Interpolates the lengths and angles (rather than the matrix)
so orientation may be affected.
pbc (bool): Whether to use periodic boundary conditions to find
the shortest path between endpoints.
Returns:
List of interpolated structures. The starting and ending
structures included as the first and last structures respectively.
A total of (nimages + 1) structures are returned.
"""
#Check length of structures
if len(self) != len(end_structure):
raise ValueError("Structures have different lengths!")
if interpolate_lattices:
#interpolate lattices
lstart = np.array(self.lattice.lengths_and_angles)
lend = np.array(end_structure.lattice.lengths_and_angles)
lvec = lend - lstart
#Check that both structures have the same lattice
elif not self.lattice == end_structure.lattice:
raise ValueError("Structures with different lattices!")
#Check that both structures have the same species
for i in range(0, len(self)):
if self[i].species_and_occu != end_structure[i].species_and_occu:
raise ValueError("Different species!\nStructure 1:\n" +
str(self) + "\nStructure 2\n" +
str(end_structure))
start_coords = np.array(self.frac_coords)
end_coords = np.array(end_structure.frac_coords)
vec = end_coords - start_coords
if pbc:
vec -= np.round(vec)
sp = self.species_and_occu
structs = []
for x in range(nimages+1):
if interpolate_lattices:
l_a = lstart + x / nimages * lvec
l = Lattice.from_lengths_and_angles(*l_a)
else:
l = self.lattice
fcoords = start_coords + x / nimages * vec
structs.append(self.__class__(l, sp, fcoords,
site_properties=self.site_properties))
return structs
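    # Typical NEB-style usage (a sketch; `start` and `end` are assumed to be
    # two structures with the same number and ordering of sites):
    #   images = start.interpolate(end, nimages=5)
    #   # -> 6 structures: the two endpoints plus 4 evenly spaced intermediates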
def get_primitive_structure(self, tolerance=0.25):
"""
        This finds a smaller unit cell than the input. Sometimes it doesn't
        find the smallest possible one, so this method is called recursively
        until it is unable to find a smaller cell.
        The method works by finding possible smaller translations and then
        using that translational symmetry instead of one of the lattice basis
        vectors. If more than one vector is found (usually the case for large
        cells), the one with the smallest norm is used. Everything is done in
        fractional coordinates because it is easier to translate back to the
        unit cell.
NOTE: if the tolerance is greater than 1/2 the minimum inter-site
distance, the algorithm may find 2 non-equivalent sites that are
within tolerance of each other. The algorithm will reject this
lattice.
Args:
tolerance (float): Tolerance for each coordinate of a particular
site. For example, [0.5, 0, 0.5] in cartesian coordinates
will be considered to be on the same coordinates as
                [0, 0, 0] for a tolerance of 0.5. Defaults to 0.25.
Returns:
The most primitive structure found. The returned structure is
guaranteed to have len(new structure) <= len(structure).
"""
original_volume = self.volume
#get the possible symmetry vectors
sites = sorted(self._sites, key=lambda site: site.species_string)
grouped_sites = [list(a[1]) for a
in itertools.groupby(sites,
key=lambda s: s.species_string)]
num_fu = reduce(gcd, map(len, grouped_sites))
min_vol = original_volume * 0.5 / num_fu
min_site_list = min(grouped_sites, key=lambda group: len(group))
min_site_list = [site.to_unit_cell for site in min_site_list]
org = min_site_list[0].coords
possible_vectors = [min_site_list[i].coords - org
for i in range(1, len(min_site_list))]
#Let's try to use the shortest vector possible first. Allows for faster
#convergence to primitive cell.
possible_vectors = sorted(possible_vectors,
key=lambda x: np.linalg.norm(x))
        # Pre-create a few variables for faster lookup.
all_coords = [site.coords for site in sites]
all_sp = [site.species_and_occu for site in sites]
new_structure = None
#all lattice points need to be projected to 0 under new basis
l_points = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
[1, 0, 1], [1, 1, 0], [1, 1, 1]])
l_points = self._lattice.get_cartesian_coords(l_points)
for v, repl_pos in itertools.product(possible_vectors, range(3)):
#Try combinations of new lattice vectors with existing lattice
#vectors.
latt = self._lattice.matrix
latt[repl_pos] = v
#Exclude coplanar lattices from consideration.
if abs(np.dot(np.cross(latt[0], latt[1]), latt[2])) < min_vol:
continue
latt = Lattice(latt)
#Convert to fractional tol
tol = tolerance / np.array(latt.abc)
#check validity of new basis
new_l_points = latt.get_fractional_coords(l_points)
f_l_dist = np.abs(new_l_points - np.round(new_l_points))
if np.any(f_l_dist > tol[None, None, :]):
continue
all_frac = latt.get_fractional_coords(np.array(all_coords))
#calculate grouping of equivalent sites, represented by
#adjacency matrix
fdist = all_frac[None, :, :] - all_frac[:, None, :]
fdist = np.abs(fdist - np.round(fdist))
groups = np.all(fdist < tol[None, None, :], axis=2)
#check that all group sizes are the same
sizes = np.unique(np.sum(groups, axis=0))
if len(sizes) > 1:
continue
#check that reduction in number of sites was by the same
#amount as the volume reduction
if round(self._lattice.volume / latt.volume) != sizes[0]:
continue
new_sp = []
new_frac = []
#this flag is set to ensure that all sites in a group are
#the same species, it is set to false if a group is found
#where this is not the case
correct = True
added = np.zeros(len(groups), dtype='bool')
for i, g in enumerate(groups):
if added[i]:
continue
indices = np.where(g)[0]
i0 = indices[0]
sp = all_sp[i0]
added[indices] = 1
if not all([all_sp[i] == sp for i in indices]):
correct = False
break
new_sp.append(all_sp[i0])
new_frac.append(all_frac[i0])
if correct:
new_structure = self.__class__(
latt, new_sp, new_frac, to_unit_cell=True)
break
if new_structure and len(new_structure) != len(self):
# If a more primitive structure has been found, try to find an
# even more primitive structure again.
return new_structure.get_primitive_structure(tolerance=tolerance)
else:
return self
def __repr__(self):
outs = ["Structure Summary", repr(self.lattice)]
for s in self:
outs.append(repr(s))
return "\n".join(outs)
def __str__(self):
outs = ["Structure Summary ({s})".format(s=str(self.composition)),
"Reduced Formula: {}"
.format(self.composition.reduced_formula)]
to_s = lambda x: "%0.6f" % x
outs.append("abc : " + " ".join([to_s(i).rjust(10)
for i in self.lattice.abc]))
outs.append("angles: " + " ".join([to_s(i).rjust(10)
for i in self.lattice.angles]))
outs.append("Sites ({i})".format(i=len(self)))
for i, site in enumerate(self):
outs.append(" ".join([str(i + 1), site.species_string,
" ".join([to_s(j).rjust(12)
for j in site.frac_coords])]))
return "\n".join(outs)
@property
def to_dict(self):
"""
Json-serializable dict representation of Structure
"""
latt_dict = self._lattice.to_dict
del latt_dict["@module"]
del latt_dict["@class"]
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"lattice": latt_dict, "sites": []}
for site in self:
site_dict = site.to_dict
del site_dict["lattice"]
del site_dict["@module"]
del site_dict["@class"]
d["sites"].append(site_dict)
return d
@classmethod
def from_dict(cls, d):
"""
Reconstitute a Structure object from a dict representation of Structure
created using to_dict.
Args:
d (dict): Dict representation of structure.
Returns:
Structure object
"""
lattice = Lattice.from_dict(d["lattice"])
sites = [PeriodicSite.from_dict(sd, lattice) for sd in d["sites"]]
return cls.from_sites(sites)
def dot(self, coords_a, coords_b, space="r", frac_coords=False):
"""
Compute the scalar product of vector(s) either in real space or
reciprocal space.
Args:
            coords_a, coords_b (3x1 arrays): Array-like objects with the coordinates.
space (str): "r" for real space, "g" for reciprocal space.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
Returns:
one-dimensional `numpy` array.
"""
lattice = {"r": self.lattice,
"g": self.reciprocal_lattice}[space.lower()]
return lattice.dot(coords_a, coords_b, frac_coords=frac_coords)
def norm(self, coords, space="r", frac_coords=True):
"""
Compute the norm of vector(s) either in real space or reciprocal space.
Args:
coords (3x1 array): Array-like object with the coordinates.
space (str): "r" for real space, "g" for reciprocal space.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
Returns:
one-dimensional `numpy` array.
"""
return np.sqrt(self.dot(coords, coords, space=space,
frac_coords=frac_coords))
class Structure(IStructure, collections.MutableSequence):
"""
Mutable version of structure. Much easier to use for editing,
but cannot be used as a key in a dict.
"""
__hash__ = None
def __init__(self, lattice, species, coords, validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None,site_properties_T=None,
intensive_properties={}, extensive_properties={}):
"""
Create a periodic structure.
Args:
scale: scaling factor, real number
R: lattice vectors in rows. Note R*scale == lattice!!!
lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
species: List of species on each site. Can take in flexible input,
including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
fractional_coords: list of fractional coordinates of each species.
validate_proximity (bool): Whether to check if there are sites
that are less than 0.01 Ang apart. Defaults to False.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
"""
IStructure.__init__(
self, lattice, species, coords,
validate_proximity=validate_proximity, to_unit_cell=to_unit_cell,
coords_are_cartesian=coords_are_cartesian,
site_properties=site_properties,site_properties_T=site_properties_T,
intensive_properties=intensive_properties,extensive_properties=extensive_properties)
self._sites = list(self._sites)
def set_extra(self, scale, R):
self.lattice.set_R(scale, R)
def __setitem__(self, i, site):
"""
Modify a site in the structure.
Args:
i (int): Index
site (PeriodicSite/Specie/Sequence): Three options exist. You
can provide a PeriodicSite directly (lattice will be
checked). Or more conveniently, you can provide a
specie-like object or a tuple of up to length 3. Examples:
s[0] = "Fe"
s[0] = Element("Fe")
both replaces the species only.
s[0] = "Fe", [0.5, 0.5, 0.5]
Replaces site and *fractional* coordinates. Any properties
are inherited from current site.
s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
Replaces site and *fractional* coordinates and properties.
"""
if isinstance(site, PeriodicSite):
if site.lattice != self._lattice:
raise ValueError("PeriodicSite added must have same lattice "
"as Structure!")
self._sites[i] = site
else:
if isinstance(site, str) or (not isinstance(site, collections.Sequence)):
sp = site
frac_coords = self._sites[i].frac_coords
properties = self._sites[i].properties
else:
sp = site[0]
frac_coords = site[1] if len(site) > 1 else self._sites[i]\
.frac_coords
properties = site[2] if len(site) > 2 else self._sites[i]\
.properties
self._sites[i] = PeriodicSite(sp, frac_coords, self._lattice,
properties=properties)
def __delitem__(self, i):
"""
Deletes a site from the Structure.
"""
self._sites.__delitem__(i)
def append(self, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Append a site to the structure.
Args:
species: Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
Returns:
New structure with inserted site.
"""
return self.insert(len(self), species, coords,
coords_are_cartesian=coords_are_cartesian,
validate_proximity=validate_proximity,
properties=properties)
def insert(self, i, species, coords, coords_are_cartesian=False,
validate_proximity=False, properties=None):
"""
Insert a site to the structure.
Args:
i (int): Index to insert site
species (species-like): Species of inserted site
coords (3x1 array): Coordinates of inserted site
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
Returns:
New structure with inserted site.
"""
if not coords_are_cartesian:
new_site = PeriodicSite(species, coords, self._lattice,
properties=properties)
else:
frac_coords = self._lattice.get_fractional_coords(coords)
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
if validate_proximity:
for site in self:
if site.distance(new_site) < self.DISTANCE_TOLERANCE:
raise ValueError("New site is too close to an existing "
"site!")
self._sites.insert(i, new_site)
def add_site_property(self, property_name, values):
"""
Adds a property to all sites.
Args:
property_name (str): The name of the property to add.
values: A sequence of values. Must be same length as number of
sites.
"""
if values is None:
return
if len(values) != len(self._sites):
raise ValueError("Values must be same length as sites.")
for i in range(len(self._sites)):
site = self._sites[i]
props = site.properties
if not props:
props = {}
props[property_name] = values[i]
self._sites[i] = PeriodicSite(site.species_and_occu,
site.frac_coords, self._lattice,
properties=props)
def replace_species(self, species_mapping):
"""
Swap species in a structure.
Args:
species_mapping (dict): Dict of species to swap. Species can be
elements too. e.g., {Element("Li"): Element("Na")} performs
a Li for Na substitution. The second species can be a
sp_and_occu dict. For example, a site with 0.5 Si that is
                passed the mapping {Element('Si'): {Element('Ge'): 0.75,
Element('C'):0.25} } will have .375 Ge and .125 C.
"""
latt = self._lattice
species_mapping = {get_el_sp(k): v
for k, v in species_mapping.items()}
def mod_site(site):
new_atom_occu = collections.defaultdict(int)
for sp, amt in site.species_and_occu.items():
if sp in species_mapping:
if isinstance(species_mapping[sp], collections.Mapping):
for new_sp, new_amt in species_mapping[sp].items():
new_atom_occu[get_el_sp(new_sp)] \
+= amt * new_amt
else:
new_atom_occu[get_el_sp(
species_mapping[sp])] += amt
else:
new_atom_occu[sp] += amt
return PeriodicSite(new_atom_occu, site.frac_coords, latt,
properties=site.properties)
        self._sites = list(map(mod_site, self._sites))
def replace(self, i, species, coords=None, coords_are_cartesian=False,
properties=None):
"""
Replace a single site. Takes either a species or a dict of species and
occupations.
Args:
i (int): Index of the site in the _sites list.
species (species-like): Species of replacement site
coords (3x1 array): Coordinates of replacement site. If None,
the current coordinates are assumed.
coords_are_cartesian (bool): Whether coordinates are cartesian.
Defaults to False.
validate_proximity (bool): Whether to check if inserted site is
too close to an existing site. Defaults to False.
"""
if coords is None:
frac_coords = self[i].frac_coords
elif coords_are_cartesian:
frac_coords = self._lattice.get_fractional_coords(coords)
else:
frac_coords = coords
new_site = PeriodicSite(species, frac_coords, self._lattice,
properties=properties)
self._sites[i] = new_site
def remove_species(self, species):
"""
Remove all occurrences of several species from a structure.
Args:
species: Sequence of species to remove, e.g., ["Li", "Na"].
"""
new_sites = []
        species = list(map(get_el_sp, species))
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species_and_occu.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(PeriodicSite(
new_sp_occu, site.frac_coords, self._lattice,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
        Delete the sites at the given indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def apply_operation(self, symmop):
"""
Apply a symmetry operation to the structure and return the new
structure. The lattice is operated by the rotation matrix only.
Coords are operated in full and then transformed to the new lattice.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
self._lattice = Lattice([symmop.apply_rotation_only(row)
for row in self._lattice.matrix])
def operate_site(site):
new_cart = symmop.operate(site.coords)
new_frac = self._lattice.get_fractional_coords(new_cart)
return PeriodicSite(site.species_and_occu, new_frac, self._lattice,
properties=site.properties)
        self._sites = list(map(operate_site, self._sites))
def modify_lattice(self, new_lattice):
"""
Modify the lattice of the structure. Mainly used for changing the
basis.
Args:
new_lattice (Lattice): New lattice
"""
self._lattice = new_lattice
new_sites = []
for site in self._sites:
new_sites.append(PeriodicSite(site.species_and_occu,
site.frac_coords,
self._lattice,
properties=site.properties))
self._sites = new_sites
def apply_strain(self, strain):
"""
Apply a strain to the lattice.
Args:
strain (float or list): Amount of strain to apply. Can be a float,
or a sequence of 3 numbers. E.g., 0.01 means all lattice
vectors are increased by 1%. This is equivalent to calling
modify_lattice with a lattice with lattice parameters that
are 1% larger.
"""
s = (1 + np.array(strain)) * np.eye(3)
self.modify_lattice(Lattice(np.dot(self._lattice.matrix.T, s).T))
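    # For example, apply_strain(0.01) scales every lattice vector by 1.01,
    # while apply_strain([0.01, 0.0, 0.0]) stretches only the first lattice
    # vector by 1%.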
def translate_sites(self, indices, vector, frac_coords=True,
to_unit_cell=True):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices: Integer or List of site indices on which to perform the
translation.
vector: Translation vector for sites.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
to_unit_cell (bool): Whether new sites are transformed to unit
cell
"""
if not isinstance(indices, collections.Iterable):
indices = [indices]
for i in indices:
site = self._sites[i]
if frac_coords:
fcoords = site.frac_coords + vector
else:
fcoords = self._lattice.get_fractional_coords(site.coords
+ vector)
new_site = PeriodicSite(site.species_and_occu, fcoords,
self._lattice, to_unit_cell=to_unit_cell,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
def perturb(self, distance, to_unit_cell=True):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
#deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec(), frac_coords=False,to_unit_cell=to_unit_cell)
def displace_by(self, dr, to_unit_cell=True):
for i in range(len(self._sites)):
self.translate_sites([i], dr[i], frac_coords=False,to_unit_cell=to_unit_cell)
def from_displacement(self, dr, to_unit_cell=True):
s1= Structure(self._lattice, self.species_and_occu, self.frac_coords)
s1.displace_by(dr, to_unit_cell=to_unit_cell)
return s1
def add_oxidation_state_by_element(self, oxidation_states):
"""
Add oxidation states to a structure.
Args:
oxidation_states (dict): Dict of oxidation states.
E.g., {"Li":1, "Fe":2, "P":5, "O":-2}
"""
try:
for i, site in enumerate(self._sites):
new_sp = {}
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[sym])] = occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
except KeyError:
raise ValueError("Oxidation state of all elements must be "
"specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states):
"""
Add oxidation states to a structure by site.
Args:
oxidation_states (list): List of oxidation states.
E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]
"""
try:
for i, site in enumerate(self._sites):
new_sp = {}
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Specie(sym, oxidation_states[i])] = occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
except IndexError:
raise ValueError("Oxidation state of all sites must be "
"specified in the dictionary.")
def remove_oxidation_states(self):
"""
Removes oxidation states from a structure.
"""
for i, site in enumerate(self._sites):
new_sp = collections.defaultdict(float)
for el, occu in site.species_and_occu.items():
sym = el.symbol
new_sp[Element(sym)] += occu
new_site = PeriodicSite(new_sp, site.frac_coords,
self._lattice,
coords_are_cartesian=False,
properties=site.properties)
self._sites[i] = new_site
def generate_supercell(self, scaling_matrix, scref=None):
"""
Create a supercell.
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
b. An sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
"""
scmat = np.array(scaling_matrix, np.int16)
if scmat.shape != (3, 3):
scmat= np.array(scmat* np.eye(3), np.int16)
n_cell=int(round(np.linalg.det(scmat)))
old_lattice = self._lattice
new_lattice = Lattice(np.dot(scmat, old_lattice.matrix))
tvects = supercell_latticepoints(scmat)
inv=np.linalg.inv(scmat)
if scref is None:
sc_ref= supercell_latticepoints(scmat)
else:
sc_ref= scref
return Structure(Lattice(np.dot(scmat, self._lattice.matrix)),
[s.species_and_occu for s in self for _ in range(n_cell)],
(self.frac_coords[:,None,:]+sc_ref[None,:,:]).reshape((-1,3)).dot(inv),
coords_are_cartesian=False, to_unit_cell=True,
site_properties_T=[s.properties for s in self for _ in range(n_cell)],
intensive_properties=self.intensive_properties,extensive_properties=
                         {k: v * n_cell for k, v in self.extensive_properties.items()})
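    # Example (sketch): building a 2x2x2 supercell of an existing Structure `s`:
    #   sc = s.generate_supercell([[2, 0, 0], [0, 2, 0], [0, 0, 2]])
    #   # or, since a plain sequence of three factors is expanded to a diagonal matrix:
    #   sc = s.generate_supercell([2, 2, 2])
    #   assert len(sc) == 8 * len(s)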
    def optimize_supercell(self, nsc1, maxIter=2000):
"""
search for optimal supercell shape (as cubic like as possible)
Args:
nsc1: positive means number of supercells targeted
negative means a certain number of atoms desired
maxIter: number of iterations
"""
nsc=nsc1
if nsc<0:
nsc=int(round(nsc/self.num_sites))
volsc = nsc*self.volume
invprim = np.linalg.inv(self.lattice)
ntry=0
bestsc=np.identity(3, dtype=int)
bestlen=999999.0
for i in range(maxIter):
scmat=1
def scale_lattice(self, volume):
"""
Performs a scaling of the lattice vectors so that length proportions
and angles are preserved.
Args:
volume (float): New volume of the unit cell in A^3.
"""
self.modify_lattice(self._lattice.scale(volume))
def ijkl2frac(self, ijkl):
"""
same but ijkl in one array
"""
return np.array(ijkl[:3]) + self._sites[int(ijkl[3])].frac_coords
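    # Example (sketch): ijkl = [1, 0, 0, 2] denotes site 2 translated by one lattice
    # vector along a, so
    #     structure.ijkl2frac([1, 0, 0, 2]) == [1, 0, 0] + structure[2].frac_coords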
def ijkl2cart(self, ijkl):
"""
:return: cartesian coordinates
"""
return np.dot(self.ijkl2frac(ijkl), self.lattice._matrix)
def frac2ijkl(self, coords, frac_coords=True, tolerance= 1E-3):
"""
Identify which atom corresponds to the specified coordinates
Args:
coords (3x1 array): Array-like object with the coordinates.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
Returns:
            integer array [i, j, k, l]: lattice translation plus the index of the identified site
"""
assert frac_coords == True
return Structure.frac2ijkl_fromapos(coords, self.frac_coords, tolerance)
@staticmethod
def frac2ijkl_fromapos(c_f, apos, tolerance):
for l in range(len(apos)):
rvec= c_f - apos[l]
rvec_frac = rvec - np.round(rvec)
if np.linalg.norm(rvec_frac) < tolerance:
return np.append(np.round(rvec), [l]).astype(int)
print('debug from apos', c_f)
raise ValueError("could not find [%f, %f, %f] in cell" % (c_f[0], c_f[1], c_f[2]))
@staticmethod
def ijkl_in_supercell(scmat, ijkl):
"""
:param scmat: supercell scaling matrix
:param refpts: list of lattice points within the supercell
:return: new ijkl in the supercell.
Asuming ijkl.inv(scmat) give new ijk, and refpts should be within the supercell spanned by scmat
"""
        inv = np.linalg.inv(scmat)
        nsc = abs(int(round(np.linalg.det(scmat))))
        return Structure._ijkl_in_supercell(nsc, inv, supercell_latticepoints(scmat).dot(inv), ijkl)
@staticmethod
def _ijkl_in_supercell(nsc, invscmat, refpts, ijkl):
newfrac= np.dot(ijkl[:3], invscmat)
newijkl = Structure.frac2ijkl_fromapos(newfrac, refpts, 1E-3)
newijkl[3] += ijkl[3]*nsc
return newijkl
def nbtable(self, r):
"""
r: cutoff distance
return a table: neighbor list of each atom
"""
if not hasattr(self, '_nbtable'):
self._nbtable = {}
if r in self._nbtable:
return self._nbtable[r]
recp_len = np.array(self.lattice.reciprocal_lattice.abc)
sr = r + 0.15
nmax = sr * recp_len / (2 * math.pi)
floor = math.floor
n = self.num_sites
fcoords = self.frac_coords
indices = np.array(range(n))
nbtable_per_atom = []
pmin = np.amin(fcoords, axis=0)
pmax = np.amax(fcoords, axis=0)
arange = np.arange(int(floor(pmin[0] - nmax[0])),
int(floor(pmax[0] + nmax[0])) + 1)
brange = np.arange(int(floor(pmin[1] - nmax[1])),
int(floor(pmax[1] + nmax[1])) + 1)
crange = np.arange(int(floor(pmin[2] - nmax[2])),
int(floor(pmax[2] + nmax[2])) + 1)
# print("debug arange=", arange.shape)
arange = arange[:, None] * np.array([1, 0, 0])[None, :]
brange = brange[:, None] * np.array([0, 1, 0])[None, :]
crange = crange[:, None] * np.array([0, 0, 1])[None, :]
# print("debug arange=", arange.shape, arange)
images = arange[:, None, None] + brange[None, :, None] + crange[None, None, :]
images = images.reshape((-1,3))
shifted_coords = fcoords[:, None, :] + images[None, :, :]
shifted_coords= shifted_coords.reshape((-1,3))
coords = self.lattice.get_cartesian_coords(shifted_coords)
ijkls = np.array([[img[0], img[1], img[2], l] for l in range(n) for img in images])
for i in range(n):
pct = self.cart_coords[i]
dists = np.array([np.sqrt(np.sum((p-pct) ** 2)) for p in coords])
within_r = np.where(dists <= r)
nbtable_per_atom.append(ijkls[within_r])
self._nbtable[r] = nbtable_per_atom
return nbtable_per_atom
def find_nb_cluster(self, ijkls, cut):
"""
cut: cutoff distance
Find atoms within cutoff of EVERY ijkl
"""
# print(ijkls)
nbtable = self.nbtable(cut)
nsite = ijkls.shape[0]
# print("nsite", nsite)
if nsite <=0:
raise ValueError("At least 1 atom in cluster needed")
# print("ijkls", ijkls)
atoms = []
for _atom in nbtable[ijkls[0][3]]:
atom = _atom.copy()
atom[:3] += ijkls[0][:3]
# print("testing", atom)
within = True
for j in range(1, nsite):
atom_wrt_j = atom.copy()
atom_wrt_j[:3] -= ijkls[j,:3]
within = False
for x in nbtable[ijkls[j][3]]:
if (atom_wrt_j == x).all():
within = True
break
if not within:
# print("atom_wrt_j", atom_wrt_j, "not in", ijkls[j,:])
break
if within:
atoms.append(atom)
return atoms
def get_scmat(self, sc_R):
"""
Given primitive cell (self) and supercell lattice vectors
:param sc_R: lattice vectors of supercell
:return: integer scaling matrix
"""
return np.dot(sc_R.lattice.matrix if isinstance(sc_R, Structure) else sc_R, self.lattice.inv_matrix).round().astype(int)
def map_to_prim(self, prim):
"""
Given supercell (self) and primitive cell, get supercell without displacement
:param prim: primitive
:return: supercell without displacement
"""
scmat = prim.get_scmat(self.lattice.matrix)
sc=prim.generate_supercell(scmat)
return self.map_to_reference(sc)
def map_to_reference(self, sc):
"""
Given supercell (self) and ideal supercell, get supercell without displacement
        Assume that self and sc contain the same number of sites, differing only by small displacements.
:param sc: supercell
:return: supercell without displacement
"""
assert self.num_sites == sc.num_sites
dist_mat = self.lattice.get_all_distances(self.frac_coords, sc.frac_coords)
jmin = np.argmin(dist_mat, axis=1)
bad_i, bad_j = non_1to1(jmin)
if len(bad_i) > 0:
print("** WARNING** found %d conflicting mappings" % (len(bad_i)))
print(self.frac_coords[bad_i], "==>", sc.frac_coords[bad_j])
# try to resolve conflict
from itertools import permutations
min_dist = 1E99
solve_j = bad_j
for try_j in permutations(bad_j):
dist = np.sum([dist_mat[bad_i[i], try_j[i]] for i in range(len(bad_i))])
if dist < min_dist:
min_dist = dist
                    solve_j = list(try_j)
jmin[bad_i] = solve_j
print(bad_j, solve_j)
print("resolved", self.frac_coords[bad_i], "==>", sc.frac_coords[solve_j])
for i in range(self.num_sites):
# print("%d %.4f" % (i, np.linalg.norm(self.frac_coords[i] - sc.frac_coords[jmin[i]])))
self[i].set_coords(np.round(self.frac_coords[i] - sc.frac_coords[jmin[i]]) + sc.frac_coords[jmin[i]], cart=False)
# self[i].set_coords(sc.frac_coords[jmin[i]], cart=False)
# print("%d %.4f" % (i, np.linalg.norm(self.frac_coords[i] - sc.frac_coords[jmin[i]])))
return self
def set_coords(self, c, cart=True):
# print('debug set_coords', c.shape)
for i in range(self.num_sites):
self[i].set_coords(c[i], cart=cart)
def get_order_wrt(self, p1, inverse=False, tol=1E-4):
from _c_util import get_structure_ordering
if p1 is None:
return list(range(self.num_sites))
if isinstance(p1, Structure):
            assert (np.abs(self.lattice._matrix - p1.lattice._matrix) < 1E-6).all(), "ERROR: different lattice"
pos= p1.frac_coords if isinstance(p1, Structure) else p1
if inverse:
ordering= get_structure_ordering(pos, self.frac_coords, 1, tol).tolist()
else:
ordering= get_structure_ordering(self.frac_coords, pos, 1, tol).tolist()
# if isinstance(p1, Structure):
# inSC = p1
# else:
# inSC = Structure(self.lattice, ['']*self.num_sites, p1)
# ordering= [inSC.frac2ijkl(pf)[3] for pf in self.frac_coords]
assert sorted(ordering) == list(range(self.num_sites))
return ordering
def to_spglib(self):
"""
To the 'cell' format (tuple) required by spglib
"""
unique_species = []
zs = []
magmoms = []
for species, g in itertools.groupby(self,
key=lambda s: s.species_and_occu):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in self:
if hasattr(site, 'magmom'):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, 'spin'):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
return self.lattice._matrix, self.frac_coords, zs, magmoms
def from_spglib(self, latt, fcoord, id):
spe= self.types_of_specie
return Structure(latt, [spe[i-1] for i in id], fcoord)
def standardize_cell(self, to_primitive=False, no_idealize=False, symprec=1e-5):
"""
call standardize_cell of spglib
"""
import spglib
cell=self.to_spglib()
return self.from_spglib(*spglib.standardize_cell(cell, to_primitive, no_idealize, symprec))
def match(self, p2, tol_match=0.15, tol_distinct=1.0):
"""
self: ideal (supercell) structure
p2: structure with distortion, defect and/or disorder
"""
from f_util import f_util
return f_util.match_structure(self.lattice.matrix.T, self.frac_coords.T, self.atomic_numbers, p2.frac_coords.T, p2.atomic_numbers, tol_match, tol_distinct)
class SupercellStructure(Structure):
"""
This class represents a Supercell related to SymmetrizedStructure by sc_mat matrix
Args:
prim: primitive cell
sc_mat: defining supercell lattice vectors as R_sc = sc_mat . R_prim
.. attribute: equivalent_indices
indices of structure grouped by equivalency
"""
def __init__(self, prim, sc_mat, strc, tol=1E-4, match_prim=True):
self.sc_mat = np.array(sc_mat, dtype=int)
n_cell = abs(np.linalg.det(self.sc_mat))
assert abs(n_cell - np.round(n_cell).astype(int))< 1E-10, "ncell= %f "%(n_cell)
self.n_cell = np.round(n_cell).astype(int)
self.prim = prim
self.inv_sc_mat = np.linalg.inv(sc_mat)
if True or match_prim:
strc.map_to_prim(prim)
strc.perturb(0, to_unit_cell=True) # move to_unit_cell
ijkl= np.array([prim.frac2ijkl(c) for c in strc.frac_coords.dot(sc_mat)])
self.ijk_ref= np.array([i[:3] for i in ijkl if i[3]==0])
self._ijkl = ijkl
self._ijkl_list = ijkl.tolist()
species=[prim[i[3]].species_and_occu for i in ijkl]
site_properties_T=[prim[i[3]].properties for i in ijkl]
else:
species=[s.species_and_occu for s in strc]
site_properties_T=[s.properties for s in strc]
self.sc_ref=self.ijk_ref
Structure.__init__(self, Lattice(np.dot(self.sc_mat, prim.lattice._matrix)), species,
strc.frac_coords, site_properties_T=site_properties_T,
intensive_properties=prim.intensive_properties,extensive_properties=
{k:v*self.n_cell for k,v in prim.extensive_properties.items()})
@classmethod
def from_scmat(cls, prim, scmat, scref= None):
        return cls(prim, scmat, prim.generate_supercell(scmat, scref))
@classmethod
def from_structure(cls, prim, strc):
return cls(prim, prim.get_scmat(strc), strc)
@classmethod
def from_file(cls, prim, f):
from .interface_vasp import Poscar
strc= Poscar.from_file(f).structure
return cls(prim, prim.get_scmat(strc), strc)
def compatible_kpoints(self):
"""
return K-points compatible with this supercell
"""
return np.dot(supercell_latticepoints(self.sc_mat.T), self.inv_sc_mat.T)
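    # Sketch (illustrative): for a diagonal supercell with sc_mat = 2*identity, the
    # compatible k-points are the 8 fractional points with components in {0, 1/2},
    # i.e. the Gamma-centered 2x2x2 grid of the primitive reciprocal cell.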
def to_unperturbed(self):
"""
Map positions to unperturbed (prim) positions by the best match,
i.e. the --task 2 of polaron_main
"""
self.map_to_prim(self.prim)
class StructureError(Exception):
"""
Exception class for Structure.
Raised when the structure has problems, e.g., atoms that are too close.
"""
pass
|
"""Neural network operations."""
from __future__ import absolute_import as _abs
from ...expr import TupleWrapper
from . import _make
def conv2d(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
weight_layout="OIHW",
out_layout="",
out_dtype=""):
r"""2D convolution.
This operator takes the weight as the convolution kernel
and convolves it with data to produce an output.
In the default case, where the data_layout is `NCHW`
and weight_layout is `OIHW`, conv2d takes in
a data Tensor with shape `(batch_size, in_channels, height, width)`,
and a weight Tensor with shape `(channels, in_channels, kernel_size[0], kernel_size[1])`
to produce an output Tensor with the following rule:
.. math::
\mbox{out}[b, c, y, x] = \sum_{dy, dx, k}
\mbox{data}[b, k, \mbox{strides}[0] * y + dy, \mbox{strides}[1] * x + dx] *
\mbox{weight}[c, k, dy, dx]
Padding and dilation are applied to data and weight respectively before the computation.
This operator accepts data layout specification.
Semantically, the operator will convert the layout to the canonical layout
(`NCHW` for data and `OIHW` for weight), perform the computation,
then convert to the out_layout.
Parameters
----------
data : relay.Expr
The input data to the operator.
weight : relay.Expr
The weight expressions.
strides : tuple of int, optional
        The strides of convolution.
padding : tuple of int, optional
The padding of convolution on both sides of inputs before convolution.
dilation : tuple of int, optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
channels : int, optional
Number of output channels of this convolution.
kernel_size : tuple of int, optional
        The spatial dimensions of the convolution kernel.
data_layout : str, optional
Layout of the input.
weight_layout : str, optional
Layout of the weight.
out_layout : str, optional
Layout of the output, by default, out_layout is the same as data_layout
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.conv2d(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
weight_layout, out_layout, out_dtype)
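# Usage sketch (illustrative; the variable names are hypothetical):
#     data = relay.var("data", shape=(1, 3, 224, 224))
#     weight = relay.var("weight", shape=(64, 3, 3, 3))
#     out = conv2d(data, weight, strides=(1, 1), padding=(1, 1),
#                  channels=64, kernel_size=(3, 3))
# With the default NCHW/OIHW layouts this yields a (1, 64, 224, 224) output.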
def conv2d_transpose(data,
weight,
strides=(1, 1),
padding=(0, 0),
dilation=(1, 1),
groups=1,
channels=None,
kernel_size=None,
data_layout="NCHW",
weight_layout="OIHW",
output_padding=(0, 0),
out_dtype=""):
"""Two dimensional trnasposed convolution operator.
Parameters
----------
data : relay.Expr
The input data to the operator.
weight : relay.Expr
The weight expressions.
strides : Tuple[int], optional
        The strides of convolution.
padding : Tuple[int], optional
The padding of convolution on both sides of inputs.
dilation : Tuple[int], optional
Specifies the dilation rate to be used for dilated convolution.
groups : int, optional
Number of groups for grouped convolution.
data_layout : str, optional
Layout of the input.
weight_layout : str, optional
Layout of the weight.
output_padding : Tuple[int], optional
Additional zero-padding to be added to one side of the output.
out_dtype : str, optional
Specifies the output data type for mixed precision conv2d.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.conv2d_transpose(data, weight, strides, padding, dilation,
groups, channels, kernel_size, data_layout,
weight_layout, output_padding, out_dtype)
def softmax(data, axis=1):
r"""Computes softmax.
.. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int, optional
The axis to sum over when computing softmax
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.softmax(data, axis)
def log_softmax(data, axis):
r"""Computes log softmax.
.. math::
\text{log_softmax}(x)_i = \log \frac{exp(x_i)}{\sum_j exp(x_j)}
.. note::
This operator can be optimized away for inference.
Parameters
----------
data: relay.Expr
The input data to the operator.
axis: int
The axis to sum over when computing softmax
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.log_softmax(data, axis)
def max_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False):
r"""2D maximum pooling operator.
    This operator takes data as input and does 2D max value calculation
    within a pool_size sized window, with striding defined by stride.
    In the default case, where the data_layout is `NCHW`,
    a data Tensor with shape `(batch_size, in_channels, height, width)`
    produces an output Tensor with the following rule:
with data of shape (b, c, h, w) and pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \max_{m=0, \ldots, kh-1} \max_{n=0, \ldots, kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
This operator accepts data layout specification.
Parameters
----------
data : relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.max_pool2d(data, pool_size, strides, padding,
layout, ceil_mode)
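# Shape sketch (illustrative): with ceil_mode=False,
#     out_h = floor((h + 2*pad_h - pool_h) / stride_h) + 1
# so a (1, 64, 32, 32) input with pool_size=(2, 2), strides=(2, 2) and
# padding=(0, 0) produces a (1, 64, 16, 16) output.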
def avg_pool2d(data,
pool_size=(1, 1),
strides=(1, 1),
padding=(0, 0),
layout="NCHW",
ceil_mode=False,
count_include_pad=False):
r"""2D average pooling operator.
    This operator takes data as input and does 2D average value calculation
    within a pool_size sized window, with striding defined by stride.
    In the default case, where the data_layout is `NCHW`,
    a data Tensor with shape `(batch_size, in_channels, height, width)`
    produces an output Tensor with the following rule:
with data of shape (b, c, h, w), pool_size (kh, kw)
.. math::
\mbox{out}(b, c, y, x) = \frac{1}{kh * kw} \sum_{m=0}^{kh-1} \sum_{n=0}^{kw-1}
\mbox{data}(b, c, \mbox{stride}[0] * y + m, \mbox{stride}[1] * x + n)
Padding is applied to data before the computation.
ceil_mode is used to take ceil or floor while computing out shape.
count_include_pad indicates including or excluding padded input values in computation.
This operator accepts data layout specification.
Parameters
----------
data : relay.Expr
The input data to the operator.
strides : tuple of int, optional
The strides of pooling.
padding : tuple of int, optional
The padding for pooling.
layout : str, optional
Layout of the input.
ceil_mode : bool, optional
To enable or disable ceil while pooling.
count_include_pad : bool, optional
To include padding to compute the average.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.avg_pool2d(data, pool_size, strides, padding,
layout, ceil_mode, count_include_pad)
def global_max_pool2d(data,
layout="NCHW"):
r"""2D global maximum pooling operator.
This operator takes data as input and does 2D max value calculation
across each window represented by WxH.
    In the default case, where the data_layout is `NCHW`,
    a data Tensor with shape `(batch_size, in_channels, height, width)`
    produces an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \max_{m=0, \ldots, h} \max_{n=0, \ldots, w}
\mbox{data}(b, c, m, n)
Parameters
----------
data : relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.global_max_pool2d(data, layout)
def global_avg_pool2d(data,
layout="NCHW"):
r"""2D global average pooling operator.
This operator takes data as input and does 2D average value calculation
across each window represented by WxH.
    In the default case, where the data_layout is `NCHW`,
    a data Tensor with shape `(batch_size, in_channels, height, width)`
    produces an output Tensor with the following rule:
with data of shape (b, c, h, w)
.. math::
\mbox{out}(b, c, 1, 1) = \frac{1}{h * w} \sum_{m=0}^{h-1} \sum_{n=0}^{w-1}
\mbox{data}(b, c, m, n)
Parameters
----------
data : relay.Expr
The input data to the operator.
layout : str, optional
Layout of the input.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.global_avg_pool2d(data, layout)
def upsampling(data,
scale=1,
layout="NCHW",
method="NEAREST_NEIGHBOR"):
"""Upsampling.
This operator takes data as input and does 2D scaling to the given scale factor.
In the default case, where the data_layout is `NCHW`
with data of shape (n, c, h, w)
out will have a shape (n, c, h*scale, w*scale)
    method indicates the algorithm to be used while calculating the output value
and method can be one of ("BILINEAR", "NEAREST_NEIGHBOR")
Parameters
----------
data : relay.Expr
The input data to the operator.
scale : relay.Expr
The scale factor for upsampling.
layout : str, optional
Layout of the input.
method : str, optional
        Scale method to be used [NEAREST_NEIGHBOR, BILINEAR].
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.upsampling(data, scale, layout, method)
def batch_flatten(data):
"""BatchFlatten.
    This operator flattens all the dimensions except for the batch dimension,
    which results in a 2D output.
For data with shape ``(d1, d2, ..., dk)``
batch_flatten(data) returns reshaped output of shape ``(d1, d2*...*dk)``.
Parameters
----------
data : relay.Expr
The input data to the operator.
Returns
-------
result: relay.Expr
The Flattened result.
"""
return _make.batch_flatten(data)
def dense(data, weight, units=None):
"""Dense operator.
Applies a linear transformation
.. math::
`Y = X * W`
Parameters
----------
data : relay.Expr
The input data to the operator.
weight : relay.Expr
The weight expressions.
units : int, optional
Number of hidden units of the dense transformation.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.dense(data, weight, units)
def relu(data):
"""Rectified linear unit.
.. math::
out = max(x, 0)
Parameters
----------
data : relay.Expr
The input data
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.relu(data)
def leaky_relu(data, alpha):
"""This operator takes data as input and does Leaky version
of a Rectified Linear Unit.
.. math::
`y = x > 0 ? x : alpha * x`
Parameters
----------
data : relay.Expr
The input data to the operator.
alpha : float
Slope coefficient for the negative half axis.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.leaky_relu(data, alpha)
def pad(data,
pad_width,
pad_value=0.0):
r"""Padding
This operator takes in a tensor and pads each axis by the specified
widths using the specified value.
Parameters
----------
data: relay.Expr
The input data to the operator
pad_width: tuple of <tuple of <int>>, required
Number of values padded to the edges of each axis, in the format
of ((before_1, after_1), ..., (before_N, after_N))
pad_value: float, optional, default=0.0
The value used for padding
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.pad(data, pad_width, pad_value)
def lrn(data, size=5, axis=1, bias=2, alpha=.00001, beta=0.75):
"""This operator takes data as input and does local response normalization.
Normalize the input in a local region across or within feature maps.
    Each input value is divided by (bias + (alpha * sum_data^2 / size))^beta,
    where size is the extent of each local region, and the sum is taken over the region
    centered at that value (zero padding is added where necessary).
.. math::
(data / (bias + (alpha * sum_data ^2 /size))^beta)
Parameters
----------
data : relay.Expr
The input data to the operator.
size : int, optional
The size of the local region to be considered for normalization.
axis : int, optional
Input data layout channel axis. Default value is 1 for NCHW format
bias : float, optional
The offset parameter to avoid dividing by 0.
alpha : float, optional
The scaling parameter.
beta : float, optional
The exponent parameter.
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.lrn(data, size, axis, alpha, beta, bias)
def l2_normalize(data, eps, axis=None):
"""Perform L2 normalization on the input data
.. math::
y(i, j) = x(i, j) / sqrt(max(sum(x^2), eps))
Parameters
----------
data : relay.Expr
The input data to the operator.
eps : float
epsilon value
axis : list of int, optional
        axis over which the normalization is applied
Returns
-------
result : relay.Expr
The computed result.
"""
return _make.l2_normalize(data, eps, axis)
def dropout(data, rate=0.5):
"""Applies the dropout operation to the input array.
    During training, each element of the input is set to zero with
    probability ``rate``. The whole array is rescaled by ``1/(1-rate)``
    to keep the expected sum of the input unchanged.
Parameters
----------
data : relay.Expr
The input data to the operator.
rate : float, optional (default=0.5)
The probability for an element to be reset to 0.
Returns
-------
result : relay.Tuple([relay.Expr, relay.Expr])
The first member of the tuple is the result of dropping elements from ``data``
and rescaling. The second member is a "mask" tensor, which is of the same
shape and data type as ``data`` and, for each element in ``data``, is 1.0
if the element was not dropped and 0.0 if it was.
"""
result = _make.dropout(data, rate)
return TupleWrapper(result, 2)
def batch_norm(data, gamma, beta, moving_mean, moving_var,
axis=1, epsilon=1e-5, center=True, scale=True):
r"""
Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
.. math::
data\_mean[i] = mean(data[:,i,:,...]) \\
data\_var[i] = var(data[:,i,:,...])
Then compute the normalized output, which has the same shape as input, as following:
.. math::
out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}}
* gamma[i] + beta[i]
    Both *mean* and *var* return a scalar by treating the input as a vector.
Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
have shape *(k,)*.
Besides the inputs and the outputs, this operator accepts two auxiliary
states, ``moving_mean`` and ``moving_var``, which are *k*-length
vectors. They are global statistics for the whole dataset, which are updated by::
moving_mean = moving_mean * momentum + data_mean * (1 - momentum)
moving_var = moving_var * momentum + data_var * (1 - momentum)
The parameter ``axis`` specifies which axis of the input shape denotes
the 'channel' (separately normalized groups). The default is 1.
Specifying -1 sets the channel axis to be the last item in the input shape.
.. note::
This operator can be optimized away for inference.
Parameters
----------
data : relay.Expr
Input to which batch_norm will be applied.
gamma : relay.Expr
The gamma scale factor.
beta : relay.Expr
The beta offset factor.
moving_mean : relay.Expr
        Running mean of input.
moving_var : relay.Expr
Running variance of input.
axis : int, optional, default=1
Specify along which shape axis the channel is specified.
epsilon : double, optional, default=1e-5
        Small float added to variance to avoid dividing by zero.
center : boolean, optional, default=True
        If True, add offset of beta to normalized tensor. If False,
        beta is ignored.
scale : boolean, optional, default=True
If true, multiply by gamma. If False, gamma is not used.
When the next layer is piecewise linear (also e.g. nn.relu),
        this can be disabled since the scaling will be done by the next layer.
Returns
-------
result : relay.Tuple([relay.Expr, relay.Expr, relay.Expr])
Tuple of normed data (same shape as input), new running mean (k-length vector),
and new running variance (k-length vector)
"""
result = _make.batch_norm(data, gamma, beta, moving_mean, moving_var,
axis, epsilon, center, scale)
return TupleWrapper(result, 3)
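# Usage sketch (illustrative; the variable names are hypothetical):
#     bn = batch_norm(data, gamma, beta, moving_mean, moving_var, axis=1)
#     normed, new_mean, new_var = bn[0], bn[1], bn[2]
# The TupleWrapper lets each of the three results be used as a separate relay.Expr.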
|
from django.conf.urls import patterns, url
urlpatterns = patterns('testproject.myapp.views',
url(r'^', 'home')
)
|
import logging
import os
from datetime import datetime
from OTLMOW.ModelGenerator.StringHelper import wrap_in_quotes
from OTLMOW.PostenMapping.PostenCollector import PostenCollector
from OTLMOW.PostenMapping.StandaardPost import StandaardPost
class PostenCreator:
def __init__(self, postenCollector: PostenCollector):
self.postenCollector = postenCollector
logging.info("Created an instance of PostenCreator")
self.datablock_lijst_import = []
self.datablock_lijst = []
def create_all_mappings(self):
logging.info('started creating model at ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
self.postenCollector.collect()
self.combine_mappings_and_posten()
self.create_standaardposten()
logging.info('finished creating model at ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
def create_standaardposten(self):
for post in self.postenCollector.standaardposten:
try:
dataToWrite = self.create_datablock_from_post(post)
                if dataToWrite is None or len(dataToWrite) == 0:
                    logging.info(f"Could not create a class for {post.nummer}")
                    continue
self.writeToFile(post, dataToWrite)
logging.info(f"Created a class for {post.nummer}")
self.add_to_lijst(post)
except BaseException as e:
logging.error(str(e))
logging.error(f"Could not create a class for {post.nummer}")
self.create_lijst()
def combine_mappings_and_posten(self):
for post in self.postenCollector.standaardposten:
mappings = self.postenCollector.find_mappings_by_postnummer(post.nummer)
post.mappings = mappings
def create_datablock_from_post(self, post: StandaardPost) -> [str]:
datablock = ['# coding=utf-8',
"from OTLMOW.PostenMapping.StandaardPost import StandaardPost",
"from OTLMOW.PostenMapping.StandaardPostMapping import StandaardPostMapping",
"", "",
'# Generated with PostenCreator. To modify: extend, do not edit',
f"class Post{post.nummer.replace('.', '')}(StandaardPost):",
" def __init__(self):",
" super().__init__(",
f" nummer='{post.nummer}',",
f" beschrijving={wrap_in_quotes(post.beschrijving)},",
f" meetstaateenheid='{post.meetstaateenheid}',",
" mappings=[StandaardPostMapping("]
for mapping in post.mappings:
datablock.append(f" typeURI={wrap_in_quotes(mapping.typeURI)},")
datablock.append(f" attribuutURI={wrap_in_quotes(mapping.attribuutURI)},")
datablock.append(f" dotnotatie={wrap_in_quotes(mapping.dotnotatie)},")
datablock.append(f" defaultWaarde={wrap_in_quotes(mapping.defaultWaarde)},")
datablock.append(f" range={wrap_in_quotes(mapping.range)},")
datablock.append(f" usagenote={wrap_in_quotes(mapping.usagenote)},")
datablock.append(f" isMeetstaatAttr={mapping.isMeetstaatAttr},")
datablock.append(f" isAltijdInTeVullen={mapping.isAltijdInTeVullen},")
datablock.append(f" isBasisMapping={mapping.isBasisMapping},")
datablock.append(f" mappingStatus={wrap_in_quotes(mapping.mappingStatus)},")
datablock.append(f" mappingOpmerking={wrap_in_quotes(mapping.mappingOpmerking)},")
datablock.append(f" standaardpostnummer='{mapping.standaardpostnummer}')")
datablock.append(f' , StandaardPostMapping(')
datablock.pop(-1)
datablock[-1] = datablock[-1] + '])'
return datablock
@staticmethod
def writeToFile(post: StandaardPost, dataToWrite: [str]):
base_dir = os.path.dirname(os.path.realpath(__file__))
path = f"{base_dir}/../PostenMapping/Model/Post{post.nummer.replace('.', '')}.py"
with open(path, "w", encoding='utf-8') as file:
for line in dataToWrite:
file.write(line + "\n")
def add_to_lijst(self, post: StandaardPost):
self.datablock_lijst_import.append(
f"from OTLMOW.PostenMapping.Model.Post{post.nummer.replace('.', '')} import Post{post.nummer.replace('.', '')}")
self.datablock_lijst.append(f" '{post.nummer}': Post{post.nummer.replace('.', '')}(),")
def create_lijst(self):
datablock = []
datablock.extend(self.datablock_lijst_import)
datablock.append("")
datablock.append("")
datablock.append("class PostenLijst:")
datablock.append(" def __init__(self):")
datablock.append(" self.lijst = {")
datablock.extend(self.datablock_lijst)
datablock[-1] = datablock[-1][:-1]
datablock.append(" }")
base_dir = os.path.dirname(os.path.realpath(__file__))
path = f"{base_dir}/../PostenMapping/PostenLijst.py"
with open(path, "w", encoding='utf-8') as file:
for line in datablock:
file.write(line + "\n")
|
import numpy as np
import matplotlib.pyplot as plt
W = np.loadtxt('sol.csv', skiprows=1, delimiter=',')
# Density of solid phase
plt.figure(1)
plt.plot(W[:,0], W[:,1], linestyle='-', linewidth =0.3, color='black', marker = 'o', mew=1.0, ms=1.0)
plt.xlabel(r"$x$")
plt.ylabel(r"$\rho_s$")
plt.savefig("RHO_s.pdf")
# Density of gas phase
plt.figure(2)
plt.plot(W[:,0], W[:,4], linestyle='-', linewidth =0.3, color='black', marker = 'o', mew=1.0, ms=1.0)
plt.xlabel(r"$x$")
plt.ylabel(r"$\rho_g$")
plt.savefig("RHO_g.pdf")
"""
plt.figure(1)
plt.plot(W[:,0], W[:,3], linestyle='-', linewidth =0.3, marker = 'o', mew=1.0, ms=1.0, label='solid-phase')
plt.plot(W[:,0], W[:,6], linestyle='-', linewidth =0.3, marker = 'o', mew=1.0, ms=1.0, label='gas-phase')
plt.xlabel(r"$x$")
plt.ylabel(r"$p$")
plt.legend()
plt.savefig("P.pdf")
plt.figure(2)
plt.plot(W[:,0], W[:,2], linestyle='-', linewidth =0.3, marker = 'o', mew=1.0, ms=1.0, label='solid-phase')
plt.plot(W[:,0], W[:,5], linestyle='-', linewidth =0.3, marker = 'o', mew=1.0, ms=1.0, label='gas-phase')
plt.xlabel(r"$x$")
plt.ylabel(r"$u$")
plt.legend()
plt.savefig("U.pdf")
"""
|
from google.cloud import automl
from core_engine import logger
logging = logger(__name__)
def evaluate_model(
project_id: str, region: str, model_id: str, evlauation_filter: str = ""
):
"""[Evalaute Model Summary]
Args:
project_id (str): [Unique Identifier for your Project]
region (str): [Region]
model_id (str): [Unique Identifier for your Model]
evlauation_filter (str, optional): [description]. Defaults to "".
Raises:
error: [Error]
Returns:
[type]: [description]
"""
try:
logging.info(f"Evaluate Model: {project_id}")
logging.info(f"{model_id=}")
client = automl.AutoMlClient()
model_full_id = client.model_path(project_id, region, model_id)
return client.list_model_evaluations(
parent=model_full_id, filter=evlauation_filter
)
except Exception as error:
logging.error(f"{error=}")
raise error
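# Usage sketch (illustrative; the project, region and model identifiers are hypothetical):
#     for evaluation in evaluate_model("my-project", "us-central1", "TCN1234567890"):
#         print(evaluation.name)
# The returned pager can be iterated to inspect each model evaluation resource.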
|
# https://leetcode.com/problems/flip-game/submissions/
class Solution:
def generatePossibleNextMoves(self, s):
"""
:type s: str
:rtype: List[str]
"""
out = []
for i in range(0, len(s) - 1):
if s[i:i+2] == '++': out.append(s[:i] + '--' + s[i+2:])
return out
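# Example:
#     Solution().generatePossibleNextMoves("++++")
#     -> ['--++', '+--+', '++--']
# Each move flips one pair of consecutive '+' characters into '--'.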
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : deep-learning-with-python-notebooks
@File : ch0503_pretained_convnet.py
@Version : v0.1
@Time : 2019-11-21 10:47
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "Deep Learning with Python", Francois Chollet, Sec0503, P115
@Desc : Deep learning for computer vision: using a pretrained convolutional neural network
"""
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import winsound
from keras import activations
from keras import layers
from keras import losses
from keras import metrics
from keras import models
from keras import optimizers
from keras.applications import VGG16
from keras.preprocessing.image import ImageDataGenerator
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Set the display precision to 3 decimal places
np.set_printoptions(precision = 3, suppress = True, threshold = np.inf, linewidth = 200)
# to make this notebook's output stable across runs
seed = 42
np.random.seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# The three arguments of VGG16():
# weights: the weight checkpoint used to initialize the model
# include_top: whether the model ends with the densely connected classifier (ImageNet's classifier covers 1000 classes);
#              here we use our own densely connected classifier for two classes: cat and dog
# input_shape: the shape of the image tensors fed into the network; if omitted, the network can handle inputs of any shape.
conv_base = VGG16(weights = 'imagenet', include_top = False, input_shape = (150, 150, 3))
vgg16_out_dims_product = 4 * 4 * 512
# Note: (4, 4, 512) is the shape of the last layer output by the VGG16 network; see the summary() function for details
# conv_base.summary()
base_dir = "C:/Users/Administrator/PycharmProjects/Data/small_datasets"
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
epochs = 30
batch_size = 20
data_gen = ImageDataGenerator(rescale = 1. / 255)
def extract_feature(directory, sample_count):
features = np.zeros(shape = (sample_count, 4, 4, 512))
labels = np.zeros(shape = (sample_count,))
generator = data_gen.flow_from_directory(
directory, target_size = (150, 150), batch_size = batch_size, class_mode = 'binary'
)
print("\t 数据总量 = {},数据处理中...".format(sample_count))
step = (sample_count // batch_size) // 10
for i, (inputs_batch, labels_batch) in enumerate(generator):
if i % step == 0:
print("\t 正在处理第{}个数据".format(i * batch_size + 1))
features_batch = conv_base.predict(inputs_batch)
features[i * batch_size:(i + 1) * batch_size] = features_batch
labels[i * batch_size:(i + 1) * batch_size] = labels_batch
        # The generator yields data indefinitely, so the loop must be terminated once all images have been read
if (i + 1) * batch_size >= sample_count:
break
return features, labels
print("处理验证数据集的图片")
validation_features, validation_labels = extract_feature(validation_dir, 1000)
validation_features = np.reshape(validation_features, (1000, vgg16_out_dims_product))
print("处理训练数据集的图片")
train_features, train_labels = extract_feature(train_dir, 2000)
train_features = np.reshape(train_features, (2000, vgg16_out_dims_product))
# print("处理临时验证数据集的图片,利用临时数据集加快训练速度,发现代码中的问题")
# validation_features, validation_labels = extract_feature(os.path.join(base_dir, 'tmp_val'), 200)
# validation_features = np.reshape(validation_features, (200, vgg16_out_dims_product))
# print("处理临时训练数据集的图片,利用临时数据集加快训练速度,发现代码中的问题")
# train_features, train_labels = extract_feature(os.path.join(base_dir, 'tmp_train'), 200)
# train_features = np.reshape(train_features, (200, vgg16_out_dims_product))
# print("处理测试数据集的图片")
# test_features, test_labels = extract_feature(test_dir, 1000)
# test_features = np.reshape(test_features, (1000, vgg16_out_dims_product))
print("构造模型")
model = models.Sequential()
model.add(layers.Dense(256, activation = activations.relu, input_dim = vgg16_out_dims_product))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation = activations.sigmoid))
model.compile(optimizer = optimizers.rmsprop(lr = 2e-5), loss = losses.binary_crossentropy,
metrics = [metrics.binary_accuracy])
print("训练模型")
# 密集网络层使用多处理并发计算不会死锁,可能卷积网络层使用并发操作可能会发生死锁
history = model.fit(train_features, train_labels, epochs = epochs, batch_size = batch_size,
validation_data = [validation_features, validation_labels], verbose = 2,
use_multiprocessing = True)
binary_accuracy = history.history['binary_accuracy']
val_binary_accuracy = history.history['val_binary_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(1, epochs + 1)
plt.plot(epochs_range, binary_accuracy, 'bo', label = "Training accuracy")
plt.plot(epochs_range, val_binary_accuracy, 'r-', label = "Validation accuracy")
plt.title("Figure 5-15: Training and validation accuracy for simple feature extraction")
plt.legend()
plt.figure()
plt.plot(epochs_range, loss, 'bo', label = "Training loss")
plt.plot(epochs_range, val_loss, 'r-', label = "Validation loss")
plt.title("Figure 5-16: Training and validation loss for simple feature extraction")
plt.legend()
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
# RUN: %PYTHON %s | FileCheck %s
from mlir.ir import *
from mlir.dialects import quant
def run(f):
print("\nTEST:", f.__name__)
f()
return f
# CHECK-LABEL: TEST: test_type_hierarchy
@run
def test_type_hierarchy():
with Context():
i8 = IntegerType.get_signless(8)
any = Type.parse("!quant.any<i8<-8:7>:f32>")
uniform = Type.parse("!quant.uniform<i8<-8:7>:f32, 0.99872:127>")
per_axis = Type.parse("!quant.uniform<i8:f32:1, {2.0e+2,0.99872:120}>")
calibrated = Type.parse("!quant.calibrated<f32<-0.998:1.2321>>")
assert not quant.QuantizedType.isinstance(i8)
assert quant.QuantizedType.isinstance(any)
assert quant.QuantizedType.isinstance(uniform)
assert quant.QuantizedType.isinstance(per_axis)
assert quant.QuantizedType.isinstance(calibrated)
assert quant.AnyQuantizedType.isinstance(any)
assert quant.UniformQuantizedType.isinstance(uniform)
assert quant.UniformQuantizedPerAxisType.isinstance(per_axis)
assert quant.CalibratedQuantizedType.isinstance(calibrated)
assert not quant.AnyQuantizedType.isinstance(uniform)
assert not quant.UniformQuantizedType.isinstance(per_axis)
# CHECK-LABEL: TEST: test_any_quantized_type
@run
def test_any_quantized_type():
with Context():
i8 = IntegerType.get_signless(8)
f32 = F32Type.get()
any = quant.AnyQuantizedType.get(quant.QuantizedType.FLAG_SIGNED, i8, f32,
-8, 7)
# CHECK: flags: 1
print(f"flags: {any.flags}")
# CHECK: signed: True
print(f"signed: {any.is_signed}")
# CHECK: storage type: i8
print(f"storage type: {any.storage_type}")
# CHECK: expressed type: f32
print(f"expressed type: {any.expressed_type}")
# CHECK: storage min: -8
print(f"storage min: {any.storage_type_min}")
# CHECK: storage max: 7
print(f"storage max: {any.storage_type_max}")
# CHECK: storage width: 8
print(f"storage width: {any.storage_type_integral_width}")
# CHECK: quantized element type: !quant.any<i8<-8:7>:f32>
print(f"quantized element type: {any.quantized_element_type}")
# CHECK: !quant.any<i8<-8:7>:f32>
print(any)
assert any == Type.parse("!quant.any<i8<-8:7>:f32>")
# CHECK-LABEL: TEST: test_uniform_type
@run
def test_uniform_type():
with Context():
i8 = IntegerType.get_signless(8)
f32 = F32Type.get()
uniform = quant.UniformQuantizedType.get(
quant.UniformQuantizedType.FLAG_SIGNED, i8, f32, 0.99872, 127, -8, 7)
# CHECK: scale: 0.99872
print(f"scale: {uniform.scale}")
# CHECK: zero point: 127
print(f"zero point: {uniform.zero_point}")
# CHECK: fixed point: False
print(f"fixed point: {uniform.is_fixed_point}")
# CHECK: !quant.uniform<i8<-8:7>:f32, 9.987200e-01:127>
print(uniform)
assert uniform == Type.parse("!quant.uniform<i8<-8:7>:f32, 0.99872:127>")
# CHECK-LABEL: TEST: test_uniform_per_axis_type
@run
def test_uniform_per_axis_type():
with Context():
i8 = IntegerType.get_signless(8)
f32 = F32Type.get()
per_axis = quant.UniformQuantizedPerAxisType.get(
quant.QuantizedType.FLAG_SIGNED,
i8,
f32, [200, 0.99872], [0, 120],
quantized_dimension=1,
storage_type_min=quant.QuantizedType.default_minimum_for_integer(
is_signed=True, integral_width=8),
storage_type_max=quant.QuantizedType.default_maximum_for_integer(
is_signed=True, integral_width=8))
# CHECK: scales: None
print(f"scales: {per_axis.scales}")
# CHECK: zero_points: None
print(f"zero_points: {per_axis.zero_points}")
# CHECK: quantized dim: 1
print(f"quantized dim: {per_axis.quantized_dimension}")
# CHECK: fixed point: False
print(f"fixed point: {per_axis.is_fixed_point}")
# CHECK: !quant.uniform<i8:f32:1, {2.000000e+02,9.987200e-01:120}>
print(per_axis)
assert per_axis == Type.parse(
"!quant.uniform<i8:f32:1, {2.0e+2,0.99872:120}>")
# CHECK-LABEL: TEST: test_calibrated_type
@run
def test_calibrated_type():
with Context():
f32 = F32Type.get()
calibrated = quant.CalibratedQuantizedType.get(f32, -0.998, 1.2321)
# CHECK: min: -0.998
print(f"min: {calibrated.min}")
# CHECK: max: 1.2321
print(f"max: {calibrated.max}")
# CHECK: !quant.calibrated<f32<-0.998:1.232100e+00>>
print(calibrated)
assert calibrated == Type.parse("!quant.calibrated<f32<-0.998:1.2321>>")
|
from django.contrib import admin
from .models import Post, Comment, Like, Follow
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Like)
admin.site.register(Follow)
|
# -*- coding: utf-8 -*-
import json
import os
import shutil
import subprocess
import sys
import time
# environ
PYTHON27 = os.environ['PYTHON27']
ROOT = os.path.realpath(os.path.dirname(__file__))
# useful paths
LOGS_PATH = os.getenv('BROAPT_LOGS_PATH', '/var/log/bro/')
APK_LOG = os.getenv('APK_LOG', '/var/log/bro/tmp/')
os.makedirs(APK_LOG, exist_ok=True)
# return codes
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
def main():
mime = os.environ['BROAPT_MIME']
path = os.environ['BROAPT_PATH']
name = os.path.split(path)[1]
dirname = os.path.splitext(name)[0]
tempdir = os.path.join(APK_LOG, dirname)
    # copy the target into its own temporary directory
    os.makedirs(tempdir, exist_ok=True)
    shutil.copyfile(path, os.path.join(tempdir, name))
# prepare arguments
args = [PYTHON27, 'androPyTool.py', '-s', tempdir]
args.extend(sys.argv[1:])
# prepare environment
cwd = os.path.join(ROOT, 'AndroPyTool')
env = os.environ
# run command
subprocess.check_call(args, cwd=cwd, env=env)
# check output
if os.path.exists(os.path.join(tempdir, 'MW', name)):
rate = True
elif os.path.exists(os.path.join(tempdir, 'BW', name)):
rate = False
else:
return EXIT_FAILURE
result = {'time': time.time(),
'path': path,
'mime': mime,
'rate': rate}
with open(os.path.join(LOGS_PATH, 'rate.log'), 'at', 1) as file:
print(json.dumps(result), file=file)
return EXIT_SUCCESS
if __name__ == '__main__':
sys.exit(main())
|
import argparse, os, math
from theia.channels import invert_with_alpha
from theia.image import load_from_path
from PIL import Image
def main(args):
os.makedirs(args.output, exist_ok=True)
images = load_from_path(args.input)
for (name, img) in images:
# Invert the image
if args.invert:
img = invert_with_alpha(img)
# Pad the image to the given dimensions
if args.padded:
pad = args.padded
canvas = Image.new("RGBA", (pad, pad), color=(255, 255, 255, 0))
corner = (
math.floor((pad - img.size[0]) / 2),
math.floor((pad - img.size[1]) / 2),
)
canvas.paste(img, corner, img)
img = canvas
# Save to new location
# If input = output, this will overwrite
img.save(os.path.join(args.output, f"{name}.png"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input")
parser.add_argument("output")
parser.add_argument("--invert", action="store_true")
parser.add_argument("--padded", type=int)
args = parser.parse_args()
main(args)
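# Usage sketch (illustrative; the script and directory names are hypothetical):
#     python process_images.py ./icons ./icons_padded --invert --padded 512
# This inverts each image (preserving alpha) and centers it on a transparent
# 512x512 canvas before saving it to the output directory.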
|
import argparse
import numpy
def chunk(thing, size):
x = 0
out = []
for val in thing:
out.append(val)
x += 1
if x % size == 0:
yield out
out = []
x = 0
if out:
yield out
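# Example: chunk groups an iterable into lists of at most `size` elements,
# e.g. list(chunk(range(5), 2)) -> [[0, 1], [2, 3], [4]]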
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--rows', type=int, default=0, help='Reshape before printing')
parser.add_argument('--cols', type=int, default=0, help='Reshape before printing')
args = parser.parse_args()
data = numpy.load(args.input)#, dtype=numpy.float32)
if args.rows or args.cols:
data = data.reshape(args.rows, args.cols)
print(' '.join(str(x) for x in data.shape))
for line in chunk(data.flatten(), 32):
for v in line:
print(v, end=" ")
print()
|
from functools import lru_cache
import menpo.io as mio
import menpo3d.io as m3io
import numpy as np
# load the maps between LSFM/Basel etc
@lru_cache()
def map_tddfa_to_basel():
maps = mio.import_pickle(
'/vol/atlas/databases/itwmm/mapping_mein3d_to_tddfa.pkl.gz')
return maps['map_tddfa_to_basel']
def fw_to_fw_cropped():
return mio.import_pickle('/vol/atlas/databases/itwmm/3ddfa_to_trimmed_no_neck_mask.pkl.gz')
@lru_cache()
def template_fw():
return mio.import_pickle('/vol/atlas/databases/itwmm/mein3d_fw_correspond_mean.pkl.gz')
def template_fw_cropped():
return template_fw().from_mask(fw_to_fw_cropped())
# Remappings between BFM [] - Face warehouse [fw] - Face warehouse cropped [fwc]
def map_basel_shape_model_to_fw(shape_model):
shape_model = shape_model.copy()
c = shape_model._components.reshape([shape_model._components.shape[0], -1, 3])
shape_model._components = c[:, map_tddfa_to_basel()].reshape([shape_model._components.shape[0], -1])
shape_model._mean = shape_model._mean.reshape([-1, 3])[map_tddfa_to_basel()].ravel()
shape_model.template_instance = template_fw().from_vector(shape_model._mean)
return shape_model
def map_basel_shape_model_to_fwc(shape_model):
shape_model = shape_model.copy()
c = shape_model._components.reshape([shape_model._components.shape[0], -1, 3])
shape_model._components = c[:, map_tddfa_to_basel()][:, fw_to_fw_cropped()].reshape([shape_model._components.shape[0], -1])
shape_model._mean = shape_model._mean.reshape([-1, 3])[map_tddfa_to_basel()][fw_to_fw_cropped()].ravel()
shape_model.template_instance = template_fw_cropped().from_vector(shape_model._mean)
return shape_model
def map_basel_texture_model_to_fw(texture_model):
texture_model = texture_model.copy()
c = texture_model._components.reshape([texture_model._components.shape[0], -1, 3])
texture_model._components = c[:, map_tddfa_to_basel()].reshape([texture_model._components.shape[0], -1])
texture_model._mean = texture_model._mean.reshape([-1, 3])[map_tddfa_to_basel()].ravel()
return texture_model
def map_basel_texture_model_to_fwc(texture_model):
texture_model = texture_model.copy()
c = texture_model._components.reshape([texture_model._components.shape[0], -1, 3])
texture_model._components = c[:, map_tddfa_to_basel()][:, fw_to_fw_cropped()].reshape([texture_model._components.shape[0], -1])
texture_model._mean = texture_model._mean.reshape([-1, 3])[map_tddfa_to_basel()][fw_to_fw_cropped()].ravel()
return texture_model
# Remap basel landmarks to fw landmarks by expressing as fw indices
def fw_index_for_lms():
basel_model, landmarks = load_basel_shape()
basel_mean = basel_model.mean()
basel_index = np.argmin(basel_mean.distance_to(landmarks), axis=0)
m = np.ones(basel_mean.n_points) * -1
m[basel_index] = np.arange(68)
poses = np.where((m[map_tddfa_to_basel()] >= 0))[0]
new_ids = m[map_tddfa_to_basel()][poses]
return poses[np.argsort(new_ids)]
def load_basel_shape():
shape_model = mio.import_pickle('/vol/atlas/databases/lsfm/shape_PCAModel.pkl', encoding='latin1')
landmarks = m3io.import_landmark_file('./template.ljson').lms
return shape_model, landmarks
def load_basel_texture():
return mio.import_pickle('./texture_PCAModel.pkl', encoding='latin1')
def load_basel_shape_fw():
shape_model, landmarks = load_basel_shape()
return map_basel_shape_model_to_fw(shape_model), landmarks
def load_basel_shape_fwc():
shape_model, landmarks = load_basel_shape()
return map_basel_shape_model_to_fwc(shape_model), landmarks
def load_basel_texture_fw():
return map_basel_texture_model_to_fw(load_basel_texture())
def load_basel_texture_fwc():
return map_basel_texture_model_to_fwc(load_basel_texture())
def load_lsfm_shape_fwc():
tr = mio.import_pickle('/vol/atlas/databases/lsfm/corrective_translation.pkl')
shape_model = mio.import_pickle('/vol/atlas/databases/lsfm/lsfm_shape_model_fw_cropped.pkl')
landmarks = tr.apply(m3io.import_landmark_file('template.ljson').lms)
return shape_model, landmarks
def load_lsfm_texture_fwc():
return mio.import_pickle('/vol/atlas/databases/lsfm/colour_pca_model_fw_cropped.pkl')
def load_lsfm_combined_fw():
shape_model = mio.import_pickle('/vol/atlas/databases/lsfm/combined_model.pkl')
landmarks = m3io.import_landmark_file('template.ljson').lms.from_vector(shape_model.mean().points[fw_index_for_lms()])
return shape_model, landmarks
def load_basel_combined_fw():
shape_model = mio.import_pickle('/vol/atlas/databases/lsfm/basel_combined_model_fw.pkl')
landmarks = m3io.import_landmark_file('template.ljson').lms.from_vector(shape_model.mean().points[fw_index_for_lms()])
return shape_model, landmarks
def load_itwmm_texture_rgb_fwc():
return mio.import_pickle('/vol/atlas/databases/itwmm/itwmm_texture/per_vertex_fw_cropped/rgb/rgb_per_vertex_fw_cropped_texture_model.pkl')
def load_itwmm_texture_fast_dsift_fwc():
return mio.import_pickle('/vol/atlas/databases/itwmm/itwmm_texture/per_vertex_fw_cropped/fast_dsift/pca_model.pkl')
def load_itwmm_texture_fast_dsift_fw():
return mio.import_pickle('/vol/atlas/databases/itwmm/itwmm_texture/per_vertex_fw/fast_dsift.pkl')
def load_itwmm_texture_rgb_fw():
return mio.import_pickle('/vol/atlas/databases/itwmm/itwmm_texture/per_vertex_fw/rgb.pkl')
def load_itwmm_texture_no_mask_fast_dsift_fw():
return mio.import_pickle('/vol/atlas/databases/itwmm/itwmm_texture/per_vertex_fw_no_mask/fast_dsift.pkl')
def load_itwmm_texture_no_mask_rgb_fw():
return mio.import_pickle('/vol/atlas/databases/itwmm/itwmm_texture/per_vertex_fw_no_mask/rgb.pkl')
def load_fw_mean_id_expression_fw():
shape_model = mio.import_pickle('/vol/atlas/databases/lsfm/expression_model_id_mean.pkl')
landmarks = m3io.import_landmark_file('template.ljson').lms.from_vector(shape_model.mean().points[fw_index_for_lms()])
return shape_model, landmarks
def load_fw_expression_fwc():
expression_model = mio.import_pickle('./identity_texture_emotion.pkl')['expression']
expression_model._components /= 100000
expression_model._mean /= 100000
tr = mio.import_pickle('/vol/atlas/databases/lsfm/corrective_translation.pkl')
expression_model._components = tr.apply(expression_model._components.reshape(29, -1, 3)).reshape(29, -1)
expression_model._mean = tr.apply(expression_model._mean.reshape(-1, 3)).reshape(-1)
expression_model.n_active_components = 5
return expression_model
|
import pandas as pd
submission = pd.read_csv(
"/data/zhaoxinying/code/rsna-miccai-2021/user_data/preds/submission.csv",
index_col="BraTS21ID")
ids = [1, 13]
print(ids)
y_pred = [0.8, 0.6]
preddf = pd.DataFrame({"BraTS21ID": ids, "MGMT_value": y_pred})
preddf = preddf.set_index("BraTS21ID")
submission["MGMT_value"] = 0
submission["MGMT_value"] += preddf["MGMT_value"]
# print(submission["MGMT_value"])
print(submission["MGMT_value"])
submission["MGMT_value"].to_csv("/data/zhaoxinying/code/rsna-miccai-2021/user_data/preds/efficientnet3d_b0_lr0.0003_aug256_2/submission_test.csv")
|
"""
Sets - a reference to set theory in Mathematics
# Form 1
s = set({1, 2, 3, 4, 5, 5, 6, 7, 2, 3})
print(s)
print(type(s))
# Form 2
s = set({1, 2, 3, 4, 5, 5})
print(s)
print(type(s))
s1 = {1, 'b', True, 34.22, 44}
print(s1)
print(type(s1))
# Interesting uses of sets
# Adding elements to a set
s = {1, 2, 3}
print(s)
s.add(4)
print(s)
# Removing elements from a set
s = {1, 2, 3}
# Form 1
print(s)
s.remove(1)
print(s)
# Form 2
print(s)
s.discard(3)
print(s)
# Copying one set to another
s = {1, 2, 4, 5}
print(s)
# Form 1 - copy() creates a new, independent set
novo = s.copy()
print(novo)
novo.add(3)
print(novo)
# Form 2 - plain assignment only creates another reference to the same set
novo = s
print(novo)
novo.add(3)
print(novo)
print(s)
s.clear()
print(s)
# Joining sets (union)
a = {1, 2, 3, 4}
b = {3, 5, 6, 7, 8}
# Form 1 - union()
print(a)
print(b)
c = a.union(b)
print(c)
c.add(11)
# Form 2 - pipe operator |
d = a | b | c
print(d)
# Elements present in both sets
# Form 1 - intersection()
e = {1, 2, 3, 4}
f = {3, 5, 6, 7, 8}
g = e.intersection(f)
print(g)
# Form 2 - using &
h = e & f
print(h)
# Distinct elements (difference between sets)
i = {1, 2, 3, 4}
j = {3, 5, 6, 7, 8}
k = i.difference(j)
l = j.difference(i)
print(k)
print(l)
"""
# Sum, max, min and len
m = {3, 5, 6, 7, 8}
print(sum(m))
print(max(m))
print(min(m))
print(len(m))
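# Expected output: 29, 8, 3, 5 (sum, max, min and len of m)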
|
from adv_prodcon import Producer, Consumer
import time
class MyProducer(Producer):
def __init__(self, *args, **kwargs):
tag = kwargs.pop("tag")
Producer.__init__(self, *args, **kwargs)
self.work_kwargs = {"tag": tag}
@staticmethod
def work(on_start_result, state, message_pipe, *args, **kwargs):
return f'Message from {kwargs["tag"]}'
class MyConsumer(Consumer):
@staticmethod
def work(items, on_start_result, state, message_pipe, *args):
for item in items:
print(item)
if __name__ == "__main__":
producer1 = MyProducer(tag="Mary", work_timeout=1)
producer2 = MyProducer(tag="James", work_timeout=1)
consumer = MyConsumer()
producer1.set_subscribers([consumer.get_work_queue()])
producer2.set_subscribers([consumer.get_work_queue()])
producer1.start_new()
producer2.start_new()
consumer.start_new()
time.sleep(5)
|
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import settings
import glob
import json
import os
from django.db import connection
from django.core.management.commands.runserver import Command as BaseCommand
class Command(BaseCommand):
"""
Prints out selected settings in env variable format
"""
help = """Prints out selected settings in env variable format"""
def handle(self, *args, **kwargs):
cursor = connection.cursor()
cursor.execute("SELECT * from api_key()")
(API_USER, API_KEY) = cursor.fetchone()
cursor.close()
DB = settings.DATABASES.get("default")
config = {
"WARP_DRIVE_PORT": settings.WARP_DRIVE_PORT,
"MAILBOX_PORT": settings.MAILBOX_PORT,
"REPORT_PORT": settings.REPORT_PORT,
"DEVICE_AGGREGATOR_PORT": settings.DEVICE_AGGREGATOR_PORT,
"HTTP_FRONTEND_PORT": settings.HTTP_FRONTEND_PORT,
"HTTPS_FRONTEND_PORT": settings.HTTPS_FRONTEND_PORT,
"HTTP_AGENT_PROXY_PASS": settings.HTTP_AGENT_PROXY_PASS,
"HTTP_AGENT2_PORT": settings.HTTP_AGENT2_PORT,
"HTTP_AGENT2_PROXY_PASS": settings.HTTP_AGENT2_PROXY_PASS,
"HTTP_API_PROXY_PASS": settings.HTTP_API_PROXY_PASS,
"IML_API_PORT": settings.IML_API_PORT,
"IML_API_PROXY_PASS": settings.IML_API_PROXY_PASS,
"WARP_DRIVE_PROXY_PASS": settings.WARP_DRIVE_PROXY_PASS,
"MAILBOX_PROXY_PASS": settings.MAILBOX_PROXY_PASS,
"REPORT_PROXY_PASS": settings.REPORT_PROXY_PASS,
"SSL_PATH": settings.SSL_PATH,
"DEVICE_AGGREGATOR_PROXY_PASS": settings.DEVICE_AGGREGATOR_PROXY_PASS,
"UPDATE_HANDLER_PROXY_PASS": settings.UPDATE_HANDLER_PROXY_PASS,
"GRAFANA_PORT": settings.GRAFANA_PORT,
"GRAFANA_PROXY_PASS": settings.GRAFANA_PROXY_PASS,
"INFLUXDB_SERVER_FQDN": settings.INFLUXDB_SERVER_FQDN,
"INFLUXDB_PROXY_PASS": settings.INFLUXDB_PROXY_PASS,
"TIMER_PORT": settings.TIMER_PORT,
"TIMER_SERVER_FQDN": settings.TIMER_SERVER_FQDN,
"TIMER_PROXY_PASS": settings.TIMER_PROXY_PASS,
"ALLOW_ANONYMOUS_READ": json.dumps(settings.ALLOW_ANONYMOUS_READ),
"BUILD": settings.BUILD,
"IS_RELEASE": json.dumps(settings.IS_RELEASE),
"LOG_PATH": settings.LOG_PATH,
"SERVER_HTTP_URL": settings.SERVER_HTTP_URL,
"SITE_ROOT": settings.SITE_ROOT,
"VERSION": settings.VERSION,
"API_USER": API_USER,
"API_KEY": API_KEY,
"REPORT_PATH": settings.REPORT_PATH,
"PROXY_HOST": settings.PROXY_HOST,
"INFLUXDB_IML_DB": settings.INFLUXDB_IML_DB,
"INFLUXDB_STRATAGEM_SCAN_DB": settings.INFLUXDB_STRATAGEM_SCAN_DB,
"INFLUXDB_IML_STATS_DB": settings.INFLUXDB_IML_STATS_DB,
"INFLUXDB_IML_STATS_LONG_DURATION": settings.INFLUXDB_IML_STATS_LONG_DURATION,
"INFLUXDB_PORT": settings.INFLUXDB_PORT,
"DB_HOST": DB.get("HOST"),
"DB_NAME": DB.get("NAME"),
"DB_USER": DB.get("USER"),
"DB_PASSWORD": DB.get("PASSWORD"),
"REPO_PATH": settings.REPO_PATH,
"AMQP_BROKER_USER": settings.AMQP_BROKER_USER,
"AMQP_BROKER_PASSWORD": settings.AMQP_BROKER_PASSWORD,
"AMQP_BROKER_VHOST": settings.AMQP_BROKER_VHOST,
"AMQP_BROKER_HOST": settings.AMQP_BROKER_HOST,
"AMQP_BROKER_PORT": settings.AMQP_BROKER_PORT,
"AMQP_BROKER_URL": settings.BROKER_URL,
"BRANDING": settings.BRANDING,
"USE_STRATAGEM": settings.USE_STRATAGEM,
"DBLOG_HW": settings.DBLOG_HW,
"DBLOG_LW": settings.DBLOG_LW,
}
if settings.EXA_VERSION:
config["EXA_VERSION"] = settings.EXA_VERSION
xs = map(lambda x: "{0}={1}".format(x[0], x[1]), config.items())
print("\n".join(xs))
|
import os
import multiprocessing
import numpy as np
class ActiveLearningExperiment:
def __init__(self, query_method, instance, gpu_index, dataset_name,
model, train_func, num_init, num_add_per_iter,
num_iter, x_train, y_train, x_test, y_test):
"""
constructor
:param query_method: Query method, child of Query class
:param instance: Instance of ALexp for this configuration (we may need to run multiple)
:param gpu_index: index of gpu to run the experiment on
:param dataset_name: for logging purposes
:param model: function to build a keras model (input_shape, num_classes)
:param train_func: Training function (see example)
:param num_init: number of initial labels
:param num_add_per_iter: number of labels to add in each iteration
:param num_iter: number of iterations
"""
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_index)
# https://stackoverflow.com/questions/26239695/parallel-execution-of-class-methods
# If we would like to execute this class in parallel, we need to use
# a process queue so that methods can pass data back up
self.manager = multiprocessing.Manager()
self.query_func = query_method
self.query_name = self.query_func.__name__
# build model
self.model = model(x_train[0].shape, len(y_train[0]))
self.train_func = train_func
self.num_add_per_iter = num_add_per_iter
self.num_iter = num_iter
self.labeled_idx = np.random.choice(x_train.shape[0], num_init, replace=False)
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.test_acc_hist = self.manager.list()
self.labeled_idx_hist = self.manager.list()
self.log_dir = 'logs/{0}_{1}_{2}_{3}_{4}.txt'.format(self.query_name, instance, num_init, num_add_per_iter, dataset_name)
def _al_iter(self):
"""
One step of active learning.
1. train model
2. log test acc and history of labeled indices
3. add new labels to labeled set
"""
# 1. Train using the given keras Model object
test_acc, trained_model = self.train_func(self.model,
self.x_train[self.labeled_idx],
self.y_train[self.labeled_idx],
self.x_test,
self.y_test)
# 2. Log
self.test_acc_hist.append(test_acc)
self.labeled_idx_hist.append(self.labeled_idx)
with open(self.log_dir, 'w') as f:
f.write(str(self.test_acc_hist).strip('[]'))
f.write('\n')
for _iter in range(len(self.labeled_idx_hist)):
f.write(str(self.labeled_idx_hist[_iter]).strip('[]'))
# 3. Query using the new model we just learned
query_obj = self.query_func(trained_model, self.x_train.shape[0], self.y_train.shape[0], 1)
self.labeled_idx = query_obj.query(self.x_train, self.y_train, self.labeled_idx, self.num_add_per_iter)
def begin_al_loop(self, ):
for _iter in range(self.num_iter):
self._al_iter()
|
import itertools
import os
import pytest
import shapely.geometry
from sentinelhub import (
CRS,
BBox,
BBoxSplitter,
CustomGridSplitter,
DataCollection,
OsmSplitter,
TileSplitter,
UtmGridSplitter,
UtmZoneSplitter,
read_data,
)
from sentinelhub.testing_utils import get_input_folder
geojson = read_data(os.path.join(get_input_folder(__file__), "cies_islands.json"))
AREA = shapely.geometry.shape(geojson)
BBOX_GRID = [
BBox((x / 10, y / 100, (x + 1) / 10, (y + 1) / 100), CRS.WGS84)
for x, y in itertools.product(range(-90, -87), range(4200, 4250))
]
@pytest.mark.parametrize(
"constructor, args, kwargs, bbox_len",
[
[BBoxSplitter, ([AREA], CRS.WGS84, 5), dict(reduce_bbox_sizes=True), 19],
[OsmSplitter, ([AREA], CRS.WGS84, 15), dict(reduce_bbox_sizes=True), 24],
[
CustomGridSplitter,
([AREA], CRS.WGS84, BBOX_GRID),
dict(bbox_split_shape=(3, 4), reduce_bbox_sizes=False),
41,
],
[UtmGridSplitter, ([AREA], CRS.WGS84), dict(bbox_size=(1200, 1200)), 16],
[UtmZoneSplitter, ([AREA], CRS.WGS84), dict(bbox_size=(1000, 1000)), 19],
[UtmZoneSplitter, ([AREA], CRS.WGS84), dict(bbox_size=(1000, 1000), offset=(500, 500)), 21],
pytest.param(
TileSplitter,
([AREA], CRS.WGS84, ("2017-10-01", "2018-03-01")),
dict(tile_split_shape=40, data_collection=DataCollection.SENTINEL2_L1C, reduce_bbox_sizes=True),
13,
marks=pytest.mark.sh_integration,
),
pytest.param(
TileSplitter,
([AREA], CRS.WGS84, ("2020-10-01", "2020-10-05")),
dict(tile_split_shape=10, data_collection=DataCollection.LANDSAT_OT_L2, reduce_bbox_sizes=True),
3,
marks=pytest.mark.sh_integration,
),
],
)
def test_return_type(constructor, args, kwargs, bbox_len):
splitter = constructor(*args, **kwargs)
return_lists = [
(splitter.get_bbox_list(buffer=0.2), BBox),
(splitter.get_info_list(), dict),
(splitter.get_geometry_list(), (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)),
]
for return_list, item_type in return_lists:
assert isinstance(return_list, list)
assert len(return_list) == bbox_len
for return_item in return_list:
assert isinstance(return_item, item_type)
|
import mne
import numpy as np
'''
https://realpython.com/pypi-publish-python-package/#publishing-to-pypi
https://towardsdatascience.com/how-to-publish-a-python-package-to-pypi-7be9dd5d6dcd
'''
class sim_data:
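    """
    Helper that builds a simulated mne.EpochsArray whose channels contain
    10 Hz sinusoids with a random phase per channel and epoch (see gen_epochs).
    """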
def __init__ (self):
b = 1
def gen_epochs (self, nchannels=None, labels=None, sfreq=None,
                     ntimes=None, n_epochs=None):
# Generate data
if (nchannels is None) & (labels is None):
labels_ch = ['FP1', 'FP2', 'F3', 'F4', 'F7', 'F8', 'C3', 'C4',
'T3', 'T4', 'O1', 'O2']
N_channels = len ( labels_ch )
elif (labels is None) & (nchannels is not None):
if not isinstance ( nchannels, int ):
                raise TypeError('Please provide type int')
N_channels = nchannels
nrange_ch = range ( 0, nchannels )
labels_ch = [f'EEG_{idx}' for idx in nrange_ch]
elif (nchannels is None) & (labels is not None):
if not isinstance ( labels, list ):
                raise TypeError('Please provide type list: ["EEG_1","EEG_2"]')
labels_ch = labels
N_channels = len ( labels )
        N_epochs = 5 if n_epochs is None else n_epochs
N_times = 1000 if ntimes is None else ntimes
# Set sampling freq
Sfreqs = 250 if sfreq is None else sfreq # A reasonable random choice
np.random.seed ( 42 )
data = np.random.rand ( N_epochs, N_channels, N_times )
# 10Hz sinus waves with random phase differences in each channel and epoch
# Generate 10Hz sinus waves to show difference between connectivity
# over time and over trials. Here we expect con over time = 1
for i in range ( N_epochs ):
for c in range ( N_channels ):
wave_freq = 10
epoch_len = N_times / Sfreqs
# Introduce random phase for each channel
phase = np.random.rand ( 1 ) * 10
# Generate sinus wave
x = np.linspace ( -wave_freq * epoch_len * np.pi + phase,
wave_freq * epoch_len * np.pi + phase, N_times )
data [i, c] = np.squeeze ( np.sin ( x ) )
info = mne.create_info ( ch_names=labels_ch,
ch_types=['eeg'] * len ( labels_ch ),
sfreq=Sfreqs )
epochs = mne.EpochsArray ( data, info )
return epochs
# sg = sim_data()
# ep = sg.gen_epochs ( labels=['A','B','C'] )
|
import torch.nn.functional as F
import torch.nn as nn
import torch
import torch.optim as optim
import numpy as np
import math
from torch.nn import init
class NoisyLinear(nn.Module):
"""Factorised Gaussian NoisyNet"""
def __init__(self, in_features, out_features, sigma0=0.5):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.bias = nn.Parameter(torch.Tensor(out_features))
self.noisy_weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.noisy_bias = nn.Parameter(torch.Tensor(out_features))
self.noise_std = sigma0 / math.sqrt(self.in_features)
self.reset_parameters()
self.register_noise()
def register_noise(self):
in_noise = torch.FloatTensor(self.in_features)
out_noise = torch.FloatTensor(self.out_features)
noise = torch.FloatTensor(self.out_features, self.in_features)
self.register_buffer('in_noise', in_noise)
self.register_buffer('out_noise', out_noise)
self.register_buffer('noise', noise)
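    # Factorised noise: per-input and per-output Gaussian vectors are drawn and
    # their outer product perturbs the weights, while out_noise alone perturbs the bias.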
def sample_noise(self):
self.in_noise.normal_(0, self.noise_std)
self.out_noise.normal_(0, self.noise_std)
self.noise = torch.mm(self.out_noise.view(-1, 1), self.in_noise.view(1, -1))
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
self.noisy_weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
self.noisy_bias.data.uniform_(-stdv, stdv)
def forward(self, x):
"""
        Note: fresh noise is sampled on every forward pass while the module is in training mode
"""
normal_y = nn.functional.linear(x, self.weight, self.bias)
if self.training:
# update the noise once per update
self.sample_noise()
noisy_weight = self.noisy_weight * self.noise
noisy_bias = self.noisy_bias * self.out_noise
noisy_y = nn.functional.linear(x, noisy_weight, noisy_bias)
return noisy_y + normal_y
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) + ')'
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class CnnActorCriticNetwork3(nn.Module):
def __init__(self, input_size, output_size, use_noisy_net=False):
super(CnnActorCriticNetwork3, self).__init__()
if use_noisy_net:
print('Use NoisyNet')
linear = NoisyLinear
else:
linear = nn.Linear
self.feature = nn.Sequential(
nn.Conv2d(4, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
Flatten(),
linear(7 * 7 * 64, 256),
nn.ReLU(),
linear(256, 448),
nn.ReLU()
)
self.actor = nn.Sequential(
linear(448, 448),
nn.ReLU(),
linear(448, output_size)
)
self.extra_layer = nn.Sequential(
linear(448, 448),
nn.ReLU()
)
self.critic_ext = linear(448, 1)
self.critic_int = linear(448, 1)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
init.orthogonal_(m.weight, np.sqrt(2))
m.bias.data.zero_()
init.orthogonal_(self.critic_ext.weight, 0.01)
self.critic_ext.bias.data.zero_()
init.orthogonal_(self.critic_int.weight, 0.01)
self.critic_int.bias.data.zero_()
for i in range(len(self.actor)):
if type(self.actor[i]) == nn.Linear:
init.orthogonal_(self.actor[i].weight, 0.01)
self.actor[i].bias.data.zero_()
for i in range(len(self.extra_layer)):
if type(self.extra_layer[i]) == nn.Linear:
init.orthogonal_(self.extra_layer[i].weight, 0.1)
self.extra_layer[i].bias.data.zero_()
def forward(self, state):
x = self.feature(state)
action_scores = self.actor(x)
action_probs = F.softmax(action_scores, dim=1)
value_ext = self.critic_ext(self.extra_layer(x) + x)
value_int = self.critic_int(self.extra_layer(x) + x)
return action_probs, value_ext, value_int
class RNDModel3(nn.Module):
def __init__(self, input_size, output_size):
super(RNDModel3, self).__init__()
self.input_size = input_size
self.output_size = output_size
feature_output = 7 * 7 * 64
self.predictor = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=8, stride=4),
nn.LeakyReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.LeakyReLU(),
Flatten(),
nn.Linear(feature_output, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512)
)
self.target = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=8, stride=4),
nn.LeakyReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.LeakyReLU(),
Flatten(),
nn.Linear(feature_output, 512)
)
self.initial = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=8, stride=4),
nn.LeakyReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.LeakyReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.LeakyReLU(),
Flatten(),
nn.Linear(feature_output, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512)
)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
init.orthogonal_(m.weight, np.sqrt(2))
m.bias.data.zero_()
torch.save(self.predictor.state_dict(), 'initial')
self.initial.load_state_dict(torch.load('initial'))
# Set target parameters as untrainable
for param in self.target.parameters():
param.requires_grad = False
for param in self.initial.parameters():
param.requires_grad = False
def forward(self, next_obs):
target_feature = self.target(next_obs)
predict_feature = self.predictor(next_obs)
initial_feature = self.initial(next_obs)
return predict_feature, target_feature, initial_feature
|
from datetime import datetime, timedelta
import requests
from airflow import DAG
from airflow.operators.python import PythonOperator
from app.utils.terminal import print_to_console
from app.models.dto.user_dto import UserDTO
from app.models.dto.address_dto import AddressDTO
from app.models.dto.company_dto import CompanyDTO
from app.models.user import Users
from app.models.address import Address
from app.models.company import Company
def fetch_users():
name: str = "fetch_users: executing"
print_to_console(name)
endpoint: str = "https://jsonplaceholder.typicode.com/users"
response = requests.get(url=endpoint)
data: list = response.json()
_users: list = []
_address: list = []
_companies: list = []
for user in data:
try:
userObject: dict = {
"name": user.get("name"),
"username": user.get("username"),
"email": user.get("email"),
"phone": user.get("phone"),
}
new_user: UserDTO = Users.create(user=userObject)
_users.append(new_user)
address: dict = user.get("address")
addressObject: dict = {
"user_id": new_user.id,
"street": address.get("street"),
"suite": address.get("suite"),
"city": address.get("city"),
"zipcode": address.get("zipcode"),
}
new_address: AddressDTO = Address.create(address=addressObject)
_address.append(new_address)
company: dict = user.get("company")
companyObject: dict = {
"user_id": new_user.id,
"name": company.get("name"),
"catchPhrase": company.get("catchPhrase"),
"bs": company.get("bs"),
}
new_company: CompanyDTO = Company.create(company=companyObject)
_companies.append(new_company)
except Exception:
pass
print_to_console("Done Fetching")
print_to_console(
f"No Users: {len(_users): 10} \nNo Addresses: {len(_address): 10} \nNo Companies: {len(_companies)}"
)
default_args = {
"owner": "airflow",
"depends_on_past": False,
"email": ["fedjioraymond@gmail.com"],
"email_on_failure": ["fedjioraymond@gmail.com"],
"email_on_retry": ["fedjioraymond@gmail.com"],
}
with DAG(
"json_placeholder_server",
default_args=default_args,
description="A simple tutorial DAG",
schedule_interval=None,
start_date=datetime(2021, 12, 2),
catchup=False,
tags=["test"],
) as dag:
fetch_users = PythonOperator(
task_id="fetch_user_from_server", python_callable=fetch_users
)
|
import hashlib
import inspect
import numpy as np
from .generic import isinstance_str
def hash_path(nodes):
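    """
    Build an md5 fingerprint of a pipeline path: input nodes contribute their
    output, Lambda layers the source of their transform_fn, stateful layers the
    source of partial_fit/fit/transform plus kwargs and metadata, and stateless
    layers the source of transform plus kwargs, so the digest changes whenever
    the underlying code or configuration changes.
    """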
h = hashlib.md5()
for node in nodes:
to_str = []
if isinstance_str(node, 'InputNode'):
to_str.append(node.output)
else:
if isinstance_str(node.layer, 'Lambda'):
to_str.append(inspect.getsource(node.layer.transform_fn))
elif isinstance_str(node.layer, 'StatefulLayer'):
to_str.append(inspect.getsource(node.layer.partial_fit))
to_str.append(inspect.getsource(node.layer.fit))
to_str.append(inspect.getsource(node.layer.transform))
to_str += list(node.kwargs.values())
for metadata in node.layer.metadata.values():
to_str.append(metadata)
elif isinstance_str(node.layer, 'StatelessLayer'):
to_str.append(inspect.getsource(node.layer.transform))
to_str += list(node.layer.kwargs.values())
for s in to_str:
if isinstance_str(s, 'ndarray'):
h.update(str(bytes(s)).encode())
else:
h.update(str(s).encode())
return h.hexdigest()
|
import tensorflow as tf
import numpy
import logging
logger = logging.getLogger()
from mlqm import DEFAULT_TENSOR_TYPE
from mlqm.hamiltonians import Hamiltonian
class NuclearPotential(Hamiltonian):
"""Nuclear Physics Potential
"""
def __init__(self, **kwargs):
'''
Arguments:
            mass {float} -- nucleon mass
'''
Hamiltonian.__init__(self, **kwargs)
# Check the parameters have everything needed:
for parameter in ["mass"]:
if parameter not in self.parameters:
raise KeyError(f"Parameter {parameter} not suppliled as keyword arg to HarmonicOscillator")
if 'vkr' in self.parameters:
if self.parameters['vkr'] not in [2, 4, 6]:
raise KeyError(f"Parameter vkr set to {self.parameters['vkr']} but must be 2, 4 or 6")
self.vkr = tf.constant(self.parameters['vkr'], dtype = DEFAULT_TENSOR_TYPE)
else:
logger.info("Setting vkr to 4 in the nuclear potential by default.")
self.vkr = tf.constant(4, dtype = DEFAULT_TENSOR_TYPE)
if self.vkr == 2.0:
self.v0r = tf.constant(-133.3431, dtype=DEFAULT_TENSOR_TYPE)
self.v0s = tf.constant(-9.0212, dtype = DEFAULT_TENSOR_TYPE)
self.ar3b = tf.constant(8.2757658256, dtype = DEFAULT_TENSOR_TYPE)
logger.info(f"Using vkr = {self.vkr}")
elif self.vkr == 4.0:
self.v0r = tf.constant(-487.6128, dtype=DEFAULT_TENSOR_TYPE)
self.v0s = tf.constant(-17.5515, dtype = DEFAULT_TENSOR_TYPE)
self.ar3b = tf.constant(26.0345712467, dtype = DEFAULT_TENSOR_TYPE)
logger.info(f"Using vkr = {self.vkr}")
elif self.vkr == 6.0:
self.v0r = tf.constant(-1064.5010, dtype=DEFAULT_TENSOR_TYPE)
self.v0s = tf.constant(-26.0830, dtype = DEFAULT_TENSOR_TYPE)
self.ar3b = tf.constant(51.5038930567, dtype = DEFAULT_TENSOR_TYPE)
logger.info(f"Using vkr = {self.vkr}")
self.HBAR = tf.constant(197.327, dtype = DEFAULT_TENSOR_TYPE)
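    # Two-body term below: a Gaussian of the regulated distance x = vkr * r_ij,
    # scaled by the central (v0r) and spin-dependent (v0s) strengths.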
@tf.function(experimental_compile=False)
def pionless_2b(self, *, r_ij):
x = self.vkr * r_ij
vr = tf.exp(-x**2/4.0)
return self.v0r*vr, self.v0s*vr
@tf.function(experimental_compile=False)
def pionless_3b(self, *, r_ij, nwalkers):
# pot_3b = tf.zeros(shape=(nwalkers), dtype=DEFAULT_TENSOR_TYPE)
x = self.vkr * r_ij
vr = tf.exp(-x**2/4.0)
pot_3b = vr * self.ar3b
return pot_3b
@tf.function(experimental_compile=False)
def potential_energy(self, *, inputs):
"""Return potential energy
Calculate and return the PE.
Arguments:
inputs {tf.Tensor} -- Tensor of shape [N, dimension], must have graph enabled
Returns:
tf.Tensor - potential energy of shape [1]
"""
# Potential calculation
# Prepare buffers for the output:
# (Walker shape is (self.nwalkers, self.nparticles, self.n) )
nwalkers = inputs.shape[0]
nparticles = inputs.shape[1]
if nparticles == 2:
alpha = 1.0
elif nparticles > 2:
alpha = -1.0
# print("Alpha: ", alpha)
# gr3b = tf.Variable(tf.zeros(shape=[nwalkers,nparticles], dtype=DEFAULT_TENSOR_TYPE))
gr3b = [tf.zeros(shape=[nwalkers], dtype=DEFAULT_TENSOR_TYPE) for p in range(nparticles)]
V_ijk = tf.zeros(shape=[nwalkers], dtype=DEFAULT_TENSOR_TYPE) # three body potential terms
v_ij = tf.zeros(shape=[nwalkers], dtype=DEFAULT_TENSOR_TYPE) # 2 body potential terms:
for i in range (nparticles-1):
for j in range (i+1,nparticles):
#
x_ij = inputs[:,i,:]-inputs[:,j,:]
r_ij = tf.sqrt(tf.reduce_sum(x_ij**2,axis=1))
vrr, vrs = self.pionless_2b(r_ij=r_ij)
# v_ij += self.pionless_2b(r_ij=r_ij, nwalkers=nwalkers)
v_ij += vrr + alpha * vrs
if (nparticles > 2 ):
t_ij = self.pionless_3b(r_ij=r_ij, nwalkers=nwalkers)
gr3b[i] += t_ij
gr3b[j] += t_ij
# gr3b[i] = gr3b[:,i].assign(gr3b[:,i] + t_ij)
# gr3b = gr3b[:,j].assign(gr3b[:,j] + t_ij)
V_ijk -= t_ij**2
# stack up gr3b:
gr3b = tf.stack(gr3b, axis=1)
V_ijk += 0.5 * tf.reduce_sum(gr3b**2, axis = 1)
pe = v_ij + V_ijk
return pe
# @tf.function()
@tf.function(experimental_compile=False)
def compute_energies(self, inputs, logw_of_x, dlogw_dx, d2logw_dx2):
'''Compute PE, KE_JF, and KE_direct
        Nuclear potential and kinetic energy calculations
Arguments:
inputs {[type]} -- walker coordinates (shape is [nwalkers, nparticles, dimension])
logw_of_x {[type]} -- computed wave function at each walker
dlogw_dx {[type]} -- first derivative of wavefunction at each walker
d2logw_dx2 {[type]} -- second derivative of wavefunction at each walker
Raises:
NotImplementedError -- [description]
Returns:
pe -- potential energy
ke_jf -- JF Kinetic energy
            ke_direct -- 2nd deriv computation of kinetic energy
'''
# Potential energy depends only on the wavefunction
pe = self.potential_energy(inputs=inputs)
# KE by parts needs only one derivative
ke_jf = self.kinetic_energy_jf(dlogw_dx=dlogw_dx, M=self.parameters["mass"])
# True, directly, uses the second derivative
ke_direct = self.kinetic_energy(KE_JF = ke_jf, d2logw_dx2 = d2logw_dx2, M=self.parameters["mass"])
return pe, ke_jf, ke_direct
|
"""
datalite.constraints module introduces constraint
types that can be used to hint field variables,
that can be used to signal datalite decorator
constraints in the database.
"""
from typing import TypeVar, Union, Tuple
T = TypeVar('T')
class ConstraintFailedError(Exception):
"""
This exception is raised when a Constraint fails.
"""
pass
"""
Dataclass fields hinted with this type signals
datalite that the bound column of this
field in the table is NOT NULL and UNIQUE.
"""
Unique = Union[Tuple[T], T]
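# A minimal usage sketch (hypothetical, not part of this module): hinting a
# dataclass field with Unique marks the bound column NOT NULL and UNIQUE once
# the class is registered with the datalite decorator, e.g.
#
#     from dataclasses import dataclass
#     from datalite import datalite
#
#     @datalite(db_path="users.db")
#     @dataclass
#     class User:
#         username: Unique[str]
#         age: int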
|
import argparse
from collections import Counter, defaultdict
import json
import random
import re
import time
import numpy as np
from pystalk import BeanstalkClient
from bucket_extraction import getBucketsFromText
import bucket_generation.utils as generation_utils
def generateNGrams(candidates):
ngrams = defaultdict(lambda: Counter())
lengthDistribution = Counter()
delimiterDistribution = Counter()
for bucket in candidates:
splitBucket = re.split(r'([\.|\-|_])', bucket.lower().strip())
delimiterDistribution.update(
[
d for d in splitBucket if d in [".", "-", "_"]
]
)
tokens = [t for t in splitBucket if t not in [".", "-", "_"]]
for i in range(len(list(tokens))):
ngrams[
tuple(tokens[max(0, i-1): i])
][tokens[i]] += 1
lengthDistribution[len(tokens)] += 1
return ngrams, lengthDistribution, delimiterDistribution
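# Worked example (hypothetical input, for illustration): for candidates = ["my-test.bucket"],
# splitBucket is ['my', '-', 'test', '.', 'bucket'], so the function returns
#   ngrams[()]['my'] == 1, ngrams[('my',)]['test'] == 1, ngrams[('test',)]['bucket'] == 1
#   lengthDistribution == Counter({3: 1})
#   delimiterDistribution == Counter({'-': 1, '.': 1})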
def sampleFromCounter(counter):
total = sum(counter.values())
return np.random.choice([k for k,v in counter.items()], p=[v/total for k,v in counter.items()])
def streamNGramCandidates(
startingCandidates=None, beanstalkPort=None, numTrials=float("inf"), name="ngrams", experiment=False, public=False,
):
candidates = startingCandidates or generation_utils.getExistingBuckets(public=public)
    previouslySeen = set(candidates) | generation_utils.readBucketsFromFile(f"./data/generation/{name}.txt")
beanstalkClient = generation_utils.getBeanstalkClient(port=beanstalkPort)
while numTrials > 0:
# Update our prior distribution for every 10,000 candidates.
print("Initializing bigram distribution.")
with generation_utils.Profiler(generation_utils.ProfilerType.TRAIN, name):
if experiment:
# add all existing buckets that have been guessed by ngrams and are in seed set.
candidates |= generation_utils.getExistingAlreadyGuessedBuckets(name, public=public)
nGrams, lengthDistribution, delimiterDistribution = generateNGrams(candidates)
for _ in range(int(1e4)):
with generation_utils.Profiler(generation_utils.ProfilerType.GENERATE, name) as p:
bucket = []
bucketLength = sampleFromCounter(lengthDistribution)
for _ in range(bucketLength):
if len(bucket) > 0:
bucket += [sampleFromCounter(delimiterDistribution)]
ngramsKey = tuple(bucket[-2:-1])
if ngramsKey in nGrams:
                        bucket += [sampleFromCounter(nGrams[ngramsKey])]
bucket = "".join(bucket)
p.bucket(bucket)
if len(bucket) < 64 and bucket not in previouslySeen:
previouslySeen.add(bucket)
beanstalkClient.put_job("generation/{},{}".format(name, bucket))
print("Generated: " + bucket)
numTrials -= 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run the ngrams generator.')
generation_utils.addArguments(parser)
args = parser.parse_args()
candidates = generation_utils.getStartBucketNames(args)
streamNGramCandidates(name=args.name, startingCandidates=candidates, public=args.public, numTrials=int(args.num_trials) or float("inf"))
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# Original file Copyright Crytek GMBH or its affiliates, used under license.
#
from waflib.Configure import conf
import lumberyard
PLATFORM = 'linux_x64'
# Required load_<PLATFORM>_common_settings(ctx)
@conf
def load_linux_x64_common_settings(ctx):
env = ctx.env
platform_settings = ctx.get_platform_settings(PLATFORM)
default_bin_search_paths = ['/usr/bin']
cc_compiler = platform_settings.attributes.get('cc_compiler', 'clang')
cxx_compiler = platform_settings.attributes.get('cxx_compiler', 'clang++')
compiler_bin_search_paths = platform_settings.attributes.get('compiler_search_paths', default_bin_search_paths)
error_message_fmt = "Unable to detect '%s' in search paths: %s. Make sure it is installed on this system"
ctx.find_program('ar',
path_list=default_bin_search_paths,
var='AR',
errmsg=error_message_fmt % ('ar', ','.join(default_bin_search_paths)),
silent_output=False)
ctx.find_program(cc_compiler,
path_list=compiler_bin_search_paths,
var='CC',
errmsg=error_message_fmt % (cc_compiler, ','.join(compiler_bin_search_paths)),
silent_output=False)
ctx.find_program(cxx_compiler,
path_list=compiler_bin_search_paths,
var='CXX',
errmsg=error_message_fmt % (cxx_compiler, ','.join(compiler_bin_search_paths)),
silent_output=False)
env['LINK'] = env['LINK_CC'] = env['LINK_CXX'] = env['CXX']
# Pattern to transform outputs
env['cprogram_PATTERN'] = '%s'
env['cxxprogram_PATTERN'] = '%s'
env['cshlib_PATTERN'] = 'lib%s.so'
env['cxxshlib_PATTERN'] = 'lib%s.so'
env['cstlib_PATTERN'] = 'lib%s.a'
env['cxxstlib_PATTERN'] = 'lib%s.a'
# ASAN and ASLR
# once we can use clang 4, add , '-fsanitize-address-use-after-scope'
# disabled until the linker requirements are worked out on linux
env['LINKFLAGS_ASLR'] = [] #['-fsanitize=memory']
env['ASAN_cflags'] = [] # ['-fsanitize=address']
env['ASAN_cxxflags'] = [] # ['-fsanitize=address']
ctx.load_cryengine_common_settings()
ctx.load_clang_common_settings()
# Required load_<PLATFORM>_configuration_settings(ctx, platform_configuration)
@conf
def load_linux_x64_configuration_settings(ctx, platform_configuration):
# No special configuration-specific setup needed
pass
# Optional is_<PLATFORM>_available(ctx)
@conf
def is_linux_x64_available(ctx):
return True
@lumberyard.multi_conf
def generate_ib_profile_tool_elements(ctx):
linux_tool_elements = [
'<Tool Filename="x86_64-linux-gnu-gcc" AllowRemote="true" AllowIntercept="false" DeriveCaptionFrom="lastparam" AllowRestartOnLocal="false"/>',
'<Tool Filename="x86_64-linux-gnu-g++" AllowRemote="true" AllowIntercept="false" DeriveCaptionFrom="lastparam" AllowRestartOnLocal="false"/>'
]
return linux_tool_elements
|
entero = 5      # Integer number
lon = 5L        # LONG number (Python 2 long literal)
real = 0.567    # Real number
real = 0.56e-3  # Real number times 10 to the -3
print real
a = 26
b = 11.3
c = 5
d = 3.5
# ADDITION
print a + b
# SUBTRACTION
print c - a
# MULTIPLICATION
print d * c
# EXPONENT
print c ** 3
# DIVISION
print c / a
# INTEGER DIVISION
print c // a
# MODULO
print 7 % 3
|
# Generated by Django 3.2.3 on 2021-08-12 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0008_customer_profile_picture'),
]
operations = [
migrations.AlterField(
model_name='customer',
name='profile_picture',
field=models.ImageField(blank=True, default='profile_null.jpg', null=True, upload_to=''),
),
]
|
"""
Functional Test for curve class
"""
import bond_math as bmath
import pandas as pd
# Get expectations
in_curve = pd.read_csv('test_curve_in.csv')
exp_curve_H6 = pd.read_csv('test_curve_H6.csv')
exp_H6 = pd.Series(exp_curve_H6['spot'].tolist(),
index=exp_curve_H6['term'].tolist())
exp_fwd = pd.read_csv('test_fwd_exp.csv')
# Calc Horizon Spot
test_curve = bmath.curve(term_vector=in_curve['term'].tolist(),
spot_vector=in_curve['spot'].tolist())
test_curve.add_discount_factors(compound_periods=2)
calc_H6 = test_curve.calc_horizon_curve(horizon_month=6)
df = pd.DataFrame([exp_H6,calc_H6])
df = df.T
df.columns=['exp','calc']
df.head()
# Calc Forward Rates
df2 = pd.DataFrame()
for term in exp_fwd.columns:
if term == 'Horizon':
continue
df2[f'exp_{term}'] = exp_fwd[term]
calc_fwd = test_curve.calc_forward_rates(forward_term=float(term),
numb_of_horizons=24)
df2[f'calc_{term}'] = calc_fwd
df2.head()
|
import sys, logging, os, random, math, open_color, arcade
#check to make sure we are running the right version of Python
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
#turn on logging, in case we have to leave ourselves debugging messages
logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
MARGIN = 30
SCREEN_TITLE = "Space Defender"
STARTING_LOCATION = (400,100)
#Bullet and Enemies Variables
BULLET_DAMAGE = 10
SmallEnemies = 3
SmallEnemyHP = 30
MediumEnemies = 3
MediumEnemyHP = 60
LargeEnemies = 3
LargeEnemyHP = 100
#Score Variables
HIT_SCORE = 10
KillScoreS = 50
KillScoreM = 100
KillScoreL = 150
class Bullet(arcade.Sprite):
def __init__(self, position, velocity, damage):
super().__init__("assets/laserBlue1.png", 0.5)
(self.center_x, self.center_y) = position
(self.dx, self.dy) = velocity
self.damage = damage
def update(self):
self.center_x += self.dx
self.center_y += self.dy
class Player(arcade.Sprite):
def __init__(self):
super().__init__("assets/PlayerBlue.png", 0.5)
(self.center_x, self.center_y) = STARTING_LOCATION
#Small Enemy
class EnemyS(arcade.Sprite):
def __init__(self, position):
super().__init__("assets/MeteorSmall1.png", 0.5)
self.hp = SmallEnemyHP
(self.center_x, self.center_y) = position
#Medium Enemy
class EnemyM(arcade.Sprite):
def __init__(self, position):
super().__init__("assets/MeteorMedium1.png", 0.5)
self.hp = MediumEnemyHP
(self.center_x, self.center_y) = position
#Large Enemy
class EnemyL(arcade.Sprite):
def __init__(self, position):
super().__init__("assets/MeteorLarge1.png", 0.5)
self.hp = LargeEnemyHP
(self.center_x, self.center_y) = position
class Window(arcade.Window):
def __init__(self, width, height, title):
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
self.set_mouse_visible(True)
self.background= arcade.load_texture("assets/blue.png")
#arcade.set_background_color(open_color.black)
self.bullet_list = arcade.SpriteList()
self.enemy_list_small = arcade.SpriteList()
self.enemy_list_medium = arcade.SpriteList()
self.enemy_list_large = arcade.SpriteList()
self.player = Player()
self.score = 0
#Enemy setup
def setup(self):
for i in range(SmallEnemies):
xs = 175
ys = 300 * (i+1) - 20
enemyS = EnemyS((xs,ys))
self.enemy_list_small.append(enemyS)
for i in range(MediumEnemies):
xm = 375
ym = 300 * (i+1) - 20
enemyM = EnemyM((xm,ym))
self.enemy_list_medium.append(enemyM)
for i in range(LargeEnemies):
xl = 625
yl = 300 * (i+1) - 20
enemyL = EnemyL((xl,yl))
self.enemy_list_large.append(enemyL)
def update(self, delta_time):
self.bullet_list.update()
#Small Kill and Score
self.enemy_list_small.update()
for e in self.enemy_list_small:
hit = arcade.check_for_collision_with_list(e, self.bullet_list)
for h in hit:
e.hp = e.hp - h.damage
self.score += HIT_SCORE
h.kill()
if e.hp <=0:
e.kill()
self.score += KillScoreS
#Medium Kill and Score
self.enemy_list_medium.update()
for e in self.enemy_list_medium:
hit = arcade.check_for_collision_with_list(e, self.bullet_list)
for h in hit:
e.hp = e.hp - h.damage
self.score += HIT_SCORE
h.kill()
if e.hp <=0:
e.kill()
self.score += KillScoreM
#Large Kill and Score
self.enemy_list_large.update()
for e in self.enemy_list_large:
hit = arcade.check_for_collision_with_list(e, self.bullet_list)
for h in hit:
e.hp = e.hp - h.damage
self.score += HIT_SCORE
h.kill()
if e.hp <=0:
e.kill()
self.score += KillScoreL
def on_draw(self):
arcade.start_render()
arcade.draw_texture_rectangle(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2, SCREEN_WIDTH, SCREEN_HEIGHT, self.background)
arcade.draw_text(str(self.score), 20, SCREEN_HEIGHT - 40, open_color.white, 16)
self.player.draw()
self.bullet_list.draw()
self.enemy_list_large.draw()
self.enemy_list_medium.draw()
self.enemy_list_small.draw()
def on_mouse_motion(self, x, y, dx, dy):
self.player.center_x = x
def on_mouse_press(self, x, y, button, modifiers):
if button == arcade.MOUSE_BUTTON_LEFT:
x = self.player.center_x
y = self.player.center_y + 15
bullet = Bullet((x,y),(0,10),BULLET_DAMAGE)
self.bullet_list.append(bullet)
def on_mouse_release(self, x, y, button, modifiers):
"""
Called when a user releases a mouse button.
"""
pass
def on_key_press(self, key, modifiers):
if key == arcade.key.LEFT:
print("Left")
elif key == arcade.key.RIGHT:
print("Right")
elif key == arcade.key.UP:
print("Up")
elif key == arcade.key.DOWN:
print("Down")
def on_key_release(self, key, modifiers):
"""
Called whenever a user releases a key.
"""
pass
def main():
window = Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
import sys
import argparse
import colorama
from rve import cmd
def _create_sub_parsers(parser):
cmds = [cmd.init, cmd.remove, cmd.run, cmd.status]
cmds_names = [x.name for x in cmds]
cmds_list = '[' + '|'.join(cmds_names) + ']'
desc = 'call `rosh command -h` for help in each command listed below:\n'
for c in cmds:
desc += f'\n {c.name}\t\t{c.desc}'
subparser = parser.add_subparsers(
title='rosh command',
metavar=cmds_list,
description=desc,
dest='cmd')
for c in cmds:
cmd_parser = subparser.add_parser(c.name,description=c.desc)
cmd_parser = c.prepare_arguments(cmd_parser)
cmd_parser.set_defaults(run=c.run)
def main():
colorama.init(autoreset=True)
parser = argparse.ArgumentParser(description='rosh command',
formatter_class=argparse.RawDescriptionHelpFormatter)
_create_sub_parsers(parser)
args = parser.parse_args()
if args.cmd is None:
parser.print_help()
sys.exit()
sys.exit(args.run(args))
|
from ..typing import Number
from .base import MarkdownSection
__all__ = ["MarkdownCodeBlock"]
class MarkdownCodeBlock(MarkdownSection):
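    """
    Section wrapper for fenced code blocks: lines are stored verbatim and
    reformatted() only strips trailing whitespace, ignoring the target width.
    """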
def append(self, line: str) -> None:
self.lines.append(line)
def reformatted(self, width: Number = 88) -> str:
return "\n".join([line.rstrip() for line in self.lines])
|
from .robot import Robot
from ..shared import ROBOT, DIRT, BABY, CORRAL, OBSTACLE, get_adjacents, count_dirt
from .utils import closest_target, count_free_babies
from ..logging import LoggerFactory as Logger
log = None
class Reagent(Robot):
'''
Reagent agent. Simple reflex agent
'''
def __init__(self, pos):
global log
log = Logger('Kindergarden').getChild('Reagent')
super().__init__(pos)
def action(self, house):
x, y = self.pos
if self.will_clean:
self.garbage_collected += 1
self.will_clean = False
house[x][y].update(ROBOT)
house[x][y].dirty = False
log.debug(f'Dirt cleaned in ({x}, {y})', 'action')
elif DIRT in house[x][y].value and not self.carrying_baby:
self.will_clean = True
log.debug(f'I\'ll clean dirt on ({x}, {y}) in the next turn', 'action')
elif not self.carrying_baby and BABY in house[x][y].value and not house[x][y].isFixed:
self.carrying_baby = True
house[x][y].update(f'{ROBOT}-{BABY}')
log.debug(f'Loaded baby on ({x}, {y})')
elif self.carrying_baby and CORRAL in house[x][y].value and not f'{CORRAL}-{BABY}' in house[x][y].value:
self.carrying_baby = False
house[x][y].update(f'{BABY}-{ROBOT}')
log.debug(f'Dropped baby in corral at ({x}, {y})', 'action')
else:
adj = get_adjacents(house, self.pos)
if len(adj) == 0:
                log.debug('No valid adjacent cells for the robot', 'action')
log.debug(f'I can\'t move!!! Waiting for an environment change', 'action')
return
if self.carrying_baby:
self.try_move(house, CORRAL, [OBSTACLE, BABY])
elif count_free_babies(house) > 0:
self.try_move(house, BABY)
elif count_dirt(house) > 0:
self.try_move(house, DIRT)
else:
log.debug('There is no dirt to clean or babies to carry!!! Waiting for an environment change', 'action')
def move(self, house, pos, steps=1):
if steps == 0 or pos == []:
return
cur_cell = house[self.pos[0]][self.pos[1]]
x, y = pos.pop(0)
if self.carrying_baby:
value = f'{ROBOT}-{BABY}'
else:
value = ROBOT
house[x][y].update(value, old=cur_cell)
self.pos = (x, y)
log.debug(f'Moved to ({x}, {y})', 'move')
self.move(house, pos, steps - 1)
def try_move(self, house, target, check=[OBSTACLE]):
p = closest_target(house, self.pos, target, check)
#log.debug(f'p: {p}', 'try_move')
if p == []:
log.debug(f'No path to closest target: {target}', 'try_move')
log.debug(f'I can\'t move!!! Analyzing options... Meanwhile waiting for an environment change', 'try_move')
return False
if target == CORRAL:
self.move(house, p, steps=2)
else:
self.move(house, p)
return True
def __str__(self):
return 'Reagent'
|
import unittest
import torch
import torch.nn as nn
from tinynn.prune.oneshot_pruner import OneShotChannelPruner
from interval import Interval
import torch.nn.functional as F
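# Helper used by the tests below: asserts that the removed channel indices are
# spread evenly across `group` groups, i.e. each group loses
# removed_idx_len // group of its total_idx_len // group channels.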
def removed_idx_group_check(removed_idx, total_idx_len, removed_idx_len, group, offset=0):
for i in range(group):
remove_group_len = removed_idx_len // group
for j in range(i * remove_group_len, i * remove_group_len + remove_group_len):
idx_group_len = total_idx_len // group
assert removed_idx[j] in Interval(offset + i * idx_group_len,
offset + i * idx_group_len + idx_group_len,
upper_closed=False)
class ModifierTester(unittest.TestCase):
def test_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv0 = nn.Conv2d(3, 8, (3, 3))
self.conv1 = nn.Conv2d(3, 8, (3, 3))
self.conv2 = nn.Conv2d(16, 32, (3, 3))
self.linear = nn.Linear(800, 100)
def forward(self, x):
x0 = self.conv0(x)
x1 = self.conv1(x)
cat0 = torch.cat([x0, x1], dim=1)
conv2 = self.conv2(cat0)
view0 = conv2.view((1, -1))
linear0 = self.linear(view0)
return linear0
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.75, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 2
assert model.conv0.out_channels == 2
assert model.conv2.in_channels == 4
assert model.conv2.out_channels == 8
assert model.linear.in_features == 200
assert model.linear.out_features == 100
def test_tail_linear_graph(self):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3), padding=(1, 1))
self.linear = nn.Linear(16 * 9 * 9, 100)
def forward(self, x):
conv0 = self.conv0(x)
view0 = conv0.view((1, -1))
linear0 = self.linear(view0)
return linear0
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.75, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 4
assert model.linear.in_features == 324
assert model.linear.out_features == 100
def test_cat_add_graph(self):
class TestModel(nn.Module):
def __init__(self):
super().__init__()
self.conv0 = nn.Conv2d(3, 8, (3, 3), padding=(1, 1))
self.conv1 = nn.Conv2d(3, 8, (3, 3), padding=(1, 1))
self.conv2 = nn.Conv2d(3, 16, (3, 3), padding=(1, 1))
self.conv3 = nn.Conv2d(16, 32, (3, 3), padding=(1, 1))
def forward(self, x):
x0 = self.conv0(x)
x1 = self.conv1(x)
x2 = self.conv2(x)
cat0 = torch.cat([x0, x1], dim=1)
add0 = torch.add(cat0, x2)
return self.conv3(add0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.75, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 2
assert model.conv1.out_channels == 2
assert model.conv2.out_channels == 4
assert model.conv3.in_channels == 4
assert model.conv3.out_channels == 8
def test_flatten_graph(self):
class TestFlattenModel(nn.Module):
def __init__(self):
super(TestFlattenModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3))
self.dropout = nn.Dropout()
self.linear1 = nn.Linear(800, 100)
self.linear2 = nn.Linear(100, 10)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
flatten0 = torch.flatten(conv1, 1)
dropout0 = self.dropout(flatten0)
linear1 = self.linear1(dropout0)
linear2 = self.linear2(linear1)
return linear2
model = TestFlattenModel()
model(torch.ones(1, 3, 9, 9))
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.75, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 4
assert model.conv1.out_channels == 8
assert model.linear1.in_features == 200
assert model.linear1.out_features == 25
assert model.linear2.in_features == 25
assert model.linear2.out_features == 10
def test_loop_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 16, (3, 3))
self.conv2 = nn.Conv2d(64, 128, (3, 3))
self.relu1 = torch.nn.modules.activation.ReLU(inplace=True)
self.relu2 = torch.nn.modules.activation.ReLU(inplace=True)
self.relu3 = torch.nn.modules.activation.ReLU(inplace=True)
self.relu4 = torch.nn.modules.activation.ReLU(inplace=True)
def forward(self, x):
conv1 = self.conv1(x)
relu1 = self.relu1(conv1)
relu2 = self.relu2(conv1)
relu3 = self.relu3(conv1)
relu4 = self.relu4(conv1)
z = torch.cat([relu1, relu2, relu3, relu4], dim=1)
return self.conv2(z)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.randn((1, 3, 9, 9)), {"sparsity": 0.25, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 12
assert model.conv2.in_channels == 48
assert model.conv2.out_channels == 96
def test_group_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 16, (3, 3))
self.conv2 = nn.Conv2d(3, 32, (3, 3))
self.conv3 = nn.Conv2d(48, 64, (3, 3), groups=4)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
cat0 = torch.cat([conv1, conv2], dim=1)
return self.conv3(cat0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.25, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 12
assert model.conv2.out_channels == 24
assert model.conv3.in_channels == 36
assert model.conv3.out_channels == 48
def test_nonaligned_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 8, (3, 3))
self.conv2 = nn.Conv2d(3, 4, (3, 3))
self.conv3 = nn.Conv2d(3, 4, (3, 3))
self.conv4 = nn.Conv2d(16, 64, (3, 3))
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
cat0 = torch.cat([conv1, conv2, conv3], dim=1)
return self.conv4(cat0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.25, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
assert model.conv1.out_channels == 6
assert model.conv2.out_channels == 3
assert model.conv3.out_channels == 3
assert model.conv4.in_channels == 12
assert model.conv4.out_channels == 48
def test_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3), groups=8)
self.conv2 = nn.Conv2d(32, 32, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
return self.conv2(conv1)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 32, 16, 8)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 8
assert model.conv1.in_channels == 8
assert model.conv1.out_channels == 16
assert model.conv2.out_channels == 16
def test_multi_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3), groups=4)
self.conv2 = nn.Conv2d(16, 32, (3, 3), groups=8)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv2 = self.conv2(conv0)
return conv1, conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 32, 16, 4)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 32, 16, 8)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
assert model.conv0.out_channels == 8
assert model.conv1.in_channels == 8
assert model.conv1.out_channels == 16
assert model.conv2.out_channels == 16
def test_add_cat_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 16, (3, 3))
self.conv1 = nn.Conv2d(16, 32, (3, 3), groups=2)
self.conv2 = nn.Conv2d(16, 32, (3, 3), groups=4)
self.conv3 = nn.Conv2d(3, 16, (3, 3))
self.conv4 = nn.Conv2d(16, 32, (3, 3), groups=8)
self.conv5 = nn.Conv2d(64, 64, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(conv0)
conv2 = self.conv2(conv0)
add1 = conv1.__add__(conv2)
conv3 = self.conv3(x)
conv4 = self.conv4(conv3)
cat0 = torch.cat([add1, conv4], dim=1)
return self.conv5(cat0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 32, 16, 4)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 32, 16, 4)
removed_idx_group_check(model.conv4.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv4.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv5.masker.in_remove_idx[:16], 32, 16, 4)
removed_idx_group_check(model.conv5.masker.in_remove_idx[16:], 32, 16, 8, offset=32)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_multi_cat_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 8, (3, 3))
self.conv1 = nn.Conv2d(8, 16, (3, 3))
self.conv2 = nn.Conv2d(8, 16, (3, 3), groups=4)
self.conv3 = nn.Conv2d(8, 16, (3, 3))
self.conv4 = nn.Conv2d(32, 64, (3, 3))
self.conv5 = nn.Conv2d(32, 64, (3, 3))
def forward(self, x):
conv0 = self.conv0(x)
relu0 = F.relu(conv0)
x1 = self.conv1(relu0)
x2 = self.conv2(relu0)
x3 = self.conv3(relu0)
z1 = torch.cat([x1, x2], dim=1)
z2 = torch.cat([x2, x3], dim=1)
z1 = self.conv4(z1)
z2 = self.conv5(z2)
return z1, z2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv3.masker.in_remove_idx, 8, 4, 4)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv4.masker.in_remove_idx[8:], 16, 8, 4, offset=16)
removed_idx_group_check(model.conv5.masker.in_remove_idx[:8], 16, 8, 4)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_split_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (3, 3))
self.conv2 = nn.Conv2d(3, 16, (3, 3))
self.conv3 = nn.Conv2d(3, 16, (3, 3))
self.conv4 = nn.Conv2d(16, 32, (3, 3), groups=2)
self.conv5 = nn.Conv2d(16, 32, (3, 3), groups=4)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
size = conv1.shape[1] // 2
sp1, sp2 = torch.split(conv1, size, 1)
add0 = conv2 + sp1
add1 = sp2 + conv3
return self.conv4(add0), self.conv5(add1)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv1.masker.ot_remove_idx[:8], 16, 8, 2)
removed_idx_group_check(model.conv1.masker.ot_remove_idx[8:], 16, 8, 4, offset=16)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
removed_idx_group_check(model.conv3.masker.ot_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv4.masker.in_remove_idx, 16, 8, 2)
removed_idx_group_check(model.conv4.masker.ot_remove_idx, 32, 16, 2)
removed_idx_group_check(model.conv5.masker.in_remove_idx, 16, 8, 4)
removed_idx_group_check(model.conv5.masker.ot_remove_idx, 32, 16, 4)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_group_mul_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv1 = nn.Conv2d(16, 16, (1, 1))
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=4)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(x)
add0 = conv0 * conv1
conv2 = self.conv2(add0)
return conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(16, 16, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 4)
pruner.apply_mask()
model(torch.ones(16, 16, 9, 9))
def test_group_add_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv1 = nn.Conv2d(16, 16, (1, 1))
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=4)
def forward(self, x):
conv0 = self.conv0(x)
conv1 = self.conv1(x)
add0 = conv0 + conv1
conv2 = self.conv2(add0)
return conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(16, 16, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 4)
pruner.apply_mask()
model(torch.ones(16, 16, 9, 9))
def test_center_add_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 32, (1, 1))
self.conv1 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=2)
def forward(self, x):
conv0 = self.conv0(x)
sp0, sp1 = torch.split(conv0, conv0.shape[1] // 2, 1)
conv1 = self.conv1(sp0)
add0 = conv1 + sp1
conv2 = self.conv2(add0)
return conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_center_sub_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 32, (1, 1))
self.conv1 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=2)
def forward(self, x):
conv0 = self.conv0(x)
sp0, sp1 = torch.split(conv0, conv0.shape[1] // 2, 1)
conv1 = self.conv1(sp0)
add0 = conv1 - sp1
conv2 = self.conv2(add0)
return conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_center_div_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 32, (1, 1))
self.conv1 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=2)
def forward(self, x):
conv0 = self.conv0(x)
sp0, sp1 = torch.split(conv0, conv0.shape[1] // 2, 1)
conv1 = self.conv1(sp0)
add0 = conv1 / sp1
conv2 = self.conv2(add0)
return conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_center_mul_group_graph(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv0 = nn.Conv2d(3, 32, (1, 1))
self.conv1 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=2)
def forward(self, x):
conv0 = self.conv0(x)
sp0, sp1 = torch.split(conv0, conv0.shape[1] // 2, 1)
conv1 = self.conv1(sp0)
add0 = conv1 * sp1
conv2 = self.conv2(add0)
return conv2
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv0.masker.ot_remove_idx, 32, 16, 8)
removed_idx_group_check(model.conv1.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.in_remove_idx, 16, 8, 8)
removed_idx_group_check(model.conv2.masker.ot_remove_idx, 16, 8, 2)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_res_2_net_block(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 64, (1, 1))
self.conv2 = nn.Conv2d(16, 16, (1, 1), groups=8)
self.conv3 = nn.Conv2d(16, 16, (1, 1), groups=2)
self.conv4 = nn.Conv2d(16, 16, (1, 1))
self.conv5 = nn.Conv2d(64, 64, (1, 1))
def forward(self, x):
conv1 = self.conv1(x)
size0 = conv1.shape[1] // 4
split0 = torch.split(conv1, size0, 1)
conv2 = self.conv2(split0[0])
add0 = conv2 + split0[1]
conv3 = self.conv3(add0)
add3 = conv3 + split0[2]
conv4 = self.conv4(add3)
cat0 = torch.cat([conv2, conv3, conv4, split0[3]], 1)
return self.conv5(cat0)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.register_mask()
removed_idx_group_check(model.conv1.masker.ot_remove_idx[:16], 32, 16, 8)
removed_idx_group_check(model.conv1.masker.ot_remove_idx[16:24], 16, 8, 2, offset=32)
pruner.apply_mask()
model(torch.ones(1, 3, 9, 9))
def test_conv1d_block(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv1d(3, 16, (3,))
self.conv2 = nn.Conv1d(16, 32, (3,))
def forward(self, x):
conv1 = self.conv1(x)
return self.conv2(conv1)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9), {"sparsity": 0.25, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9))
assert model.conv1.out_channels == 12
assert model.conv2.out_channels == 24
def test_loop_conv_block(self):
class TestModel(nn.Module):
def __init__(self):
super(TestModel, self).__init__()
self.conv1 = nn.Conv2d(3, 16, (3, 3))
self.conv2 = nn.Conv2d(16, 32, (3, 3))
self.conv3 = nn.Conv2d(16, 32, (3, 3))
self.conv4 = nn.Conv2d(16, 32, (3, 3))
self.conv5 = nn.Conv2d(32, 32, (3, 3))
def forward(self, x):
conv1 = self.conv1(x)
conv2 = F.relu(self.conv2(conv1))
conv3 = F.relu(self.conv3(conv1))
conv4 = F.relu(self.conv4(conv1))
return self.conv5(conv2 + conv3 + conv4)
model = TestModel()
pruner = OneShotChannelPruner(model, torch.ones(1, 3, 9, 9), {"sparsity": 0.5, "metrics": "l2_norm"})
pruner.prune()
model(torch.ones(1, 3, 9, 9))
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Pose
def process_odom(msg):
publisher.publish(msg.pose.pose)
rospy.init_node('converter')
publisher = rospy.Publisher('/tractor_position', Pose, queue_size=10)
subscriber = rospy.Subscriber('/gps/rtkfix', Odometry, process_odom)
rospy.spin()
|
from __future__ import annotations
from identify import cli
def test_identify_cli(capsys):
ret = cli.main(('setup.py',))
out, _ = capsys.readouterr()
assert ret == 0
assert out == '["file", "non-executable", "python", "text"]\n'
def test_identify_cli_filename_only(capsys):
ret = cli.main(('setup.py', '--filename-only'))
out, _ = capsys.readouterr()
assert ret == 0
assert out == '["python", "text"]\n'
def test_identify_cli_filename_only_unidentified(capsys):
ret = cli.main(('x.unknown', '--filename-only'))
out, _ = capsys.readouterr()
assert ret == 1
assert out == ''
def test_file_not_found(capsys):
ret = cli.main(('x.unknown',))
out, _ = capsys.readouterr()
assert ret == 1
assert out == 'x.unknown does not exist.\n'
|
# 6.4
glossary = {
'oop': 'object of power',
'loop': 'don\'t like infinite',
'condition': 'every choice is a renunciation',
'tuple': 'change all or nothing',
'immutable': 'like elders, don\'t change anymore',
'dictionaries': 'contain the knowledge',
'indent': 'the way to be clean',
}
for key, value in glossary.items():
    print(f'{key.title()}\'s definition: {value}')
print('\n')
# 6.5
waters = {
'nile': 'egypt',
'meuse': 'liège',
'sambre': 'charleroi',
}
for water, country in waters.items():
    print(f'The {water.title()} runs through {country.title()}')
print('\n')
for water in sorted(waters.keys()):
print(water.title())
print('\n')
for country in sorted(waters.values()):
print(country.title())
print('\n')
# 6.6
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil': 'python',
}
poll_names = ['bob', 'james', 'thomas', 'jen', 'edward', 'neil']
for name in poll_names:
if name not in favorite_languages:
        print(f'Hey {name.title()}, we wish you would take our poll, are you okay with that?')
else:
print(f'Thanks {name.title()} for taking the poll !')
|
from django.db.models import Q
from django.utils import timezone
from rest_framework import generics
from functools import reduce
from .models import Meeting
from .serializers import MeetingWriteSerializer, MeetingReadSerializer, \
MeetingSerializer
class MeetingListView(generics.ListCreateAPIView):
serializer_class = MeetingSerializer
# read_serializer_class = MeetingReadSerializer
# write_serializer_class = MeetingWriteSerializer
queryset = Meeting.objects.all()
# def get_serializer_class(self):
# if self.request.method == 'GET':
# return self.read_serializer_class
# else:
# return self.write_serializer_class
def get_queryset(self):
user = self.request.user
day = self.request.query_params.get('day', None)
location_id = self.request.query_params.get('location_id', None)
query = self.request.query_params.get('query', None)
participant_set = user.participant_set.all()
queryset = Meeting.objects.filter(Q(participant__in=participant_set) |
Q(location__manager=user),
Q(start__gte=timezone.now())
).distinct()
if day:
queryset = queryset.filter(start__date=day)
if location_id:
queryset = queryset.filter(location__id=location_id)
if query:
query = query.split(' ')
queryset = queryset.filter(
reduce(lambda x, y: x | y,
[Q(event_name__icontains=word) |
Q(meeting_agenda__icontains=word) for word in query]))
return queryset
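# Illustrative request this view is meant to serve (hedged; the URL path is hypothetical):
#
#     GET /meetings/?day=2021-06-01&location_id=3&query=budget review
#
# returns upcoming meetings the user participates in (or whose location they manage)
# on that day, at that location, whose event name or agenda contains "budget" or "review".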
|
from django.shortcuts import render
from dwebsocket.decorators import accept_websocket,require_websocket
from collections import defaultdict
from django.http import HttpResponse
# Store each connected user's websocket connection
allconn = defaultdict(list)
@accept_websocket
def echo(request, userid):
allresult = {}
    # Get the user information
userinfo = request.user
allresult['userinfo'] = userinfo
    # Declare the global variable
global allconn
    if not request.is_websocket():  # check whether this is a websocket connection
        try:  # plain HTTP request
            message = request.GET['message']
            return HttpResponse(message)
        except KeyError:
            return HttpResponse('message parameter is required', status=400)
else:
        # store this connection in the global dict, keyed by user id
allconn[str(userid)] = request.websocket
        # iterate over messages arriving on this connection
for message in request.websocket:
            # echo the message back to the sender's own chat box
request.websocket.send(message)
            # forward the message to every other connected user's chat box
for i in allconn:
if i != str(userid):
allconn[i].send(message)
|
# https://www.codewars.com/kata/56af1a20509ce5b9b000001e
def travel(r, zipcode):
r_splitted = r.split(',')
house_address = []
house_numbers = []
for address in r_splitted:
if address.count(zipcode) != 0:
address_splitted = address.split()
if zipcode != ' '.join(address_splitted[-2 :]):
address_splitted.clear()
break
house_numbers.append(address_splitted[0])
house_address.append(' '.join(address_splitted[1:-2]))
address_splitted.clear()
if len(house_address) != 0 or len(house_numbers) != 0:
result = zipcode +':'+ ','.join(house_address) + '/' + ','.join(house_numbers)
else:
result = zipcode + ':/'
return result
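# Illustrative usage (hedged): the address format is inferred from the parsing above --
# comma-separated entries of the form "<number> <street words> <two-token zipcode>".
#
#     travel("123 Main Street AA 12345,456 Oak Avenue AA 12345", "AA 12345")
#     # -> "AA 12345:Main Street,Oak Avenue/123,456"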
|
#!/usr/bin/python3
import hashlib
import threading
import time
import config
#findFlag = 0
#result = 0
myPassword = '12DC4DB5B4636DD86F46C138B2EE0386CC1C5C5FBCF5ED302B3144869D9E500B'
myPassword = myPassword.lower()
#12DC4DB5B4636DD86F46C138B2EE0386CC1C5C5FBCF5ED302B3144869D9E500B
#72D96D46543F3B7A9765B96007683315D328C94579B98FDAB4F70F9E47013319 - 19000000 FOR TESTING
class myThread (threading.Thread):
def __init__(self, threadID, name, startNumber, endNumber):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.startNumber = startNumber
self.endNumber = endNumber
def run(self):
print ("Starting " + self.name)
# Get lock to synchronize threads
threadLock.acquire()
rangeSha256(self.startNumber, self.endNumber, self.name)
# Free lock to release next thread
threadLock.release()
def rangeSha256 (startNumber,endNumber,threadName):
for i in range(startNumber-1,endNumber):
a = hashlib.sha256(bytes(str(i), encoding='utf-8')).hexdigest()
if i % 1000000 == 0:
print (" %s %s: %s" % (threadName, time.ctime(time.time()),i))
if a == myPassword or config.findFlag == 1:
if config.findFlag == 1:
print ("Stoping %s" % (threadName))
break
config.result = i
print ("Stoping %s" % (threadName))
config.findFlag = 1
break
threadLock = threading.Lock()
threads = []
for i in range(1,43):
startNumber = round(float(10000000) + float(i - 1) * (float(89999999) / float(42)))
endNumber = round(float(10000000) + float(i) * (float(89999999) / float(42)))
# Create new threads
name = "Thread-"+ str(i)
threads.append(myThread(i, name, startNumber, endNumber))
# Start new Threads
threads[i-1].start()
# Wait for all threads to complete
for t in threads:
t.join()
print ("Exiting Main Thread")
print (config.result)
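# The `config` module imported above is assumed to be a tiny shared-state file;
# only the two attributes used here need to exist. A minimal sketch:
#
#     # config.py
#     findFlag = 0   # set to 1 by the thread that finds the matching preimage
#     result = 0     # the integer whose SHA-256 digest equals myPassword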
|
from NeuralNet import NeuralNet
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras import backend
class NeuralNetGamma(NeuralNet):
def beginTraining(self):
self.setTrainingParameters(100000, 5000, 16, 10)
def defineModel(self, inputShape : tuple, outputSize : int):
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding = 'same', input_shape=inputShape))
model.add(Conv2D(32, (3, 3), activation='relu', padding = 'same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1028, activation='relu'))
model.add(Dense(1028, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(outputSize, activation='linear'))
return model
|
from CRF import *
|
# IMPORT LIBRARIES
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# IMPORTING DATASET
def rfg(dataset, NOFtrees):
    # dataset could also be loaded directly, e.g. pd.read_csv("2_Classification/Social_Network_Ads.csv")
X = dataset.iloc[:, 0:-1].values
y = dataset.iloc[:, -1].values
dataset.plot()
# Splitting Data into Training & Testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Fitting Random Forest class to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=NOFtrees, criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
print('Y_PRED\n',y_pred,'\n')
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
# # Visualising the Training set results
# from matplotlib.colors import ListedColormap
# X_set, y_set = X_train, y_train
# X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
# np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
# plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
# alpha=0.75, cmap=ListedColormap(('red', 'green')))
# plt.xlim(X1.min(), X1.max())
# plt.ylim(X2.min(), X2.max())
# for i, j in enumerate(np.unique(y_set)):
# plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
# c=ListedColormap(('red', 'green'))(i), label=j)
# plt.title('Random forest classification (Train set)')
# plt.xlabel('fetal state class')
# plt.ylabel('fetal cardiotocograms')
# plt.legend()
# plt.show()
#
# # Visualising the Test set results
# from matplotlib.colors import ListedColormap
# X_set, y_set = X_test, y_test
# X1, X2 = np.meshgrid(np.arange(start=X_set[:, 0].min() - 1, stop=X_set[:, 0].max() + 1, step=0.01),
# np.arange(start=X_set[:, 1].min() - 1, stop=X_set[:, 1].max() + 1, step=0.01))
# plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
# alpha=0.75, cmap=ListedColormap(('red', 'green')))
# plt.xlim(X1.min(), X1.max())
# plt.ylim(X2.min(), X2.max())
# for i, j in enumerate(np.unique(y_set)):
# plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
# c=ListedColormap(('red', 'green'))(i), label=j)
# plt.title('Random forest classification (Test set)')
# plt.xlabel('fetal state class')
# plt.ylabel('fetal cardiotocograms')
# plt.legend()
# plt.show()
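# Illustrative call (hedged): any DataFrame whose last column holds the class label
# fits the iloc slicing used above; the CSV path is the one mentioned in the comment.
#
#     df = pd.read_csv("2_Classification/Social_Network_Ads.csv")
#     rfg(df, NOFtrees=100)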
|
# -*- coding: utf-8 -*-
""" gtorch_utils/segmentation/loss_functions/test/test_fp_rate """
import torch
import unittest
from gtorch_utils.segmentation.loss_functions.fp_rate import FPR_Loss
from gtorch_utils.segmentation.metrics.fp_rate import fpr
class Test_FPR_Loss(unittest.TestCase):
def setUp(self):
self.pred = torch.Tensor([
[[1., 0., 0., 1., 0., 0., 0.], [1., 0., 0., 1., 1., 1., 0.]],
[[0., 1., 1., 1., 0., 1., 0.], [0., 0., 0., 0., 1., 1., 0.]]
])
self.gt = torch.Tensor([
[[1., 1., 1., 0., 0., 0., 1.], [1., 1., 1., 0., 0., 0., 1.]],
[[1., 1., 1., 0., 0., 0., 1.], [1., 1., 1., 0., 0., 0., 1.]]
])
def test_per_channel_False(self):
self.assertTrue(torch.equal(
1-fpr(self.pred, self.gt),
FPR_Loss()(self.pred, self.gt)
))
def test_per_channel_True(self):
self.assertTrue(torch.equal(
1-fpr(self.pred, self.gt, True),
FPR_Loss(True)(self.pred, self.gt)
))
if __name__ == '__main__':
unittest.main()
|
# Decomposition sum (BOJ 2231)
# A textbook brute-force problem.
# Don't be stingy about using a for loop that simply tries every candidate from 1 up.
# The point really is to check values one by one, starting from the smallest.
# [BOJ - Baekjoon] Brute force 2231 Decomposition Sum - python
n = int(input())
for i in range(1, n+1):
a = list(map(int, str(i)))
sum_sep = sum(a)
sum_all = i + sum_sep
if sum_all == n:
print(i)
break
if i == n:
print(0)
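# Worked example: for n = 216 the program prints 198, since 198 + 1 + 9 + 8 = 216
# and no smaller generator exists; if n has no generator at all, it prints 0.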
|
import socket
from struct import *
direction = input("Enter the Direction : ")
num = int(direction)
s = socket.socket()
host = "192.168.43.223"
port = 8001
s.connect((host,port))
print("connection established")
#s.send(val.encode())
val = pack('!i', num)
s.send(val)
|
'''
A + B
Problem Description
Given a series of integer pairs A and B, output the sum of A and B.
Input Format
Each line contains two integers, A and B. One input file may contain several pairs.
Output Format
Output a single integer per line - the sum of A and B.
SAMPLE INPUT
1 2
2 5
10 14
SAMPLE OUTPUT
3
7
24
'''
from sys import stdin
sums = []
for i in stdin:
data = list(map(int, i.split()))
sums.append(sum(data))
print(*sums,sep="\n")
|
__all__ = ["check_surr", "gradient", "folder_name_func", "index_list_func", "model_input_func", "model_sig_func", "plot_pred_func", "plot_sig_func", "pred_comp_func", "read", "rename_func", "sig_data_func", "tanh_func", "upper_shelf_func"]
|
from .theplatform import ThePlatformIE
from ..utils import (
ExtractorError,
GeoRestrictedError,
int_or_none,
update_url_query,
urlencode_postdata,
)
class AENetworksBaseIE(ThePlatformIE):
_BASE_URL_REGEX = r'''(?x)https?://
(?:(?:www|play|watch)\.)?
(?P<domain>
(?:history(?:vault)?|aetv|mylifetime|lifetimemovieclub)\.com|
fyi\.tv
)/'''
_THEPLATFORM_KEY = '43jXaGRQud'
_THEPLATFORM_SECRET = 'S10BPXHMlb'
_DOMAIN_MAP = {
'history.com': ('HISTORY', 'history'),
'aetv.com': ('AETV', 'aetv'),
'mylifetime.com': ('LIFETIME', 'lifetime'),
'lifetimemovieclub.com': ('LIFETIMEMOVIECLUB', 'lmc'),
'fyi.tv': ('FYI', 'fyi'),
'historyvault.com': (None, 'historyvault'),
'biography.com': (None, 'biography'),
}
def _extract_aen_smil(self, smil_url, video_id, auth=None):
query = {'mbr': 'true'}
if auth:
query['auth'] = auth
TP_SMIL_QUERY = [{
'assetTypes': 'high_video_ak',
'switch': 'hls_high_ak'
}, {
'assetTypes': 'high_video_s3'
}, {
'assetTypes': 'high_video_s3',
'switch': 'hls_high_fastly',
}]
formats = []
subtitles = {}
last_e = None
for q in TP_SMIL_QUERY:
q.update(query)
m_url = update_url_query(smil_url, q)
m_url = self._sign_url(m_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
try:
tp_formats, tp_subtitles = self._extract_theplatform_smil(
m_url, video_id, 'Downloading %s SMIL data' % (q.get('switch') or q['assetTypes']))
except ExtractorError as e:
if isinstance(e, GeoRestrictedError):
raise
last_e = e
continue
formats.extend(tp_formats)
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
if last_e and not formats:
raise last_e
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'subtitles': subtitles,
}
def _extract_aetn_info(self, domain, filter_key, filter_value, url):
requestor_id, brand = self._DOMAIN_MAP[domain]
result = self._download_json(
'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
filter_value, query={'filter[%s]' % filter_key: filter_value})['results'][0]
title = result['title']
video_id = result['id']
media_url = result['publicUrl']
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
info = self._parse_theplatform_metadata(theplatform_metadata)
auth = None
if theplatform_metadata.get('AETN$isBehindWall'):
resource = self._get_mvpd_resource(
requestor_id, theplatform_metadata['title'],
theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
theplatform_metadata['ratings'][0]['rating'])
auth = self._extract_mvpd_auth(
url, video_id, requestor_id, resource)
info.update(self._extract_aen_smil(media_url, video_id, auth))
info.update({
'title': title,
'series': result.get('seriesName'),
'season_number': int_or_none(result.get('tvSeasonNumber')),
'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')),
})
return info
class AENetworksIE(AENetworksBaseIE):
IE_NAME = 'aenetworks'
IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network and History Vault'
_VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'''(?P<id>
shows/[^/]+/season-\d+/episode-\d+|
(?:
(?:movie|special)s/[^/]+|
(?:shows/[^/]+/)?videos
)/[^/?#&]+
)'''
_TESTS = [{
'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
'info_dict': {
'id': '22253814',
'ext': 'mp4',
'title': 'Winter is Coming',
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
'timestamp': 1338306241,
'upload_date': '20120529',
'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
'skip': 'This video is only available for users of participating TV providers.',
}, {
'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
'info_dict': {
'id': '600587331957',
'ext': 'mp4',
'title': 'Inlawful Entry',
'description': 'md5:57c12115a2b384d883fe64ca50529e08',
'timestamp': 1452634428,
'upload_date': '20160112',
'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}, {
'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
'only_matching': True
}, {
'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
'only_matching': True
}, {
'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
'only_matching': True
}, {
'url': 'https://watch.lifetimemovieclub.com/movies/10-year-reunion/full-movie',
'only_matching': True
}, {
'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
'only_matching': True
}, {
'url': 'https://www.aetv.com/specials/hunting-jonbenets-killer-the-untold-story/preview-hunting-jonbenets-killer-the-untold-story',
'only_matching': True
}, {
'url': 'http://www.history.com/videos/history-of-valentines-day',
'only_matching': True
}, {
'url': 'https://play.aetv.com/shows/duck-dynasty/videos/best-of-duck-dynasty-getting-quack-in-shape',
'only_matching': True
}]
def _real_extract(self, url):
domain, canonical = self._match_valid_url(url).groups()
return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)
class AENetworksListBaseIE(AENetworksBaseIE):
def _call_api(self, resource, slug, brand, fields):
return self._download_json(
'https://yoga.appsvcs.aetnd.com/graphql',
slug, query={'brand': brand}, data=urlencode_postdata({
'query': '''{
%s(slug: "%s") {
%s
}
}''' % (resource, slug, fields),
}))['data'][resource]
def _real_extract(self, url):
domain, slug = self._match_valid_url(url).groups()
_, brand = self._DOMAIN_MAP[domain]
playlist = self._call_api(self._RESOURCE, slug, brand, self._FIELDS)
base_url = 'http://watch.%s' % domain
entries = []
for item in (playlist.get(self._ITEMS_KEY) or []):
doc = self._get_doc(item)
canonical = doc.get('canonical')
if not canonical:
continue
entries.append(self.url_result(
base_url + canonical, AENetworksIE.ie_key(), doc.get('id')))
description = None
if self._PLAYLIST_DESCRIPTION_KEY:
description = playlist.get(self._PLAYLIST_DESCRIPTION_KEY)
return self.playlist_result(
entries, playlist.get('id'),
playlist.get(self._PLAYLIST_TITLE_KEY), description)
class AENetworksCollectionIE(AENetworksListBaseIE):
IE_NAME = 'aenetworks:collection'
_VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'(?:[^/]+/)*(?:list|collections)/(?P<id>[^/?#&]+)/?(?:[?#&]|$)'
_TESTS = [{
'url': 'https://watch.historyvault.com/list/america-the-story-of-us',
'info_dict': {
'id': '282',
'title': 'America The Story of Us',
},
'playlist_mincount': 12,
}, {
'url': 'https://watch.historyvault.com/shows/america-the-story-of-us-2/season-1/list/america-the-story-of-us',
'only_matching': True
}, {
'url': 'https://www.historyvault.com/collections/mysteryquest',
'only_matching': True
}]
_RESOURCE = 'list'
_ITEMS_KEY = 'items'
_PLAYLIST_TITLE_KEY = 'display_title'
_PLAYLIST_DESCRIPTION_KEY = None
_FIELDS = '''id
display_title
items {
... on ListVideoItem {
doc {
canonical
id
}
}
}'''
def _get_doc(self, item):
return item.get('doc') or {}
class AENetworksShowIE(AENetworksListBaseIE):
IE_NAME = 'aenetworks:show'
_VALID_URL = AENetworksBaseIE._BASE_URL_REGEX + r'shows/(?P<id>[^/?#&]+)/?(?:[?#&]|$)'
_TESTS = [{
'url': 'http://www.history.com/shows/ancient-aliens',
'info_dict': {
'id': 'SERIES1574',
'title': 'Ancient Aliens',
'description': 'md5:3f6d74daf2672ff3ae29ed732e37ea7f',
},
'playlist_mincount': 150,
}]
_RESOURCE = 'series'
_ITEMS_KEY = 'episodes'
_PLAYLIST_TITLE_KEY = 'title'
_PLAYLIST_DESCRIPTION_KEY = 'description'
_FIELDS = '''description
id
title
episodes {
canonical
id
}'''
def _get_doc(self, item):
return item
class HistoryTopicIE(AENetworksBaseIE):
IE_NAME = 'history:topic'
IE_DESC = 'History.com Topic'
_VALID_URL = r'https?://(?:www\.)?history\.com/topics/[^/]+/(?P<id>[\w+-]+?)-video'
_TESTS = [{
'url': 'https://www.history.com/topics/valentines-day/history-of-valentines-day-video',
'info_dict': {
'id': '40700995724',
'ext': 'mp4',
'title': "History of Valentine’s Day",
'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
'timestamp': 1375819729,
'upload_date': '20130806',
'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}]
def _real_extract(self, url):
display_id = self._match_id(url)
return self.url_result(
'http://www.history.com/videos/' + display_id,
AENetworksIE.ie_key())
class HistoryPlayerIE(AENetworksBaseIE):
IE_NAME = 'history:player'
_VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:history|biography)\.com)/player/(?P<id>\d+)'
_TESTS = []
def _real_extract(self, url):
domain, video_id = self._match_valid_url(url).groups()
return self._extract_aetn_info(domain, 'id', video_id, url)
class BiographyIE(AENetworksBaseIE):
_VALID_URL = r'https?://(?:www\.)?biography\.com/video/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://www.biography.com/video/vincent-van-gogh-full-episode-2075049808',
'info_dict': {
'id': '30322987',
'ext': 'mp4',
'title': 'Vincent Van Gogh - Full Episode',
'description': 'A full biography about the most influential 20th century painter, Vincent Van Gogh.',
'timestamp': 1311970571,
'upload_date': '20110729',
'uploader': 'AENE-NEW',
},
'params': {
# m3u8 download
'skip_download': True,
},
'add_ie': ['ThePlatform'],
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
player_url = self._search_regex(
r'<phoenix-iframe[^>]+src="(%s)' % HistoryPlayerIE._VALID_URL,
webpage, 'player URL')
return self.url_result(player_url, HistoryPlayerIE.ie_key())
|
from user import User #Importing the user class
from credentials import Credentials #Importing the credentials class
import unittest #Importing python unittest module for testing
class TestPassword(unittest.TestCase):
'''
Test class to test user and credentials classes behaviours.
TestCase class helps in creating test cases.
'''
def setUp(self):
'''
setUp method to run before every test case.
'''
self.new_user = User("John Mac","Miley","1234")
self.new_credentials = Credentials("twitter","Aldis","1234")
def tearDown(self):
'''
tearDown method to clean up after each test case has run.
'''
User.users_list = []
Credentials.credentials_list = []
def test_init(self):
'''
test_init to test proper initialization of objects' properties
'''
self.assertEqual(self.new_user.official_name,"John Mac")
self.assertEqual(self.new_user.username,"Miley")
self.assertEqual(self.new_user.password,"1234")
self.assertEqual(self.new_credentials.account,"twitter")
self.assertEqual(self.new_credentials.username,"Aldis")
self.assertEqual(self.new_credentials.password,"1234")
def test_save_user(self):
'''
        test_save_user to test if a user is added to the users list and credentials to the credentials list
'''
self.new_user.save_user() #save user(s) official name(s)
self.new_credentials.save_credentials() #save new credentials for user(s)
self.assertEqual(len(User.users_list),1) #checking if users list length has increased
self.assertEqual(len(Credentials.credentials_list),1) #checking if credentials list length has increased
def test_save_multiple_credentials(self):
'''
Test to confirm if multiple credentials can be saved in the credentials list
'''
self.new_credentials.save_credentials()
test_credential = Credentials("facebook","bale","meforyou") # new credential details
test_credential.save_credentials()
#error message in case test case fails
message = "Unable to add multiple credentials to credentials list"
self.assertGreater(len(Credentials.credentials_list),1,message) #Checking if length of credentials list has increased
def test_save_multiple_users(self):
'''
Test to confirm if multiple users can create accounts
'''
self.new_user.save_user()
test_user = User("Mercy Mumbi","Alice","9876") #new user account
test_user.save_user()
self.assertEqual(len(User.users_list),2)
def test_find_account_by_password(self):
'''
        Test to confirm that a user's account can be looked up by the password entered and its details returned
'''
self.new_user.save_user()
test_user = User("Mercy Mumbi","Alice","9876") #new user account
test_user.save_user()
found_user = User.find_by_password("9876")
self.assertEqual(found_user.password,test_user.password)
def test_find_credential(self):
'''
Test to confirm whether the credential located is the correct one
'''
self.new_credentials.save_credentials()
test_credential = Credentials("facebook","bale","meforyou") # new credential details
test_credential.save_credentials()
found_credential = Credentials.find_credential("facebook")
self.assertEqual(found_credential.account,test_credential.account)
def test_delete_credentials_account(self):
'''
Test to confirm whether a credentials account has been deleted
'''
self.new_credentials.save_credentials()
test_credential = Credentials("facebook","bale","meforyou") # new credential details
test_credential.save_credentials()
test_credential.delete_credential_account("facebook") #Deleting a credential account
self.assertEqual(len(Credentials.credentials_list),1)
if __name__ == '__main__':
unittest.main()
|
'''
You are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you take turns to remove 1 to 3 stones. The one who removes the last stone will be the winner. You will take the first turn to remove the stones.
Both of you are very clever and have optimal strategies for the game. Write a function to determine whether you can win the game given the number of stones in the heap.
'''
class Solution:
def canWinNim(self, n: int) -> bool:
if n%4 == 0:
return False
return True
# slow solution
winner = [-1]*(n+10)
winner[0] = 0
winner[1] = 1
for i in range(n+4):
if winner[i] == 0:
winner[i+1] = 1
winner[i+2] = 1
winner[i+3] = 1
elif winner[i] == 1:
if winner[i+1] == -1:
winner[i+1] = 0
if winner[i+2] == -1:
winner[i+2] = 0
if winner[i+3] == -1:
winner[i+3] = 0
if winner[n] == 1:
return True
return False
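# Why n % 4 decides the game: with 4 stones left, whoever moves loses (taking 1-3
# always leaves 1-3 for the opponent), and from any non-multiple of 4 you can hand
# the opponent a multiple of 4. Quick sanity check (hedged sketch):
#
#     assert Solution().canWinNim(4) is False
#     assert Solution().canWinNim(5) is True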
|
from django.contrib.auth import get_permission_codename
from django.contrib.contenttypes.models import ContentType
from wagtail.wagtailsnippets.models import get_snippet_models
def get_permission_name(action, model):
return "%s.%s" % (model._meta.app_label, get_permission_codename(action, model._meta))
def user_can_edit_snippet_type(user, model_or_content_type):
""" true if user has 'add', 'change' or 'delete' permission on this model """
if isinstance(model_or_content_type, ContentType):
model = model_or_content_type.model_class()
else:
model = model_or_content_type
for action in ('add', 'change', 'delete'):
if user.has_perm(get_permission_name(action, model)):
return True
return False
def user_can_edit_snippets(user):
"""
true if user has 'add', 'change' or 'delete' permission
on any model registered as a snippet type
"""
snippet_models = get_snippet_models()
for model in snippet_models:
if user_can_edit_snippet_type(user, model):
return True
return False
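# Illustrative use in a view (hedged; guarding with PermissionDenied is an assumption
# about how callers typically use these helpers, not part of this module):
#
#     from django.core.exceptions import PermissionDenied
#
#     def snippet_index(request):
#         if not user_can_edit_snippets(request.user):
#             raise PermissionDenied
#         ...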
|
from cm.util import misc
from cm.util import paths
import logging
log = logging.getLogger('cloudman')
class CloudInterface(object):
# Global fields
ec2_conn = None
s3_conn = None
tags_supported = False
tags = {}
# Instance details
ami = None
instance_type = None
instance_id = None
instance = None # boto object representation of the instance
zone = None
region = None
region_name = None
security_groups = None
key_pair_name = None
self_private_ip = None
local_hostname = None
self_public_ip = None
public_hostname = None
fqdn = None
user_data = None
aws_access_key = None
aws_secret_key = None
def get_user_data(self, force=False):
""" Override this method in a cloud-specific interface if the
default approach does not apply.
NOTE that this method should call the set_configuration method!
:type force: boolean
:param force: If set to True, reload the user data regardless if
the data is already stored in the class field
:rtype: dict
:return: A key-value dictionary containing the user data.
"""
if self.user_data is None or force:
self.user_data = misc.load_yaml_file(paths.USER_DATA_FILE)
self.set_configuration()
return self.user_data
def set_configuration(self):
""" Set the configuration fields for the given cloud interface.
This should primarily be used to set credentials and such.
It is expected that this method is called in the process of loading
the user data.
        This method should be overridden for any cloud interface that requires
more credentials than the access credentials included in this default
implementation.
"""
if self.user_data is None:
self.get_user_data()
self.aws_access_key = self.user_data.get('access_key', None)
self.aws_secret_key = self.user_data.get('secret_key', None)
self.tags = {}
def get_configuration(self):
""" Return a dict with all the class variables.
"""
return vars(self)
# Non-implemented methods
def get_local_hostname(self):
log.warning("Unimplemented")
pass
def run_instances(self, num, instance_type, **kwargs):
""" Run an image.
"""
log.warning("Unimplemented")
pass
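# A minimal sketch of a cloud-specific subclass (hypothetical class name; a real
# implementation would query the provider's metadata service and API instead):
#
#     class DummyCloudInterface(CloudInterface):
#         def get_local_hostname(self):
#             self.local_hostname = 'localhost'
#             return self.local_hostname
#
#         def run_instances(self, num, instance_type, **kwargs):
#             log.debug("Pretending to launch %s instance(s) of type %s", num, instance_type)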
|
# -*- coding: utf-8 -*-
# @Author : Lin Lan (ryan.linlan@gmail.com)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from collections import defaultdict
import ray
import numpy as np
from ray.rllib.optimizers.policy_optimizer import PolicyOptimizer
from utils import summarize_episodes
logger = logging.getLogger("ray.rllib.agents.maml.maml_optimizer")
class MAMLOptimizer(PolicyOptimizer):
def _init(self, num_inner_updates=1, num_sgd_iter=1):
self.num_inner_updates = num_inner_updates
self.num_sgd_iter = num_sgd_iter
def sync_weights(self):
if self.remote_evaluators:
weights = self.local_evaluator.get_weights()
weights_id = ray.put(weights)
# import ipdb; ipdb.set_trace()
# for e in self.remote_evaluators:
# e.set_weights.remote(weights_id)
ray.get([e.set_weights.remote(weights_id)
for e in self.remote_evaluators])
return weights
else:
raise TypeError
def step(self):
# distribute weights of the model and the outer optimizer
prev_weights = self.sync_weights()
# perform the inner update in each remote evaluator
goals = ray.get([
e.inner_update.remote(self.num_inner_updates)
for e in self.remote_evaluators])
assert isinstance(goals, list)
goals = sorted(goals, key=lambda x: (x[0], x[1]))
logger.debug(f"\ngoals:\n{np.asarray(goals)}")
best_policy_loss = None
best_infos = None
best_weights = None
last_weights = None
kl_target = self.local_evaluator.policy_config["kl_target"]
# gather the gradients and update the variables in the local evaluator
for i in range(self.num_sgd_iter):
dist_outer_grad_values, dist_outer_infos = zip(
*ray.get([e.outer_update.remote()
for e in self.remote_evaluators]))
aggregated_grads = defaultdict(list)
aggregated_infos = defaultdict(list)
for outer_grad_values, outer_infos \
in zip(dist_outer_grad_values, dist_outer_infos):
for name, values in outer_grad_values.items():
aggregated_grads[name].append(values)
for name, infos in outer_infos.items():
aggregated_infos[name].append(infos)
aggregated_grads = dict(aggregated_grads)
aggregated_infos = dict(aggregated_infos)
grad_values = {k: np.mean(v, axis=0)
for k, v in aggregated_grads.items()}
infos = {k: np.mean(v) for k, v in aggregated_infos.items()}
logger.debug(f"\niter: {i}, infos: {infos}")
if infos["kl"] < kl_target:
if best_policy_loss is None \
or infos["policy_loss"] < best_policy_loss:
best_policy_loss = infos["policy_loss"]
best_infos = infos
best_weights = last_weights or prev_weights
if i < self.num_sgd_iter - 1:
fetches = self.local_evaluator.apply_gradients(grad_values)
assert fetches == {}
last_weights = self.sync_weights()
self.local_evaluator.set_weights(best_weights)
self.sync_weights()
return best_infos
def collect_metrics(self):
dist_episodes = ray.get([
e.apply.remote(lambda ev: ev.episodes)
for e in self.remote_evaluators])
aggregated_episodes = defaultdict(list)
for episodes in dist_episodes:
for k, v in episodes.items():
aggregated_episodes[k].extend(v)
aggregated_episodes = dict(aggregated_episodes)
res = {k: summarize_episodes(v, v, 0)
for k, v in aggregated_episodes.items()}
return {"inner_update_metrics": res}
# "reward_attr": res[
# str(self.num_inner_updates)]["episode_reward_mean"]}
|
import numpy as np
def test_BEM():
"""Unnecessary test for the ugly and non-working and legacy BEM
for the sake of coverage
"""
from Florence.BoundaryElements import GetBasesBEM2D
from Florence.BoundaryElements import GenerateCoordinates
from Florence.BoundaryElements import CoordsJacobianRadiusatGaussPoints, CoordsJacobianRadiusatGaussPoints_LM
from Florence.BoundaryElements import AssemblyBEM2D
from Florence.BoundaryElements.Assembly import AssemblyBEM2D_Sparse
from Florence.BoundaryElements import Sort_BEM
from Florence import QuadratureRule, FunctionSpace, Mesh
# Unnecessary loop
for i in range(10):
mesh = Mesh()
mesh.element_type = "line"
mesh.points = np.array([
[0.,0.],
[1.,0.],
[1.,1.],
[0.,1.],
])
mesh.elements = np.array([
[0,1],
[1,2],
[2,3],
[3,0],
])
mesh.nelem = 4
q = QuadratureRule(mesh_type="line")
for C in range(10):
N, dN = GetBasesBEM2D(C,q.points)
N, dN = GetBasesBEM2D(2,q.points)
global_coord = np.zeros((mesh.points.shape[0],3))
global_coord[:,:2] = mesh.points
Jacobian = 2*np.ones((q.weights.shape[0],mesh.nelem))
nx = 4*np.ones((q.weights.shape[0],mesh.nelem))
ny = 3*np.ones((q.weights.shape[0],mesh.nelem))
XCO = 2*np.ones((q.weights.shape[0],mesh.nelem))
YCO = np.ones((q.weights.shape[0],mesh.nelem))
N = np.ones((mesh.elements.shape[1],q.weights.shape[0]))
dN = 0.5*np.ones((mesh.elements.shape[1],q.weights.shape[0]))
GenerateCoordinates(mesh.elements,mesh.points,0,q.points)
CoordsJacobianRadiusatGaussPoints(mesh.elements,global_coord,0,N,dN,q.weights)
# Not working
# CoordsJacobianRadiusatGaussPoints_LM(mesh.elements,global_coord[:,:3],0,N,dN,q.weights,mesh.elements)
class GeoArgs(object):
Lagrange_Multipliers = "activated"
def __init__(self):
Lagrange_Multipliers = "activated"
geo_args = GeoArgs()
K1, K2 = AssemblyBEM2D(0,global_coord,mesh.elements,mesh.elements,dN,N,
q.weights,q.points,Jacobian, nx, ny, XCO, YCO, geo_args)
AssemblyBEM2D_Sparse(0,global_coord,mesh.elements,mesh.elements,dN,N,
q.weights,q.points,Jacobian, nx, ny, XCO, YCO, geo_args)
bdata = np.zeros((2*mesh.points.shape[0],2))
bdata[:4,1] = -1
bdata[4:,0] = -1
Sort_BEM(bdata,K1, K2)
if __name__ == "__main__":
test_BEM()
|
from conans import CMake, ConanFile, tools
from conans.errors import ConanInvalidConfiguration
import functools
import os
required_conan_version = ">=1.45.0"
class EdynConan(ConanFile):
name = "edyn"
description = "Edyn is a real-time physics engine organized as an ECS"
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/xissburg/edyn"
topics = ("physics", "game-development", "ecs")
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"floating_type": ["float", "double"],
}
default_options = {
"shared": False,
"fPIC": True,
"floating_type": "float",
}
generators = "cmake", "cmake_find_package_multi"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("entt/3.9.0")
@property
def _compiler_required(self):
return {
"gcc": "9.3", # GCC 9.3 started supporting attributes in constructor arguments
}
def validate(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 17)
try:
minimum_required_compiler_version = self._compiler_required[str(self.settings.compiler)]
if tools.Version(self.settings.compiler.version) < minimum_required_compiler_version:
raise ConanInvalidConfiguration("This package requires C++17 support. The current compiler does not support it.")
except KeyError:
self.output.warn("This recipe has no support for the current compiler. Please consider adding it.")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
@functools.lru_cache(1)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["EDYN_INSTALL"] = True
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "share"))
tools.remove_files_by_mask(os.path.join(self.package_folder, "bin"), "*.pdb")
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.set_property("cmake_find_mode", "both")
self.cpp_info.set_property("cmake_module_file_name", "Edyn")
self.cpp_info.set_property("cmake_file_name", "Edyn")
self.cpp_info.set_property("cmake_target_name", "Edyn::Edyn")
self.cpp_info.set_property("pkg_config_name", "Edyn")
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs += ["m", "pthread"]
elif self.settings.os == "Windows":
self.cpp_info.system_libs = ["winmm"]
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.filenames["cmake_find_package"] = "Edyn"
self.cpp_info.filenames["cmake_find_package_multi"] = "Edyn"
self.cpp_info.names["cmake_find_package"] = "Edyn"
self.cpp_info.names["cmake_find_package_multi"] = "Edyn"
|
print('the even numbers between 1 and 50 are:')
for c in range(2, 51, 2):
    print(c)
print('END')
escolha4 = ''
while escolha4 != 'sim' and escolha4 != 'nao':
    escolha4 = str(input('do you want to run again [sim/nao]? ')).lower()
    if escolha4 == 'sim':
        import jogo_do_tio_Dodo
    if escolha4 == 'nao':
        print('thank you for using our services')
        break
|
import unittest
import sys
from flow_stress.pt_conditions import *
depth = 10
density = 2.7
geothermal_gradient = 30
class Test_pt_conditions(unittest.TestCase):
def test_pt_conditions_initiates(self):
self.pt = PTCalculator(depth, density, geothermal_gradient)
self.assertEqual(self.pt.gravity, 9.8)
self.assertEqual(self.pt.density, 2700)
self.assertEqual(self.pt.depth, 10)
self.assertEqual(self.pt.geothermal_gradient, 30)
def test_if_pt_calculator_works(self):
self.pt = PTCalculator(depth, density, geothermal_gradient)
self.p, self.t = self.pt.pt_calculator()
self.assertEqual(self.p[0], 264.6000000000001)
self.assertEqual(self.t[0], 300)
def test_if_pt_calculator_pressure_value_works(self):
self.single_depth = 10
self.pt = PTCalculator(depth, density, geothermal_gradient)
self.p, self.t = self.pt.pt_calculator()
self.single_p = self.pt.pt_calculator_pressure_value(self.p, self.single_depth)
self.assertEqual(self.single_p, 264.6000000000001)
#if single_pressure_value == False:
# print('You will have multiple pressure values')
|
# !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from collections import Counter
import itertools
import iterators
import os
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import pickle
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for named entity recognition",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--output-dir', type=str, default='../results',
help='directory to save model files to')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--train_fraction', type=float, default=0.8,
help='fraction of data to use for training. remainder used for testing.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--buckets', type=str, default="",
help='unique bucket sizes')
parser.add_argument('--char-embed', type=int, default=25,
help='Embedding size for each unique character.')
parser.add_argument('--char-filter-list', type=str, default="3,4,5",
help='unique filter sizes for char level cnn')
parser.add_argument('--char-filters', type=int, default=20,
help='number of each filter size')
parser.add_argument('--word-embed', type=int, default=500,
                    help='Embedding size for each unique word.')
parser.add_argument('--word-filter-list', type=str, default="3,4,5",
                    help='unique filter sizes for word level cnn')
parser.add_argument('--word-filters', type=int, default=200,
help='number of each filter size')
parser.add_argument('--lstm-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--lstm-layers', type=int, default=1,
help='number of recurrent layers')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
def save_obj(obj, name):
with open(name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def save_model():
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
return mx.callback.do_checkpoint(os.path.join(args.output_dir, "checkpoint"), args.save_period)
def build_vocab(nested_list):
"""
:param nested_list: list of list of string
:return: dictionary mapping from string to int, inverse of that dictionary
"""
# Build vocabulary
word_counts = Counter(itertools.chain(*nested_list))
logging.info("build_vocab: word_counts=%d" % (len(word_counts)))
# Mapping from index to label
vocabulary_inv = [x[0] for x in word_counts.most_common()]
# Mapping from label to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return vocabulary, vocabulary_inv
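# Tiny illustration (hedged sketch): build_vocab([["O", "B-PER", "O"], ["O"]]) returns
# ({"O": 0, "B-PER": 1}, ["O", "B-PER"]) -- indices are assigned by descending frequency.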
def build_iters(data_dir, max_records, train_fraction, batch_size, buckets=None):
"""
Reads a csv of sentences/tag sequences into a pandas dataframe.
Converts into X = array(list(int)) & Y = array(list(int))
Splits into training and test sets
Builds dictionaries mapping from index labels to labels/ indexed features to features
:param data_dir: directory to read in csv data from
:param max_records: total number of records to randomly select from input data
:param train_fraction: fraction of the data to use for training
:param batch_size: records in mini-batches during training
:param buckets: size of each bucket in the iterators
:return: train_iter, val_iter, word_to_index, index_to_word, pos_to_index, index_to_pos
"""
# Read in data as numpy array
df = pd.read_pickle(os.path.join(data_dir, "ner_data.pkl"))[:max_records]
# Get feature lists
entities=[list(array) for array in df["BILOU_tag"].values]
sentences = [list(array) for array in df["token"].values]
chars=[[[c for c in word] for word in sentence] for sentence in sentences]
# Build vocabularies
entity_to_index, index_to_entity = build_vocab(entities)
word_to_index, index_to_word = build_vocab(sentences)
char_to_index, index_to_char = build_vocab([np.array([c for c in word]) for word in index_to_word])
save_obj(entity_to_index, os.path.join(args.data_dir, "tag_to_index"))
# Map strings to integer values
indexed_entities=[list(map(entity_to_index.get, l)) for l in entities]
indexed_tokens=[list(map(word_to_index.get, l)) for l in sentences]
indexed_chars=[[list(map(char_to_index.get, word)) for word in sentence] for sentence in chars]
# Split into training and testing data
idx=int(len(indexed_tokens)*train_fraction)
logging.info("Preparing train/test datasets splitting at idx %d on total %d sentences using a batchsize of %d", idx, len(indexed_tokens), batch_size)
X_token_train, X_char_train, Y_train = indexed_tokens[:idx], indexed_chars[:idx], indexed_entities[:idx]
X_token_test, X_char_test, Y_test = indexed_tokens[idx:], indexed_chars[idx:], indexed_entities[idx:]
# build iterators to feed batches to network
train_iter = iterators.BucketNerIter(sentences=X_token_train, characters=X_char_train, label=Y_train,
max_token_chars=5, batch_size=batch_size, buckets=buckets)
logging.info("Creating the val_iter using %d sentences", len(X_token_test))
val_iter = iterators.BucketNerIter(sentences=X_token_test, characters=X_char_test, label=Y_test,
max_token_chars=train_iter.max_token_chars, batch_size=batch_size, buckets=train_iter.buckets)
return train_iter, val_iter, word_to_index, char_to_index, entity_to_index
def sym_gen(seq_len):
"""
Build NN symbol depending on the length of the input sequence
"""
sentence_shape = train_iter.provide_data[0][1]
char_sentence_shape = train_iter.provide_data[1][1]
entities_shape = train_iter.provide_label[0][1]
X_sent = mx.symbol.Variable(train_iter.provide_data[0].name)
X_char_sent = mx.symbol.Variable(train_iter.provide_data[1].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
###############################
# Character embedding component
###############################
char_embeddings = mx.sym.Embedding(data=X_char_sent, input_dim=len(char_to_index), output_dim=args.char_embed, name='char_embed')
char_embeddings = mx.sym.reshape(data=char_embeddings, shape=(0,1,seq_len,-1,args.char_embed), name='char_embed2')
char_cnn_outputs = []
for i, filter_size in enumerate(args.char_filter_list):
# Kernel that slides over entire words resulting in a 1d output
convi = mx.sym.Convolution(data=char_embeddings, kernel=(1, filter_size, args.char_embed), stride=(1, 1, 1),
num_filter=args.char_filters, name="char_conv_layer_" + str(i))
acti = mx.sym.Activation(data=convi, act_type='tanh')
pooli = mx.sym.Pooling(data=acti, pool_type='max', kernel=(1, char_sentence_shape[2] - filter_size + 1, 1),
stride=(1, 1, 1), name="char_pool_layer_" + str(i))
pooli = mx.sym.transpose(mx.sym.Reshape(pooli, shape=(0, 0, 0)), axes=(0, 2, 1), name="cchar_conv_layer_" + str(i))
char_cnn_outputs.append(pooli)
# combine features from all filters & apply dropout
cnn_char_features = mx.sym.Concat(*char_cnn_outputs, dim=2, name="cnn_char_features")
regularized_cnn_char_features = mx.sym.Dropout(data=cnn_char_features, p=args.dropout, mode='training',
name='regularized charCnn features')
##################################
# Combine char and word embeddings
##################################
word_embeddings = mx.sym.Embedding(data=X_sent, input_dim=len(word_to_index), output_dim=args.word_embed, name='word_embed')
rnn_features = mx.sym.Concat(*[word_embeddings, regularized_cnn_char_features], dim=2, name='rnn input')
##############################
# Bidirectional LSTM component
##############################
# unroll the lstm cell in time, merging outputs
bi_cell.reset()
output, states = bi_cell.unroll(length=seq_len, inputs=rnn_features, merge_outputs=True)
# Map to num entity classes
rnn_output = mx.sym.Reshape(output, shape=(-1, args.lstm_state_size * 2), name='r_output')
fc = mx.sym.FullyConnected(data=rnn_output, num_hidden=len(entity_to_index), name='fc_layer')
# reshape back to same shape as loss will be
reshaped_fc = mx.sym.transpose(mx.sym.reshape(fc, shape=(-1, seq_len, len(entity_to_index))), axes=(0, 2, 1))
sm = mx.sym.SoftmaxOutput(data=reshaped_fc, label=Y, ignore_label=-1, use_ignore=True, multi_output=True, name='softmax')
return sm, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
def train(train_iter, val_iter):
import metrics
    devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
logging.info("train on device %s using optimizer %s at learningrate %f for %d epochs using %d records: lstm_state_size=%d ...",
devs, args.optimizer, args.lr, args.num_epochs, args.max_records, args.lstm_state_size)
module = mx.mod.BucketingModule(sym_gen, train_iter.default_bucket_key, context=devs)
module.fit(train_data=train_iter,
eval_data=val_iter,
eval_metric=metrics.composite_classifier_metrics(),
optimizer=args.optimizer,
optimizer_params={'learning_rate': args.lr },
initializer=mx.initializer.Uniform(0.1),
num_epoch=args.num_epochs,
epoch_end_callback=save_model())
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.buckets = list(map(int, args.buckets.split(','))) if len(args.buckets) > 0 else None
args.char_filter_list = list(map(int, args.char_filter_list.split(',')))
# Build data iterators
train_iter, val_iter, word_to_index, char_to_index, entity_to_index = build_iters(args.data_dir, args.max_records,
args.train_fraction, args.batch_size, args.buckets)
logging.info("validation iterator: %s", val_iter)
# Define the recurrent layer
bi_cell = mx.rnn.SequentialRNNCell()
for layer_num in range(args.lstm_layers):
bi_cell.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="forward_layer_" + str(layer_num)),
mx.rnn.LSTMCell(num_hidden=args.lstm_state_size, prefix="backward_layer_" + str(layer_num))))
bi_cell.add(mx.rnn.DropoutCell(args.dropout))
train(train_iter, val_iter)
|
from turtle import *
shape("arrow")
speed(2)
up()
goto(-50,0)
down()
bgcolor("dodgerblue")
color("white")
begin_fill()
for i in range(5):
forward(100)
right(144)
end_fill()
hideturtle()
|
from conf import spaces_settings
def spaces_processor(request):
""" Set global template variables. """
return {
'site_name': spaces_settings['SITE_NAME']
}
|
__author__ = 'vitor'
from django.conf import settings
from django.core.mail import send_mail
from collections import defaultdict
import string
def enviar_email(obj):
email_assunto = settings.EMAIL_SUBJECT
habilidades = get_usuario_habilidades(obj)
if habilidades:
for habilidade in habilidades:
mensagem = get_mensagem(habilidade=habilidade)
send_mail(email_assunto, mensagem, settings.EMAIL_HOST_USER, [obj.email,])
else:
mensagem = get_mensagem()
send_mail(email_assunto, mensagem, settings.EMAIL_HOST_USER, [obj.email,])
def get_usuario_habilidades(obj):
habilidades = []
if obj.html_c > 6 and obj.css_c > 6 and obj.javascript_c > 6:
habilidades.append('Front End ')
if obj.python_c > 6 and obj.django_c > 6:
habilidades.append('Back End ')
if obj.ios_c > 6 and obj.android_c > 6:
habilidades.append('Mobile ')
return habilidades
def get_mensagem(**kwargs):
return string.Formatter().vformat(settings.EMAIL_MESSAGE, (), defaultdict(str, **kwargs))
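# Illustrative behaviour (hedged; the EMAIL_MESSAGE template shown here is hypothetical):
#
#     # settings.EMAIL_MESSAGE = 'Hi! Your profile matches a {habilidade}position.'
#     get_mensagem(habilidade='Back End ')  # -> 'Hi! Your profile matches a Back End position.'
#     get_mensagem()                        # missing keys default to '' via defaultdict(str)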
|
from datetime import datetime, date
from marqeta.response_models import datetime_object
import json
import re
class StoreResponseModel(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def name(self):
return self.json_response.get('name', None)
@property
def active(self):
return self.json_response.get('active', None)
@property
def contact(self):
return self.json_response.get('contact', None)
@property
def contact_email(self):
return self.json_response.get('contact_email', None)
@property
def longitude(self):
return self.json_response.get('longitude', None)
@property
def latitude(self):
return self.json_response.get('latitude', None)
@property
def address1(self):
return self.json_response.get('address1', None)
@property
def address2(self):
return self.json_response.get('address2', None)
@property
def city(self):
return self.json_response.get('city', None)
@property
def state(self):
return self.json_response.get('state', None)
@property
def province(self):
return self.json_response.get('province', None)
@property
def zip(self):
return self.json_response.get('zip', None)
@property
def postal_code(self):
return self.json_response.get('postal_code', None)
@property
def phone(self):
return self.json_response.get('phone', None)
@property
def country(self):
return self.json_response.get('country', None)
@property
def token(self):
return self.json_response.get('token', None)
@property
def partial_auth_flag(self):
return self.json_response.get('partial_auth_flag', None)
@property
def mid(self):
return self.json_response.get('mid', None)
@property
def network_mid(self):
return self.json_response.get('network_mid', None)
@property
def merchant_token(self):
return self.json_response.get('merchant_token', None)
@property
def partial_approval_capable(self):
return self.json_response.get('partial_approval_capable', None)
@property
def keyed_auth_cvv_enforced(self):
return self.json_response.get('keyed_auth_cvv_enforced', None)
@property
def created_time(self):
if 'created_time' in self.json_response:
return datetime_object('created_time', self.json_response)
@property
def last_modified_time(self):
if 'last_modified_time' in self.json_response:
return datetime_object('last_modified_time', self.json_response)
def __repr__(self):
return '<Marqeta.response_models.store_response_model.StoreResponseModel>' + self.__str__()
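# Illustrative usage (hedged; the payload is a made-up subset of the store fields above):
#
#     store = StoreResponseModel({'name': 'Main Street Store', 'city': 'Oakland', 'active': True})
#     store.name    # -> 'Main Street Store'
#     store.zip     # -> None (absent keys fall back to None)
#     str(store)    # -> the raw payload serialized with json.dumps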
|
import taichi as ti
@ti.archs_support_sparse
def test_pointer():
x = ti.field(ti.f32)
s = ti.field(ti.i32)
n = 16
ptr = ti.root.pointer(ti.i, n)
ptr.dense(ti.i, n).place(x)
ti.root.place(s)
s[None] = 0
@ti.kernel
def activate():
ti.activate(ptr, 1)
ti.activate(ptr, 32)
@ti.kernel
def func():
for i in x:
s[None] += 1
activate()
func()
assert s[None] == 32
test_pointer()
|
from lib.docker import client
from settings.docker import HOSTNAME
def all(driver='bridge'):
return client.networks.list(filters={'driver': driver})
def get(network_id):
return client.networks.get(network_id)
def get_containers_from(network_id):
network = get(network_id)
if network:
return network.containers
return None
def get_current():
"""Returns the first network occurrance"""
me = client.containers.get(HOSTNAME)
nets = all()
for net in nets:
net.reload()
if me in net.containers:
return net, me
return None, me
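# Illustrative sketch (not part of the original module): typical use of
# get_current() to find the bridge network this container is attached to.
# It needs a reachable Docker daemon and a HOSTNAME matching this container.
def _example_current_network():
    net, me = get_current()
    if net is None:
        return None, me.name
    return net.name, [container.name for container in net.containers]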
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Report."""
import ast
class ReportRecord(object):
"""Record Class to record all data in one search loop."""
def __init__(self, step_name=None, worker_id=None, **kwargs):
self._step_name = step_name
self._worker_id = worker_id
self._desc = None
self._performance = None
self._checkpoint_path = None
self._model_path = None
self._weights_file = None
self._info = None
self._epoch = 0
self._objectives = {}
self._objective_keys = None
self._rewards = None
self._runtime = {}
if kwargs:
for key, value in kwargs.items():
setattr(self, key, value)
def __hash__(self):
"""Override hash code."""
return hash(self.uid)
def __eq__(self, other):
"""Override eq func, step name and worker id is same."""
return self.uid == other.uid
def __repr__(self):
"""Override repr, output all record attrs."""
return str(
{'step_name': self._step_name, 'worker_id': self._worker_id, 'desc': self._desc, 'epoch': self._epoch,
'performance': self._performance, 'checkpoint_path': self._checkpoint_path,
'model_path': self._model_path, 'weights_file': self._weights_file, 'info': self._info,
'objectives': self._objectives, '_objective_keys': self._objective_keys, 'rewards': self.rewards,
'runtime': self._runtime})
def __gt__(self, other):
"""Override gt for sorted according to performance attr."""
return self.rewards > other.rewards
@property
def uid(self):
"""Uid for record. ReadOnly."""
return '{}_{}_{}'.format(self.step_name, self.worker_id, self.epoch)
@property
def epoch(self):
"""Get epoch."""
return self._epoch
@epoch.setter
def epoch(self, value):
"""Set epoch."""
self._epoch = value
@property
def step_name(self):
"""Get Step name."""
return self._step_name
@step_name.setter
def step_name(self, value):
"""Set Step name."""
self._step_name = value
@property
def worker_id(self):
"""Get worker id."""
return self._worker_id
@worker_id.setter
def worker_id(self, value):
"""Set worker id."""
self._worker_id = value
@property
def desc(self):
"""Get desc."""
return self._desc
@desc.setter
def desc(self, value):
"""Set desc and parse value into dict."""
if isinstance(value, str):
value = ast.literal_eval(value)
self._desc = value
@property
def performance(self):
"""Get performance."""
return self._performance
@performance.setter
def performance(self, value):
"""Set performance and parse value into dict."""
if isinstance(value, str):
value = ast.literal_eval(value)
self._performance = value
@property
def checkpoint_path(self):
"""Get checkpoint_path."""
return self._checkpoint_path
@checkpoint_path.setter
def checkpoint_path(self, value):
"""Set checkpoint_path and parse value into dict."""
self._checkpoint_path = value
@property
def model_path(self):
"""Get model_path."""
return self._model_path
@model_path.setter
def model_path(self, value):
"""Set model_path and parse value into dict."""
self._model_path = value
@property
def weights_file(self):
"""Get weights file."""
return self._weights_file
@weights_file.setter
def weights_file(self, value):
"""Set weights_file and parse value int dict."""
self._weights_file = value
@property
def info(self):
"""Get rung id."""
return self._info
@info.setter
def info(self, value):
"""Set rung id."""
self._info = value
@property
def objectives(self):
"""Get objectives."""
return self._objectives
@objectives.setter
def objectives(self, value):
"""Set objectives."""
self._objectives = value
@property
def objective_keys(self):
"""Get objective_keys."""
return self._objective_keys
@objective_keys.setter
def objective_keys(self, value):
"""Set objective_keys."""
self._objective_keys = value if isinstance(value, list) else [value]
@property
def rewards(self):
"""Get reward_performance(ReadOnly)."""
if not self.performance:
return None
if isinstance(self.performance, list):
return self.performance
if not self.objective_keys:
self._objective_keys = list(self.performance.keys())
res = []
for obj in self.objective_keys:
if isinstance(obj, int):
obj = list(self.performance.keys())[obj]
value = self.performance.get(obj)
if value is None:
raise ValueError("objective_keys in search_algorithm should be the same in trainer.metrics.")
if self.objectives.get(obj) == 'MIN':
value = -value
res.append(value)
return res[0] if len(res) == 1 else res
@rewards.setter
def rewards(self, value):
"""Get rewards, ReadOnly property."""
self._rewards = value
@property
def runtime(self):
"""Get runtime."""
return self._runtime
@runtime.setter
def runtime(self, value):
"""Set runtime."""
self._runtime = value
@classmethod
def from_dict(cls, src_dic):
"""Create report class from dict."""
src_cls = cls()
if src_dic:
for key, value in src_dic.items():
setattr(src_cls, key, value)
return src_cls
def load_dict(self, src_dic):
"""Load values from dict."""
if src_dic:
for key, value in src_dic.items():
setattr(self, key, value)
return self
def from_sample(self, sample, desc=None):
"""Load values from sample."""
if isinstance(sample, tuple):
sample = dict(worker_id=sample[0], desc=sample[1])
self.load_dict(sample)
if desc:
self.desc = desc
return self
def serialize(self):
"""Serialize record class into a dict."""
return ast.literal_eval(self.__repr__())
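# Illustrative sketch (not part of the original module): shows how `rewards` is
# derived from `performance`, `objective_keys` and `objectives`. The metric
# names and numbers below are made up.
def _example_report_record():
    record = ReportRecord.from_dict({
        'step_name': 'nas', 'worker_id': 1, 'epoch': 5,
        'performance': {'accuracy': 0.91, 'latency': 12.0},
        'objective_keys': ['accuracy', 'latency'],
        'objectives': {'latency': 'MIN'},
    })
    return record.uid, record.rewards  # -> ('nas_1_5', [0.91, -12.0])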
|
# Built-in modules #
import os, sh, glob, shutil, stat
# Internal modules #
from seqenv.common import unzip
from seqenv.common.cache import property_cached
################################################################################
class DirectoryPath(str):
def __repr__(self): return '<%s object "%s">' % (self.__class__.__name__, self.path)
@classmethod
def clean_path(cls, path):
"""Given a path, return a cleaned up version for initialization."""
# Conserve 'None' object style #
if path is None: return None
# Don't nest DirectoryPaths or the like #
if hasattr(path, 'path'): path = path.path
# Expand the tilda #
if "~" in path: path = os.path.expanduser(path)
# Our standard is to end with a slash for directories #
if not path.endswith('/'): path += '/'
# Return the result #
return path
def __new__(cls, path, *args, **kwargs):
"""A DirectoryPath is in fact a string"""
return str.__new__(cls, cls.clean_path(path))
def __init__(self, path):
self.path = self.clean_path(path)
@property
def name(self):
"""Just the directory name"""
return os.path.basename(os.path.dirname(self.path))
@property
def exists(self):
"""Does it exist in the file system?"""
return os.path.lexists(self.path) # Include broken symlinks
@property
def absolute_path(self):
"""The absolute path starting with a `/`"""
return os.path.abspath(self.path) + '/'
@property
def is_symlink(self):
"""Is this directory a symbolic link to an other directory?"""
return os.path.islink(self.path.rstrip('/'))
@property
def directory(self):
"""The full path of directory containing this one"""
return DirectoryPath(os.path.dirname(os.path.dirname(self.path)))
@property
def permissions(self):
"""Convenience object for dealing with permissions."""
return FilePermissions(self.path)
def remove(self):
if not self.exists: return False
if self.is_symlink: return self.remove_when_symlink()
shutil.rmtree(self.path, ignore_errors=True)
return True
def remove_when_symlink(self):
if not self.exists: return False
os.remove(self.path.rstrip('/'))
return True
def create(self, safe=False, inherit=True):
# Create it #
if not safe:
os.makedirs(self.path)
if inherit: os.chmod(self.path, self.directory.permissions.number)
if safe:
try:
os.makedirs(self.path)
if inherit: os.chmod(self.path, self.directory.permissions.number)
except OSError: pass
def create_if_not_exists(self):
if not self.exists: self.create()
################################################################################
class FilePath(str):
"""I can never remember all those darn `os.path` commands, so I made a class
that wraps them with an easier and more pythonic syntax.
path = FilePath('/home/root/text.txt')
print path.extension
print path.directory
print path.filename
You can find lots of the common things you would need to do with file paths.
Such as: path.make_executable() etc etc."""
def __new__(cls, path, *args, **kwargs):
"""A FilePath is in fact a string"""
return str.__new__(cls, cls.clean_path(path))
def __init__(self, path):
self.path = self.clean_path(path)
def __iter__(self): return open(self.path)
def __len__(self):
if self.path is None: return 0
return self.count_lines
@classmethod
def clean_path(cls, path):
"""Given a path, return a cleaned up version for initialization"""
# Conserve None object style #
if path is None: return None
# Don't nest FilePaths or the like #
if hasattr(path, 'path'): path = path.path
# Expand tilda #
if "~" in path: path = os.path.expanduser(path)
# Expand star #
if "*" in path:
matches = glob.glob(path)
if len(matches) < 1: raise Exception("Found exactly no files matching '%s'" % path)
if len(matches) > 1: raise Exception("Found several files matching '%s'" % path)
path = matches[0]
# Return the result #
return path
@property_cached
def count_lines(self):
return int(sh.wc('-l', self.path).split()[0])
@property
def exists(self):
"""Does it exist in the file system. Returns True or False."""
return os.path.lexists(self.path)
@property
def prefix_path(self):
"""The full path without the (last) extension and trailing period"""
return str(os.path.splitext(self.path)[0])
@property
def prefix(self):
"""Just the filename without the (last) extension and trailing period"""
return str(os.path.basename(self.prefix_path))
@property
def filename(self):
"""Just the filename with the extension"""
return str(os.path.basename(self.path))
@property
def extension(self):
"""The extension with the leading period"""
return os.path.splitext(self.path)[1]
@property
def absolute_path(self):
"""The absolute path starting with a `/`"""
return os.path.abspath(self.path)
@property
def directory(self):
"""The directory containing this file"""
# The built-in function #
directory = os.path.dirname(self.path)
# Maybe we need to go the absolute path way #
if not directory: directory = os.path.dirname(self.absolute_path)
# Return #
return DirectoryPath(directory+'/')
@property
def count_bytes(self):
"""The number of bytes"""
if not self.exists: return 0
return os.path.getsize(self.path)
def remove(self):
if not self.exists: return False
os.remove(self.path)
return True
def write(self, contents):
with open(self.path, 'w') as handle: handle.write(contents)
def writelines(self, contents):
with open(self.path, 'w') as handle: handle.writelines(contents)
def must_exist(self):
"""Raise an exception if the path doesn't exist."""
if not self.exists: raise Exception("The file path '%s' does not exist." % self.path)
@property
def size(self):
"""Human readable file size"""
return Filesize(self.count_bytes)
def link_from(self, path, safe=False):
"""Make a link here pointing to another file somewhere else.
The destination is hence self.path and the source is *path*."""
# Get source and destination #
source = path
destination = self.path
# Do it #
if not safe:
if os.path.exists(destination): os.remove(destination)
os.symlink(source, destination)
# Do it safely #
if safe:
try: os.remove(destination)
except OSError: pass
try: os.symlink(source, destination)
except OSError: pass
def link_to(self, path, safe=False, absolute=True):
"""Create a link somewhere else pointing to this file.
The destination is hence *path* and the source is self.path."""
# Get source and destination #
if absolute: source = self.absolute_path
else: source = self.path
destination = path
# Do it #
if not safe:
if os.path.exists(destination): os.remove(destination)
os.symlink(source, destination)
# Do it safely #
if safe:
try: os.remove(destination)
except OSError: pass
try: os.symlink(source, destination)
except OSError: pass
def unzip_to(self, destination=None, inplace=False):
"""Make an unzipped version of the file at a given path"""
return unzip(self.path, destination=destination, inplace=inplace)
################################################################################
class FilePermissions(object):
"""Container for reading and setting a files permissions"""
def __init__(self, path):
self.path = path
@property
def number(self):
"""The permission bits as an octal integer"""
        return os.stat(self.path).st_mode & 0o777
def make_executable(self):
return os.chmod(self.path, os.stat(self.path).st_mode | stat.S_IEXEC)
def only_readable(self):
"""Remove all writing privileges"""
return os.chmod(self.path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
################################################################################
class Filesize(object):
"""
Container for a size in bytes with a human readable representation
Use it like this:
>>> size = Filesize(123123123)
>>> print size
'117.4 MiB'
"""
chunk = 1000 # Could be 1024 if you like old-style sizes
units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB']
precisions = [0, 0, 1, 2, 2, 2]
def __init__(self, size):
self.size = size
def __int__(self):
return self.size
def __eq__(self, other):
return self.size == other
def __str__(self):
if self.size == 0: return '0 bytes'
from math import log
unit = self.units[min(int(log(self.size, self.chunk)), len(self.units) - 1)]
return self.format(unit)
def format(self, unit):
# Input checking #
if unit not in self.units: raise Exception("Not a valid file size unit: %s" % unit)
# Special no plural case #
if self.size == 1 and unit == 'bytes': return '1 byte'
# Compute #
exponent = self.units.index(unit)
quotient = float(self.size) / self.chunk**exponent
precision = self.precisions[exponent]
format_string = '{:.%sf} {}' % (precision)
# Return a string #
return format_string.format(quotient, unit)
|
from num2words import num2words
import Python_Education_Base.GeekBrains_Python_Base_Work_With_Files_Practice.generators as Generators
import re
filename = 'ex4.txt'
Generators.ex4_gen(filename, 1000)
with open(filename, 'r') as f:
pattern = re.compile(r"(.+)-(\d+)", re.UNICODE)
for line in f.readlines():
(name, digit) = pattern.search(line).groups()
digit = int(digit)
print(f'{num2words(digit, lang="ru")}-{digit}')
|
from __future__ import print_function
import json
import boto3
import traceback
ddb = boto3.client('dynamodb', region_name = 'cn-northwest-1')
#print('Loading function')
event_delete = {
"Records": [
{
"eventID": "9bc813d4c24bcc307147ac25a170b44d",
"eventVersion": "1.1",
"dynamodb": {
"Keys": {
"recordId": {
"N": "0"
}
},
"ApproximateCreationDateTime": 1542016080.0,
"StreamViewType": "NEW_IMAGE",
"SequenceNumber": "4795000000000000599541765",
"SizeBytes": 9
},
"awsRegion": "cn-north-1",
"eventName": "REMOVE",
"eventSourceARN": "arn:aws-cn:dynamodb:cn-north-1:741251161495:table/ddb_stream_test/stream/2018-11-11T09:16:30.755",
"eventSource": "aws:dynamodb"
}
]
}
event_insert = {
"Records": [
{
"eventID": "880dff58ebad9ab4b164ba6d63e96d92",
"eventVersion": "1.1",
"dynamodb": {
"SequenceNumber": "3270900000000001940388956",
"Keys": {
"recordId": {
"N": "3"
}
},
"SizeBytes": 70,
"NewImage": {
"name": {
"S": "ccc"
},
"gold": {
"N": "110"
},
"level": {
"N": "3"
},
"recordId": {
"N": "3"
},
"timestamp": {
"N": "1541990916"
},
"role": {
"S": "freelancer"
}
},
"ApproximateCreationDateTime": 1541990940.0,
"StreamViewType": "NEW_IMAGE"
},
"awsRegion": "cn-north-1",
"eventName": "INSERT",
"eventSourceARN": "arn:aws-cn:dynamodb:cn-north-1:741251161495:table/ddb_stream_test/stream/2018-11-11T09:16:30.755",
"eventSource": "aws:dynamodb"
}
]
}
event = event_insert
def lambda_handler(event, context):
#print("Received event: " + json.dumps(event, indent=2))
for record in event['Records']:
print("eventID " + record['eventID'])
print("eventName " + record['eventName'])
#print("DynamoDB Record: " + json.dumps(record['dynamodb'], indent=2))
#print("DynamoDB Record: " + json.dumps(record['dynamodb'], indent=2))
#print("New Image: " + json.dumps(record['dynamodb']['NewImage']))
#print(record['dynamodb']['Keys'])
if record['eventName'] == 'INSERT' or record['eventName'] == 'MODIFY':
try:
response = ddb.put_item(
TableName = 'ddb_stream_test_zhy',
Item = record['dynamodb']['NewImage']
)
except Exception:
print("cannot put_item to ddb")
#traceback.print_exc()
print(traceback.format_exc())
else:
print(response)
if record['eventName'] == 'REMOVE':
try:
response = ddb.delete_item(
TableName = 'ddb_stream_test_zhy',
Key = record['dynamodb']['Keys']
)
except Exception:
print("cannot delete_item to ddb")
print(traceback.format_exc())
else:
print(response)
return 'Successfully processed {} records.'.format(len(event['Records']))
if __name__ == "__main__":
context = None
print(lambda_handler(event, context))
|
import re
from urllib.request import urlretrieve
dev_settings_path = "hifireg/hifireg/settings/developer.py"
def wget(url, dest_path):
urlretrieve(url, dest_path)
def get_current_database_postfix():
try:
with open(dev_settings_path, "r") as f:
for line in f:
match_list = re.findall("hifiregdb_(.*)'", line)
if match_list:
return match_list[0]
except FileNotFoundError:
pass
return "dev"
def replace_database_postfix(postfix_default):
postfix = input(f"Enter your database postfix [{postfix_default}]: ")
postfix = postfix if postfix else postfix_default
db_original = "hifiregdb_dev"
db_new = f"hifiregdb_{postfix}"
print(f"Your database name is: {db_new}")
with open(dev_settings_path, "r") as f:
lines = f.readlines()
with open(dev_settings_path, "w") as f:
for line in lines:
f.write(re.sub(db_original, db_new, line))
def get_dev_settings(url, dest_path):
postfix_default = get_current_database_postfix()
wget(url, dest_path)
replace_database_postfix(postfix_default)
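# Illustrative sketch (not part of the original script): the usual entry point
# is get_dev_settings, which downloads developer.py and re-applies the local
# database postfix. The URL below is made up.
def example_refresh_dev_settings():
    get_dev_settings("https://example.com/developer.py", dev_settings_path)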
|
import requests
import flask
import yfinance as yf
from pandas_datareader import data as pdr
import dash_table
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
from datetime import datetime as dt
from datetime import date
import re
import plotly.express as px
from app import app
from utils import Header
layout = dbc.Container([
Header(app),
dcc.Store(id="ticker-info"),
dcc.Store(id="ticker-stock-prices"),
dbc.Row([
# Left Config
dbc.Col([
html.I("Please enter a ticker name"),
html.Br(),
dcc.Input(id="ticker_name", type="text", placeholder="", debounce=True),
# html.Div(id="output"),
]),
# Right Config
dbc.Col([
dcc.DatePickerRange(
id='my-date-picker-range',
min_date_allowed=dt(1995, 8, 5),
max_date_allowed=date.today(),
initial_visible_month=dt(2017, 8, 5),
end_date=date.today(),
),
])
], style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': 'rgb(250, 250, 250)',
'padding': '10px 5px'
}),
# Left Graph
html.Div([
dcc.Graph(id="line-graph")
]),
# html.Div(id="ticker-info-summary", style={'display':'none'}, children=[
# ])
html.Details([
html.Summary('Business Summary'),
html.Div(id="ticker-info-summary")
]),
dbc.Row([
dbc.Col([dcc.Graph(id="polar-graph-1")]),
dbc.Col([dcc.Graph(id="polar-graph-2")])
]),
html.Div([
dash_table.DataTable(
id='datatable-row-ids',
            columns=[
                {'name': 'Date', 'id': 'Date', 'deletable': True},
                {'name': 'Open', 'id': 'Open', 'deletable': True},
                {'name': 'High', 'id': 'High', 'deletable': True},
                {'name': 'Low', 'id': 'Low', 'deletable': True},
                {'name': 'Close', 'id': 'Close', 'deletable': True},
                {'name': 'Adj Close', 'id': 'Adj Close', 'deletable': True},
                {'name': 'Volume', 'id': 'Volume', 'deletable': True},
            ],
page_current=0,
page_size=10,
page_action='custom'
)
])
# # Right Graph
# html.Div([
# dcc.Graph(id='x-time-series'),
# dcc.Graph(id='y-time-series'),
# ], style={'display': 'inline-block', 'width': '49%'}),
# Slider
# html.Div(dcc.Slider(
# id='crossfilter-year--slider',
# min=df['Year'].min(),
# max=df['Year'].max(),
# value=df['Year'].max(),
# marks={str(year): str(year) for year in df['Year'].unique()},
# step=None
# ), style={'width': '49%', 'padding': '0px 20px 20px 20px'})
])
def get_period_data(ticker_name, start_date, end_date):
# download dataframe
df = pdr.get_data_yahoo(ticker_name, start=start_date, end=end_date)
for col in df.columns:
df[col] = df[col].map(lambda x: '{0:.2f}'.format(x))
df.reset_index(inplace=True)
fig = px.line(df, x='Date', y = [col for col in df.columns if (col != 'Date' and col != 'Volume')], title= f"Stock Price for {ticker_name.upper()}")
return fig, df.to_dict('records')
def get_ticker_info(ticker_name):
try:
data = yf.Ticker(ticker_name)
return data.info
    except Exception:
        return {}
@app.callback(
[Output('line-graph', 'figure'),
Output('ticker-info', 'data'),
Output('ticker-stock-prices', 'data')],
[Input('my-date-picker-range', 'start_date'),
Input('my-date-picker-range', 'end_date'),
Input('ticker_name', 'value')])
def update_output(start_date, end_date, ticker_name):
fig = px.line()
ticker_info = {}
stock_prices = {}
if start_date is not None:
start_date = dt.strptime(re.split('T| ', start_date)[0], '%Y-%m-%d')
if end_date is not None:
end_date = dt.strptime(re.split('T| ', end_date)[0], '%Y-%m-%d')
if start_date and end_date and ticker_name:
print (f"start date is: {start_date}")
print (f"end date is: {end_date}")
fig, stock_prices = get_period_data(ticker_name, start_date, end_date)
fig.update_layout(transition_duration=500)
ticker_info = get_ticker_info(ticker_name)
return fig, ticker_info, stock_prices
@app.callback(
Output('ticker-info-summary', 'children'),
[Input('ticker-info', 'data')]
)
def update_summary(data):
if data:
if 'longBusinessSummary' in data:
return data['longBusinessSummary']
else:
return "Summary Not Available"
else:
return ""
@app.callback(
[Output("polar-graph-1", 'figure'),
Output("polar-graph-2", 'figure')],
[Input('ticker_name', 'value')]
)
def update_recommendations(ticker_name):
try:
ticker_name = ticker_name.upper()
data = yf.Ticker(ticker_name)
recommends = data.recommendations
one_month_recommends = recommends[recommends.index >= date.today() - pd.DateOffset(months=1)]
three_month_recommends = recommends[recommends.index >= date.today() - pd.DateOffset(months=3)]
one_month_value_counts = one_month_recommends['To Grade'].value_counts(dropna=True).to_dict()
one_month_value_counts = {k: v for k, v in one_month_value_counts.items() if k}
one_month_df = pd.DataFrame(dict(
r = list(one_month_value_counts.values()),
theta = list(one_month_value_counts.keys())
))
one_month_fig = px.line_polar(one_month_df, r='r', theta='theta', line_close=True, title= f'1 Month Recommendations for {ticker_name}')
one_month_fig.update_traces(fill='toself')
three_month_value_counts = three_month_recommends['To Grade'].value_counts(dropna=True).to_dict()
three_month_value_counts = {k: v for k, v in three_month_value_counts.items() if k}
three_month_df = pd.DataFrame(dict(
r = list(three_month_value_counts.values()),
theta = list(three_month_value_counts.keys())
))
three_month_fig = px.line_polar(three_month_df, r='r', theta='theta', line_close=True, title= f'3 Months Recommendations for {ticker_name}')
three_month_fig.update_traces(fill='toself')
return one_month_fig, three_month_fig
    except Exception:
        return px.line_polar(), px.line_polar()
@app.callback(
Output("datatable-row-ids", 'data'),
[Input('ticker-stock-prices', 'data'),
Input('datatable-row-ids', "page_current"),
Input('datatable-row-ids', "page_size")]
)
def update_table(data, page_current, page_size):
    try:
        if data:
            return data[page_current * page_size:(page_current + 1) * page_size]
        return []
    except Exception:
        return []
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import csv
import logging
import gzip
from bs4 import BeautifulSoup
from scraper import SimpleScraper, SeleniumScraper
from random import randint
import requests
IA_WEB_BASE_URL = 'http://web.archive.org'
def setup_logger():
""" Set up logging
"""
logfilename = "ia-scraper.log"
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=logfilename,
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logfilename
def download_webpage(url, filepath, compress=False, selenium=False):
scraper = SimpleScraper()
html = scraper.get(url)
if selenium:
if not html or html.find('Redirecting to...') != -1:
return
scraper = SeleniumScraper()
html = scraper.get(url)
scraper.driver.close()
if not html:
html = ''
logging.info("Saving to file {0:s}".format(filepath))
if compress:
with gzip.open(filepath, 'wb') as f:
f.write(bytes(html, 'utf-8'))
else:
with open(filepath, 'w', encoding='utf-8') as f:
f.write(html)
def get_web_archive_snapshots(base_url, ia_url, year):
url_fmt = '{0:s}/__wb/calendarcaptures?url={1:s}&selected_year={2:d}'
url = url_fmt.format(base_url, ia_url, year)
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
headers = {'User-Agent': user_agent}
r = requests.get(url, headers=headers)
ts = []
if r.status_code == 200:
a = r.json()
for b in a:
for c in b:
for d in c:
if d and 'ts' in d:
ts.extend(d['ts'])
url_fmt = '/web/{0:d}/{1:s}'
snapshots = []
for s in ts:
url = url_fmt.format(s, ia_url)
snapshots.append(url)
return snapshots
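# Illustrative sketch (not part of the original script): each snapshot path
# returned above has the form '/web/<14-digit timestamp>/<archived URL>', e.g.
# '/web/20160105123456/http://example.com/', and is joined with IA_WEB_BASE_URL
# before downloading. The URL and year below are made up.
def example_snapshot_urls():
    return [IA_WEB_BASE_URL + path
            for path in get_web_archive_snapshots(IA_WEB_BASE_URL, 'http://example.com/', 2016)]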
if __name__ == "__main__":
logfilename = setup_logger()
parser = argparse.ArgumentParser(description='Homepages scraper')
parser.add_argument('input', default=None)
parser.add_argument('-c', '--config', default='top10.cfg',
help='Configuration file')
parser.add_argument('-d', '--dir', default='internet_archive',
help='Output directory for HTML files')
    parser.add_argument('--overwritten', dest='overwritten',
                        action='store_true',
                        help='Overwrite the HTML file if it already exists')
    parser.set_defaults(overwritten=False)
    parser.add_argument('-s', '--statistics', dest='statistics',
                        action='store_true',
                        help='Only count snapshots instead of downloading them')
    parser.set_defaults(statistics=False)
    parser.add_argument('--compress', dest='compress',
                        action='store_true',
                        help='Compress downloaded HTML files')
    parser.set_defaults(compress=False)
    parser.add_argument('--selenium', dest='selenium',
                        action='store_true',
                        help='Use Selenium to download dynamic HTML content')
    parser.set_defaults(selenium=False)
args = parser.parse_args()
logging.info(args)
# to keep scraped data
if not os.path.exists(args.dir):
os.makedirs(args.dir)
with open(args.input) as f:
reader = csv.DictReader(f)
total = 0
for r in reader:
src = r['src']
ia_url = r['ia_url']
if ia_url == '':
continue
end = int(r['ia_year_end'][:4])
begin = int(r['ia_year_begin'][:4])
current = end
sub_total = 0
while current >= begin:
logging.info("Visit yearly snapshots: {0:d}".format(current))
links = get_web_archive_snapshots(IA_WEB_BASE_URL, ia_url, current)
if not args.statistics:
for l in links:
href = l
print(href)
today = href.split('/')[2]
logging.info("Today: {0:s}".format(today))
month = int(today[4:6])
date = today[:8]
if date <= r['ia_year_begin'] or date >= r['ia_year_end']:
continue
filename = '{0:s}_ia_{1:s}.html'.format(src, today)
filepath = os.path.join(args.dir, filename)
if args.compress:
filepath += '.gz'
if args.overwritten or not os.path.exists(filepath):
url = IA_WEB_BASE_URL + href
download_webpage(url, filepath, args.compress, args.selenium)
else:
logging.info("Existing, skipped...")
#break
logging.info("Year: {0:d}, {1:d} snapshots"
.format(current, len(links)))
sub_total += len(links)
current -= 1
#break
logging.info("Source: {0:s}, {1:d} snapshots"
.format(src, sub_total))
total += sub_total
#break
logging.info("Total: {0:d} snapshots".format(total))
logging.info("Done")
|
from graphene_django.types import DjangoObjectType
from kollect.models import Like
class LikeNode(DjangoObjectType):
class Meta:
model = Like
name = "Like"
|
""" Countries and cities models """
from django.db import models
class Country(models.Model):
name = models.CharField("Country name", max_length=50)
def __str__(self):
return self.name
class City(models.Model):
name = models.CharField("City name", max_length=50)
country_id = models.ForeignKey(Country, on_delete=models.CASCADE)
def __str__(self):
return f'{self.name}, {self.country_id}'
|
from test import *
from MR import *
t=Test()
def test_nbit_odd(data):
out=nbit_odd(*data)
if out%2==0:
return False
n=data[0]
for i in range(n-1):
out//=2
if out>2:
return False
out//=2
if out!=0:
return False
return True
t.regist(nbit_odd)
t.regist(test_nbit_odd)
t.add(nbit_odd,[10])
t.add(nbit_odd,[1024])
t.regist(is_MRnonwitness)
t.add(is_MRnonwitness,[6,7,True])
t.add(is_MRnonwitness,[2,101,True])
t.add(is_MRnonwitness,[4,101,True])
t.add(is_MRnonwitness,[66,101,True])
t.add(is_MRnonwitness,[67,101,True])
t.add(is_MRnonwitness,[3,25,False])
def test_generate_prime(data):
out=generate_prime(*data)
if data[0]==1:
return False
elif data[0]==2:
if out in [3]:
return True
elif data[0]==3:
if out in [5,7]:
return True
elif data[0]==4:
if out in [11,13]:
return True
else:
return True
return False
t.regist(generate_prime)
t.regist(test_generate_prime)
t.add(generate_prime,[2])
t.add(generate_prime,[3])
t.add(generate_prime,[4])
t.test()
|
import re
import unicodedata
from .base import Service, Tokenizer, triples
def maketrans_remove(accents=("COMBINING ACUTE ACCENT", "COMBINING GRAVE ACCENT")):
""" Makes a translation for removing accents from a string. """
return str.maketrans("", "", "".join([unicodedata.lookup(a) for a in accents]))
class ClancyService(Service):
"""
headword/lemma retrieval from Steven Clancy's russian database.
>>> clancy = ClancyService(lang="rus")
>>> clancy.lemmatize("газету")
['газета']
>>> clancy.lemmatize("дела")
['дело', 'деть']
>>> clancy.lemmatize("ру́сская")
['русский']
>>> clancy.lemmatize("все")
['весь', 'все', 'всё']
>>> clancy.lemmatize("мороженое")
['мороженый', 'мороженое']
"""
SID = "clancy"
LANGUAGES = ["rus"]
ENDPOINT = "http://visualizingrussian.fas.harvard.edu/api/lemmatize"
def _build_params(self, form):
return dict(word=form)
def _response_to_lemmas(self, response):
if response.ok:
body = response.json().get("data", {}).get("lemmas", [])
if not isinstance(body, list):
body = []
else:
body = []
lemmas = [item["label"] for item in body]
return lemmas
class RUSTokenizer(Tokenizer):
"""
Tokenizer for Russian.
"""
TRANSLATION_REMOVE_ACCENTS = maketrans_remove(accents=(
"COMBINING ACUTE ACCENT",
"COMBINING GRAVE ACCENT",
"COMBINING X ABOVE"
))
def tokenize(self, text):
"""
The logic below ensures that accented words aren't split
and that certain hyphenated words are treated as a single token.
For reference:
0400-04FF is the cyrillic unicode block
0300-036F is the combining diacritical marks block
"""
text = unicodedata.normalize("NFC", text) # normalize
tokens = re.split(r"([^-\u0400-\u04FF\u0300-\u036F]+)", text)
tokens = self._split_hyphenated(tokens)
return triples(tokens, normalize=self._normalize)
def _normalize(self, token):
"""
Removes accents from the text.
"""
token = unicodedata.normalize("NFD", token)
token = token.translate(self.TRANSLATION_REMOVE_ACCENTS)
return unicodedata.normalize("NFC", token)
def _split_hyphenated(self, tokens):
"""
Splits hyphenated tokens with some exceptions for prefixes/suffixes.
"""
prefixes = ("по-", "кое-")
suffixes = ("-либо", "-ка", "-нибудь", "-то")
processed = []
for token in tokens:
if "-" in token:
w = self._normalize(token)
if any([w.startswith(s) for s in prefixes]) or any([w.endswith(s) for s in suffixes]):
processed.append(token)
else:
for t in re.split(r"(-)", token):
processed.append(t)
else:
processed.append(token)
return processed
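# Illustrative sketch (not part of the original module): the accent stripping
# used by RUSTokenizer._normalize removes combining marks such as the acute
# accent from stressed forms.
def _example_remove_accents():
    table = maketrans_remove()
    word = unicodedata.normalize("NFD", "ру́сская")  # stressed form
    return unicodedata.normalize("NFC", word.translate(table))  # -> 'русская'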
|
from .core import search_rental, search_reserve # noqa F401
|
from app.notify_client.inbound_number_client import InboundNumberClient
def test_add_inbound_sms_number(mocker, api_user_active):
inbound_number = '12345678901'
expected_url = '/inbound-number/add'
client = InboundNumberClient()
mock_post = mocker.patch('app.notify_client.inbound_number_client.InboundNumberClient.post')
client.add_inbound_sms_number(inbound_number)
mock_post.assert_called_once_with(
url=expected_url,
data={'inbound_number': inbound_number}
)
|
"""samples URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls import url
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', include('home.urls')), # Change to ads.urls
path('admin/', admin.site.urls), # Keep
path('accounts/', include('django.contrib.auth.urls')), # Keep
url(r'^oauth/', include('social_django.urls', namespace='social')), # Keep
# Sample applications
path('hello/', include('hello.urls')),
path('users/', include('users.urls')),
path('tracks/', include('tracks.urls')),
path('views/', include('views.urls')),
path('route/', include('route.urls', namespace='nsroute')),
path('tmpl/', include('tmpl.urls')),
path('gview/', include('gview.urls')),
path('session/', include('session.urls')),
path('authz/', include('authz.urls')),
path('getpost/', include('getpost.urls')),
path('form/', include('form.urls')),
path('crispy/', include('crispy.urls')),
path('myarts/', include('myarts.urls')),
path('menu/', include('menu.urls')),
path('forums/', include('forums.urls')),
path('pics/', include('pics.urls')),
path('favs/', include('favs.urls')),
path('favsql/', include('favsql.urls')),
path('rest/', include('rest.urls')),
path('autos/', include('autos.urls')),
path('usermodel/', include('usermodel.urls')),
path('chat/', include('chat.urls')),
path('util/', include('util.urls')),
]
# Serve the favicon - Keep for later
import os
from django.views.static import serve
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
urlpatterns += [
path('favicon.ico', serve, {
'path': 'favicon.ico',
'document_root': os.path.join(BASE_DIR, 'home/static'),
}
),
]
# Switch to social login if it is configured - Keep for later
try:
from . import github_settings
social_login = 'registration/login_social.html'
urlpatterns.insert(0,
path('accounts/login/', auth_views.LoginView.as_view(template_name=social_login))
)
print('Using',social_login,'as the login template')
except:
print('Using registration/login.html as the login template')
# References
# https://docs.djangoproject.com/en/2.2/ref/urls/#include
|
import matplotlib.pyplot as plt
import math
# this is a helper script to properly choose the epsilon rate of decay during DQN training
start = 1.0
end = 0.1
decay = 14000
steps = 80000
thresholds = []
for i in range(steps):
threshold = end + (start - end) * \
math.exp(-1 * i / decay)
thresholds.append(threshold)
iters = [i for i in range(steps)]
plt.plot(iters, thresholds)
plt.show()
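# For reference, with start=1.0, end=0.1 and decay=14000 the schedule works out
# to roughly:
#   step      0 -> 1.000
#   step 14,000 -> 0.1 + 0.9 * e**-1 ~= 0.431
#   step 28,000 -> 0.1 + 0.9 * e**-2 ~= 0.222
#   step 80,000 -> 0.1 + 0.9 * e**-(80000/14000) ~= 0.103
# so epsilon has essentially flattened out near `end` well before 80k steps.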
|
# Looks like it relies on precisions from C
# Running it as C (https://www.codechef.com/ide) gives:
c_output = '0001010203040406090c0d12282d2e3034'
# Running it as superhack (http://www.hacker.org/sh/) gives:
superhack_output = '102'
|
# Copyright MaxAndMitchCorp 2020
from trust_simulator import TrustSimulator
from society import Society
from institution import Institution
import pytest
@pytest.mark.parametrize("alpha", [(1), (0.05)])
def test_societal_convergence(alpha):
society = Society(population_size=100, connectivity_probability=0.1)
institution = Institution(society = society)
trust_simulator = TrustSimulator()
societal_trust = trust_simulator.simulate_society(society=society,
institution = institution,
iterations=10,
alpha=alpha)
assert sum(societal_trust) < 1000
def test_edges_dynamic():
    # Test that, given a fairly low affiliation probability (one lower than the
    # starting connectivity probability), the number of edges decreases over
    # the course of a simulation.
# Instantiate the society
society = Society(population_size=100, connectivity_probability=0.3)
institution = Institution(society = society)
# Count the number of edges to start
count1 = 0
for person_id_first in range(society.population_size):
for person_id_second in range(society.population_size):
if society.edge_matrix[person_id_first][person_id_second]==1:
count1 = count1 + 1
else:
pass
# Simulate the society through with an affiliation probability half that
# of what it started with.
trust_simulator = TrustSimulator()
societal_trust = trust_simulator.simulate_society(society=society,
institution = institution,
iterations = 30,
alpha = 0.1,
affiliation_prob = 0.15)
# Count the edges again with the simulated society.
count2 = 0
for person_id_first in range(society.population_size):
for person_id_second in range(society.population_size):
if society.edge_matrix[person_id_first][person_id_second]==1:
count2 = count2 + 1
else:
pass
# Assert that count1 should be greater than count2
assert count1 > count2
def test_number_edges():
# This test is to make sure that given an affiliation probability close to
# 1/2, the number of edges formed with the node is neither close to 0
# nor the population size by the end of the simulation.
# Instantiate the society and institution
society = Society(population_size=100, connectivity_probability=0.5)
institution = Institution(society = society)
# Simulate the society
trust_simulator = TrustSimulator()
societal_trust = trust_simulator.simulate_society(society = society,
institution = institution,
iterations = 30,
alpha = 0.1,
affiliation_prob = 0.5)
# Assert for every node that the number of edges is neither 0 nor 100
count = 0
for person_id_first in range(society.population_size):
for person_id_second in range(society.population_size):
if society.edge_matrix[person_id_first][person_id_second]==1:
count = count + 1
assert ((count > 0) and (count < society.population_size))
count = 0
|
# Code that generates an animation zooming in on the origin of a Newton's method convergence map
from PIL import Image, ImageDraw
from numba import jit
@jit
def newton(x):
for _ in range(50):
x = x - (x**3-1)/(3*x**2)
return x
@jit
def plot(draw, s, mag):
hs = s/2
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
for x in range(s):
for y in range(s):
z = complex(x-hs+0.5, y-hs+0.5)/128*mag
z = newton(z)
if z.real > 0:
c = red
else:
if z.imag > 0:
c = green
else:
c = blue
draw.rectangle([x, y, x+1, y+1], fill=c)
@jit
def save_files(index):
mag = (100.0 - index) / 100
filename = "map{:02d}.png".format(index)
print(filename, mag)
size = 512
im = Image.new("RGB", (size, size))
draw = ImageDraw.Draw(im)
plot(draw, size, mag)
im.save(filename)
for i in range(100):
save_files(i)
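# For reference: with s=512 and the /128 scaling above, mag shrinks from 1.00
# down to 0.01 across the 100 frames, so the plotted window zooms from roughly
# [-2, 2] x [-2, 2] toward the origin by a factor of up to 100.
# Note (assumption about your numba version): plot() and save_files() call into
# PIL, which numba cannot compile in nopython mode; on recent numba releases the
# bare @jit decorators above may need forceobj=True (or removal) to run.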
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pathlib import Path
from typing import (Callable, Iterable, Iterator, List, Sequence, Tuple,
TypeVar, Union)
SHOW_FUNC_DATA = Sequence[Path]
SHOW_FUNC = Callable[[bool, SHOW_FUNC_DATA], None]
DIFF_FUNC = Callable[
[Sequence[str], Sequence[str]],
Iterable[str]]
GEN_PATH_FUNC = Callable[[Path], Tuple[Path, Path]]
PATH_FUNC = Callable[[Path], Path]
|
from MonkeyBook.extensions import db
from sqlalchemy_utils import EmailType
from sqlalchemy import ForeignKeyConstraint
monkey_friends = db.Table('monkey_friends',
db.Column(
'monkey_id', db.Integer,
db.ForeignKey('monkey.id', onupdate='CASCADE', ondelete='CASCADE'),
primary_key=True
),
db.Column(
'friend_id', db.Integer,
db.ForeignKey('monkey.id', onupdate='CASCADE', ondelete='CASCADE'),
primary_key=True
)
)
best_friends = db.Table('best_friends',
db.Column('monkey_id', db.Integer, primary_key=True),
db.Column('friend_id', db.Integer),
ForeignKeyConstraint(
['monkey_id', 'friend_id'],
['monkey_friends.monkey_id', 'monkey_friends.friend_id'],
name='fk_favorite_entry', use_alter=True, onupdate='CASCADE',
ondelete='CASCADE'
)
)
class Monkey(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
email = db.Column(EmailType(), nullable=False, unique=True)
age = db.Column(db.Integer, nullable=False)
friends = db.relationship(
'Monkey', secondary=monkey_friends,
primaryjoin=id==monkey_friends.c.monkey_id,
secondaryjoin=id==monkey_friends.c.friend_id,
backref='friended_by',
foreign_keys=[monkey_friends.c.monkey_id, monkey_friends.c.friend_id]
)
best_friend = db.relationship(
'Monkey', secondary=best_friends,
primaryjoin=id==best_friends.c.monkey_id,
secondaryjoin=id==best_friends.c.friend_id,
foreign_keys=[best_friends.c.monkey_id, best_friends.c.friend_id],
remote_side=[id], uselist=False, post_update=True,
backref='best_friended_by'
)
    def __init__(self, name=None, email=None, age=None, friends=None,
                 best_friend=None):
        self.name = name
        self.email = email
        self.age = age
        # Avoid a shared mutable default argument for the friends list.
        self.friends = friends if friends is not None else []
        self.best_friend = best_friend
def __repr__(self):
return '<{cls} id={id}, name={name}>'.format(
id=self.id,
cls=self.__class__.__name__,
name=self.name
)
|
#!/usr/bin/python
#rev 2015-11-12
print "Importing python modules ..."
from time import time
start_time = time()
from numpy import loadtxt
print("--- numpy %s seconds ---" % (time() - start_time))
import matplotlib.pyplot as plt
print("--- matplotlib %s seconds ---" % (time() - start_time))
from itertools import cycle
print("--- itertools %s seconds ---" % (time() - start_time))
#from os import fork
from sys import argv
print("--- sys %s seconds ---" % (time() - start_time))
from os import fork
print("--- os %s seconds ---" % (time() - start_time))
print "done!"
#for arg in sys.argv:
# print arg
#plt.ion()
#import matplotlib
#matplotlib.interactive(False)
file=loadtxt(argv[1])
natoms=argv[2].split()
sample=file
bins=int((sample[:,1].max()-sample[:,1].min())/.05)
print "Plotting the histogram "
if fork():
    print('fork passed')
    pass
else:
f, (ax1, ax2) = plt.subplots(1,2, sharey=True)
lines = ["*b","or","^k",">p","<b"]
linecycler = cycle(lines)
m=0
for i in natoms:
i=int(i)
ax1.plot(sample[m:m+i,0],sample[m:m+i,1],next(linecycler))
m+=i
ax1.set_title('Atoms magnetic moments')
ax1.axhline(y=0)
ax2.hist(sample[:,1], bins=bins, orientation="horizontal");
ax2.xaxis.set_ticks_position('top')
f.subplots_adjust(wspace=0)
plt.show()
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
import os
from resource_management import Script, format, Package, Execute, Fail
from resource_management.core.logger import Logger
from resource_management.libraries.functions import stack_tools
from resource_management.libraries.functions.stack_select import get_stack_versions
from resource_management.core.providers import get_provider
CURRENT_ = "/current/"
stack_root = Script.get_stack_root()
stack_root_current = stack_root + CURRENT_
class RemovePreviousStacks(Script):
def actionexecute(self, env):
config = Script.get_config()
structured_output = {}
version = config['commandParams']['version']
self.stack_tool_package = stack_tools.get_stack_tool_package(stack_tools.STACK_SELECTOR_NAME)
versions_to_remove = self.get_lower_versions(version)
self.pkg_provider = get_provider("Package")
for low_version in versions_to_remove:
self.remove_stack_version(structured_output, low_version)
def remove_stack_version(self, structured_output, version):
    # check that no symlinks refer to the version being removed
self.check_no_symlink_to_version(structured_output, version)
packages_to_remove = self.get_packages_to_remove(version)
for package in packages_to_remove:
Package(package, action="remove")
self.remove_stack_folder(structured_output, version)
structured_output["remove_previous_stacks"] = {"exit_code": 0,
"message": format("Stack version {0} successfully removed!".format(version))}
self.put_structured_out(structured_output)
def remove_stack_folder(self, structured_output, version):
if version and version != '' and stack_root and stack_root != '':
Logger.info("Removing {0}/{1}".format(stack_root, version))
      try:
        Execute(('rm', '-f', stack_root + version),
                sudo=True)
      except Exception:
        # Report the failure only when the removal actually failed, then re-raise.
        structured_output["remove_previous_stacks"] = {"exit_code": -1,
                                      "message": "Failed to remove version {0}{1}".format(stack_root, version)}
        self.put_structured_out(structured_output)
        raise
def get_packages_to_remove(self, version):
packages = []
formated_version = version.replace('.', '_').replace('-', '_')
all_installed_packages = self.pkg_provider.all_installed_packages()
all_installed_packages = [package[0] for package in all_installed_packages]
for package in all_installed_packages:
if formated_version in package and self.stack_tool_package not in package:
packages.append(package)
Logger.info("%s added to remove" % (package))
return packages
def check_no_symlink_to_version(self, structured_output, version):
files = os.listdir(stack_root_current)
for file in files:
if version in os.path.realpath(stack_root_current + file):
structured_output["remove_previous_stacks"] = {"exit_code": -1,
"message": "{0} contains symlink to version for remove! {1}".format(
stack_root_current, version)}
self.put_structured_out(structured_output)
raise Fail("{0} contains symlink to version for remove! {1}".format(stack_root_current, version))
def get_lower_versions(self, current_version):
versions = get_stack_versions(stack_root)
Logger.info("available versions: {0}".format(str(versions)))
    lower_versions = []
    for version in versions:
      if self.compare(version, current_version) < 0:
        lower_versions.append(version)
        Logger.info("version %s added to remove" % (version))
    return lower_versions
def compare(self, version1, version2):
"""
Compare version1 and version2
:param version1:
:param version2:
:return: Return negative if version1<version2, zero if version1==version2, positive if version1>version2
"""
    version1_sections = re.findall(r"[\w']+", version1)
    version2_sections = re.findall(r"[\w']+", version2)
    return cmp(version1_sections, version2_sections)
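# For reference, the comparison above splits "2.5.0.0-1234" and "2.6.0.0-125"
# into ['2', '5', '0', '0', '1234'] and ['2', '6', '0', '0', '125'] and compares
# them element-wise, so compare() returns a negative value and 2.5.0.0-1234 is
# treated as a lower (removable) stack version.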
if __name__ == "__main__":
RemovePreviousStacks().execute()
|
from rply import ParserGenerator
from rply.token import Token
from .ast import *
from .errors import *
class ParserState:
    def __init__(self):
        # Collections used by add_variable/add_constant below.
        self.variables = []
        self.constants = []
def add_variable(self, var):
self.variables.append(var)
def add_constant(self, var):
self.constants.append(var)
class Parser:
def __init__(self, tokens: list, precedence: list, filename: str, source: str) -> None:
self.pg = ParserGenerator(tokens, precedence)
self.parser = self.init()
self.filename = filename
self.source = source
def init(self):
##################################################
# Program
##################################################
@self.pg.production('program : stmtList')
def prgm(state: ParserState, p):
return ProgramNode(p[0], p[0].getsourcepos())
##################################################
# Statment-List
##################################################
@self.pg.production('stmtList : stmtList stmtFull')
def stmtList_stmt(state: ParserState, p):
block = p[0]
block.add(p[1])
return block
@self.pg.production('stmtList : stmtFull')
def stmtList(state: ParserState, p):
return BlockNode(p[0], p[0].getsourcepos())
@self.pg.production('stmtFull : stmt')
@self.pg.production('stmtFull : func')
def stmtFull(state: ParserState, p):
return p[0]
##################################################
# Block
##################################################
@self.pg.production('block : block stmt')
def block_stmt(state: ParserState, p):
block = p[0]
block.add(p[1])
return block
@self.pg.production('block : stmt')
def block(state: ParserState, p):
return BlockNode(p[0], p[0].getsourcepos())
##################################################
# Statement
##################################################
@self.pg.production('stmt : mccmd ;')
def stmt(state: ParserState, p):
return p[0]
@self.pg.production('stmt : assgn ;')
@self.pg.production('stmt : decl ;')
@self.pg.production('stmt : expr-assgn ;')
@self.pg.production('stmt : expr ;')
@self.pg.production('stmt : group')
@self.pg.production('stmt : for-expr')
@self.pg.production('stmt : while-expr')
def stmt_(state: ParserState, p):
return p[0]
@self.pg.production('stmt : COMMENT')
def stmt_comment(state: ParserState, p):
return CommentNode(p[0].getstr(), p[0].getsourcepos())
##################################################
# Minecraft
##################################################
@self.pg.production('func : FUNC STRING { block }')
def func(state: ParserState, p):
return FuncNode(p[1].getstr(), p[3], p[0].getsourcepos())
@self.pg.production('mccmd : MCCMD')
def mccmd(state: ParserState, p):
return McCmdNode(p[0].getstr(), p[0].getsourcepos())
@self.pg.production('mccmd : score-decl')
@self.pg.production('mccmd : score-init')
@self.pg.production('mccmd : score-create')
@self.pg.production('mccmd : score-op')
def mccmd_other(state: ParserState, p):
return p[0]
# Scores
@self.pg.production('score-create : CREATE score-decl')
@self.pg.production('score-create : CREATE score-init')
def score_create(state: ParserState, p):
return ScoreCreateNode(p[1], p[0].getsourcepos())
@self.pg.production('score-init : score-decl = expr')
def score_init(state: ParserState, p):
return ScoreInitNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('score-decl : SCORE IDENTIFIER : IDENTIFIER')
def score_decl(state: ParserState, p):
return ScoreDeclNode(p[1].getstr(), p[3].getstr(), p[1].getstr(), p[0].getsourcepos())
@self.pg.production('score-decl : SCORE IDENTIFIER : ( IDENTIFIER , IDENTIFIER )')
def score_decl_1(state: ParserState, p):
return ScoreDeclNode(p[1].getstr(), p[4].getstr(), p[6].getstr(), p[0].getsourcepos())
# score-op
@self.pg.production('score-op : IDENTIFIER << IDENTIFIER')
def score_op_left(state: ParserState, p):
return ScoreOpLeftNode(p[0].getstr(), p[2].getstr(), p[0].getsourcepos())
@self.pg.production('score-op : IDENTIFIER >> IDENTIFIER')
def score_op_right(state: ParserState, p):
return ScoreOpRightNode(p[0].getstr(), p[2].getstr(), p[0].getsourcepos())
@self.pg.production('score-op : IDENTIFIER >< IDENTIFIER')
def score_op_swap(state: ParserState, p):
return ScoreOpSwapNode(p[0].getstr(), p[2].getstr(), p[0].getsourcepos())
# group
@self.pg.production('group : group-specifier { block }')
def group(state: ParserState, p):
return GroupNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('group-specifier : EXEC ( STRING )')
def group_spec(state: ParserState, p):
return GroupSpecNode(p[0].getstr(), p[2].getstr(), p[0].getsourcepos())
@self.pg.production('group-specifier : group-specifier , EXEC ( STRING )')
def group_spec_rec(state: ParserState, p):
p[0].append(
GroupSpecNode(p[2].getstr(), p[4].getstr(), p[0].getsourcepos())
)
return p[0]
##################################################
# Variables
##################################################
@self.pg.production('assgn : IDENTIFIER = expr')
def assgn(state: ParserState, p):
return VariableAssignNode(p[0].getstr(), p[2], p[0].getsourcepos())
@self.pg.production('assgn : IDENTIFIER PLUS PLUS')
def assgn_incr(state: ParserState, p):
return VariableIncrementNode(p[0].getstr(), p[0].getsourcepos())
@self.pg.production('assgn : IDENTIFIER MINUS MINUS')
def assgn_decr(state: ParserState, p):
return VariableDecrementNode(p[0].getstr(), p[0].getsourcepos())
@self.pg.production('decl : VAR IDENTIFIER = expr')
def decl(state: ParserState, p):
return VariableDeclareNode(p[1].getstr(), p[3], p[0].getsourcepos())
##################################################
# Expr-assgn
##################################################
@self.pg.production('expr-assgn : IDENTIFIER PLUSEQ expr')
def expr_assgn_add(state: ParserState, p):
return SelfAddNode(p[0].getstr(), p[2], p[0].getsourcepos())
@self.pg.production('expr-assgn : IDENTIFIER MINUSEQ expr')
def expr_assgn_sub(state: ParserState, p):
return SelfSubNode(p[0].getstr(), p[2], p[0].getsourcepos())
@self.pg.production('expr-assgn : IDENTIFIER MULTEQ expr')
def expr_assgn_mul(state: ParserState, p):
return SelfMultNode(p[0].getstr(), p[2], p[0].getsourcepos())
@self.pg.production('expr-assgn : IDENTIFIER DIVEQ expr')
def expr_assgn_div(state: ParserState, p):
return SelfDivNode(p[0].getstr(), p[2], p[0].getsourcepos())
##################################################
# Expression
##################################################
@self.pg.production('expr : comp')
def expr(state: ParserState, p):
return p[0]
@self.pg.production('expr : expr AND expr')
def expr_and(state: ParserState, p):
return BinaryAndNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('expr : expr OR expr')
def expr_or(state: ParserState, p):
return BinaryOrNode(p[0], p[2], p[0].getsourcepos())
##################################################
# comp-expr
##################################################
@self.pg.production('comp : arith')
def comp(state: ParserState, p):
return p[0]
@self.pg.production('comp : ! comp')
def comp_not(state: ParserState, p):
return UnaryNotNode(p[1], p[0].getsourcepos())
@self.pg.production('comp : arith < arith')
def comp_le(state: ParserState, p):
return BinaryLENode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('comp : arith > arith')
def comp_ge(state: ParserState, p):
return BinaryGENode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('comp : arith <= arith')
def comp_let(state: ParserState, p):
return BinaryLETNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('comp : arith >= arith')
def comp_get(state: ParserState, p):
return BinaryGETNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('comp : arith == arith')
def comp_eq(state: ParserState, p):
return BinaryEQNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('comp : arith != arith')
def comp_neq(state: ParserState, p):
return BinaryNEQNode(p[0], p[2], p[0].getsourcepos())
##################################################
# arith-expr
##################################################
@self.pg.production('arith : term')
def arith(state: ParserState, p):
return p[0]
@self.pg.production('arith : arith PLUS arith')
def arith_plus(state: ParserState, p):
return BinaryAddNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('arith : arith MINUS arith')
def arith_minus(state: ParserState, p):
return BinarySubNode(p[0], p[2], p[0].getsourcepos())
##################################################
# Term
##################################################
@self.pg.production('term : factor')
def term(state: ParserState, p):
return p[0]
@self.pg.production('term : term MULT term')
def term_mul(state: ParserState, p):
return BinaryMultNode(p[0], p[2], p[0].getsourcepos())
@self.pg.production('term : term DIV term')
def term_div(state: ParserState, p):
return BinaryDivNode(p[0], p[2], p[0].getsourcepos())
##################################################
# Factor
##################################################
@self.pg.production('factor : atom')
def fac_lit(state: ParserState, p):
return p[0]
@self.pg.production('factor : PLUS factor')
def fac_add(state: ParserState, p):
return UnaryAddNode(p[1], p[0].getsourcepos())
@self.pg.production('factor : MINUS factor')
def fac_sub(state: ParserState, p):
return UnarySubNode(p[1], p[0].getsourcepos())
##################################################
# atom
##################################################
@self.pg.production('atom : literal')
def atom(state: ParserState, p):
return p[0]
@self.pg.production('atom : ( expr )')
def atom_expr(state: ParserState, p):
return p[1]
@self.pg.production('atom : IDENTIFIER')
def atom_var(state: ParserState, p):
return VariableAccessNode(p[0].getstr(), p[0].getsourcepos())
##################################################
# Literals
##################################################
@self.pg.production('literal : INTEGER')
def literal_int(state: ParserState, p):
return IntegerNode(int(p[0].getstr()), p[0].getsourcepos())
@self.pg.production('literal : FLOAT')
def literal_float(state: ParserState, p):
return FloatNode(float(p[0].getstr()), p[0].getsourcepos())
@self.pg.production('literal : STRING')
def literal_str(state: ParserState, p):
return StringNode(p[0].getstr(), p[0].getsourcepos())
@self.pg.production('literal : BOOLEAN')
def literal_bool(state: ParserState, p):
boolean = p[0].getstr()
if boolean == "true":
boolean = 1
else:
boolean = 0
return BooleanNode(bool(boolean), p[0].getsourcepos())
##################################################
# For
##################################################
@self.pg.production('for-expr : FOR ( decl ; expr ; assgn ) { block }')
def for_expr(state: ParserState, p):
return ForNode(p[2], p[4], p[6], p[9], p[0].getsourcepos())
##################################################
# While
##################################################
@self.pg.production('while-expr : WHILE ( expr ) { block }')
def while_expr(state: ParserState, p):
return WhileNode(p[2], p[5], p[0].getsourcepos())
##################################################
# Errors
##################################################
@self.pg.error
def error_handler(state: ParserState, token: Token):
pos = token.getsourcepos()
if pos:
SynatxError(
pos,
self.filename,
self.source,
f"Unexpected Token '{token.name}'"
).raiseError()
elif token.gettokentype() == '$end':
UnexpectedEndError(
pos,
self.filename,
self.source,
f"Unexpected End"
).raiseError()
else:
SynatxError(
None,
self.filename,
self.source,
f"Unexpected Token: {token.name}"
).raiseError()
return self.pg.build()
def parse(self, text: str, state: ParserState = None):
return self.parser.parse(text, state)
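
# The productions above follow rply's ParserGenerator pattern: each decorated
# function receives the matched symbols in `p` and returns an AST node. The
# self-contained sketch below illustrates that same pattern on a tiny grammar;
# it is an illustration of the rply API only, not part of this parser's
# interface. Because no ParserState is passed to parse() here, the demo
# productions take only `p` (the productions above take `(state, p)`).
if __name__ == '__main__':
    from rply import LexerGenerator, ParserGenerator

    _lg = LexerGenerator()
    _lg.add('INTEGER', r'\d+')
    _lg.add('PLUS', r'\+')
    _lg.ignore(r'\s+')
    _lexer = _lg.build()

    _pg = ParserGenerator(['INTEGER', 'PLUS'], precedence=[('left', ['PLUS'])])

    @_pg.production('expr : expr PLUS expr')
    def _demo_add(p):
        # p[0] and p[2] are already-reduced integer values, p[1] is the PLUS token
        return p[0] + p[2]

    @_pg.production('expr : INTEGER')
    def _demo_int(p):
        return int(p[0].getstr())

    _parser = _pg.build()
    print(_parser.parse(_lexer.lex('1 + 2 + 3')))  # -> 6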
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 19 01:52:59 2022
@author: maout
"""
from typing import Union
import numpy as np
import torch
__all__ = ['RBF']
class RBF:
"""
RBF kernel class for pytorch implementation.
"""
    def __init__(self,
                 length_scale: Union[float, torch.Tensor, np.ndarray] = 1.0,
                 signal_variance: float = 1.0,
                 device: Union[None, str, torch.device] = None,
                 multil: bool = False) -> None:
"""
Kernel initialising function
Parameters
----------
length_scale : Union[float, torch.tensor, np.ndarray], optional
Lengthscale of kernel. The default is 1.0.
signal_variance : float, optional
Variance of kernel. The default is 1.0.
device : Union[bool,str], optional
            Device is either 'cpu' or 'cuda'. The default is None, which falls back to 'cpu'.
multil : Union[bool, None], optional
Boolean variable indicating whether the lengthscale is uniform across dimensions
i.e. scalar, or a vector. True indicates a vector lengthscale.
The default is False.
Returns
-------
None
Instance of the kernel.
"""
# initialize parameters
if device is None:
self.device = torch.device("cpu")
else:
self.device = device
self.length_scale = torch.tensor(length_scale, dtype=torch.float64, device=self.device,
requires_grad=True)
self.signal_variance = torch.tensor(signal_variance, dtype=torch.float64, device=self.device,
requires_grad=True)
self.multil = torch.tensor(multil, dtype=torch.bool, device=self.device, requires_grad=False)
if self.multil:
            # expand dimensions of the lengthscale vector to enable broadcasting
self.length_scale = self.length_scale[None, None, :]
self.K_data = torch.tensor(0, dtype=torch.float64, device=self.device, requires_grad=False)
    def Kernel(self, X: Union[np.ndarray, torch.Tensor], Y: Union[None, np.ndarray, torch.Tensor] = None) -> torch.Tensor:
"""
Calculates the rbf gaussian kernel between data points X and Y.
If Y is missing, computes the kernel between X with itself.
Parameters
----------
X : np.ndarray
Data points, first entry -> (N x D).
Y : Union[bool, np.ndarray], optional
Data points, second entry -> (M x D). The default is None.
Returns
-------
K: torch.tensor
Array of dimension NxM (or NxN if Y is missing) with the kernel evaluated
at the data points.
"""
if not torch.is_tensor(X):
# convert inputs to pytorch tensors if not already pytorched
X = torch.tensor(X, dtype=torch.float64, device=self.device)
#N, D = X.shape
if Y is None:
Y = X
elif not torch.is_tensor(Y):
Y = torch.tensor(Y, dtype=torch.float64, device=self.device)
M, _ = Y.shape
# Re-indexing
X_i = X[:, None, :] # shape (N, D) -> (N, 1, D)
Y_j = Y[None, :, :] # shape (M, D) -> (1, M, D)
if not self.multil: ##if a single lengthscale is provided
sqd = torch.sum( (X_i - Y_j)**2, 2) # |X_i - Y_j|^2 # (N, M, D)
# Divide by length scale
#print(sqd.device)
#print(self.length_scale.device)
sqd = torch.div(sqd, self.length_scale.to(self.device)**2)
K = torch.exp( -0.5* sqd )
else:
sqd1 = torch.div( (X_i - Y_j)**2, self.length_scale.to(self.device)**2)
sqd = torch.sum( sqd1, 2)
K = torch.exp( -0.5* sqd )
K = torch.mul(self.signal_variance, K) # Signal Variance
self.K_data = K
return K#.detach().to_numpy()
    def gradient_X(self, X: Union[np.ndarray, torch.Tensor], Y: Union[None, np.ndarray, torch.Tensor] = None) -> torch.Tensor:
"""
Computes the gradient of the kernel with respect to the first argument.
Parameters
----------
X : np.ndarray
Data points, first entry -> (N x D).
Y : Union[bool, np.ndarray], optional
Data points, second entry -> (M x D). The default is None.
Returns
-------
redifs : torch.tensor
Array with the gradient of the Kernel -> (N, M, D).
"""
        if not torch.is_tensor(X):
            X = torch.tensor(X, dtype=torch.float64, device=self.device)
        if Y is None:
            Y = X
        elif not torch.is_tensor(Y):
            Y = torch.tensor(Y, dtype=torch.float64, device=self.device)
        # pairwise differences, shape (N, M, D), scaled by the squared lengthscale
        diffs = X[:, None] - Y
        redifs = torch.div(diffs, self.length_scale.to(self.device)**2)
        # weight each difference vector by the corresponding kernel entry
        # (requires Kernel() to have been called first so that self.K_data is set)
        redifs = torch.einsum('ijk,ij->ijk', redifs, self.K_data)
        return redifs
def gradient_X2(self, X):
return None
"""
def gradient_XX(self,X: np.ndarray, Y: Union[bool, np.ndarray]=None) -> torch.tensor:
# Convert to tensor that requires Grad
X = torch.tensor(length_scale, dtype=torch.float64, device=self.device,requires_grad=True)
if Y is None:
Y = X
else:
Y = torch.tensor(Y, dtype=torch.float64, device=self.device, requires_grad=True)
# compute the gradient kernel w.r.t. to the two inputs
J = grad(self.__call__(X, Y))
return J
"""
def gradient_XX2(self, X, Y=None):
return None
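
# Minimal usage sketch (an illustration, not part of the class): evaluate the
# kernel and its gradient on small random data sets. Shapes follow the
# docstrings above: X is (N, D), Y is (M, D), K is (N, M) and the gradient has
# shape (N, M, D).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    X = rng.standard_normal((5, 2))
    Y = rng.standard_normal((3, 2))
    Xt = torch.as_tensor(X, dtype=torch.float64)
    Yt = torch.as_tensor(Y, dtype=torch.float64)

    ker = RBF(length_scale=0.5, signal_variance=1.0)
    K = ker.Kernel(Xt, Yt)       # (5, 3) kernel matrix
    dK = ker.gradient_X(Xt, Yt)  # (5, 3, 2); uses the K stored by Kernel()
    print(K.shape, dK.shape)

    # Per-dimension (ARD) lengthscales: pass a vector and set multil=True
    ker_ard = RBF(length_scale=np.array([0.5, 2.0]), multil=True)
    print(ker_ard.Kernel(Xt, Yt).shape)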
|
from more_itertools import one
import pytest
from adam.learner.integrated_learner import IntegratedTemplateLearner
from adam.learner.language_mode import LanguageMode
from adam.ontology.phase1_ontology import DAD, GAILA_PHASE_1_ONTOLOGY
from adam.perception import PerceptualRepresentation
from adam.perception.high_level_semantics_situation_to_developmental_primitive_perception import (
HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator,
)
from adam.random_utils import RandomChooser
from adam.situation import SituationObject
from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
from tests.learner import LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER
@pytest.mark.parametrize("language_mode", [LanguageMode.ENGLISH, LanguageMode.CHINESE])
def test_with_object_recognizer(language_mode):
integrated_learner = IntegratedTemplateLearner(
object_learner=LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER[language_mode],
attribute_learner=None,
relation_learner=None,
action_learner=None,
)
dad_situation_object = SituationObject.instantiate_ontology_node(
ontology_node=DAD, ontology=GAILA_PHASE_1_ONTOLOGY
)
situation = HighLevelSemanticsSituation(
ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad_situation_object]
)
perception_generator = HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator(
GAILA_PHASE_1_ONTOLOGY
)
# We explicitly exclude ground in perception generation
# this generates a static perception...
perception = perception_generator.generate_perception(
situation, chooser=RandomChooser.for_seed(0), include_ground=False
)
# so we need to construct a dynamic one by hand from two identical scenes
dynamic_perception = PerceptualRepresentation(
frames=[perception.frames[0], perception.frames[0]]
)
descriptions = integrated_learner.describe(dynamic_perception)
assert len(descriptions) == 1
assert (
language_mode == LanguageMode.ENGLISH
and one(descriptions.keys()).as_token_sequence() == ("Dad",)
) or (
language_mode == LanguageMode.CHINESE
and one(descriptions.keys()).as_token_sequence() == ("ba4 ba4",)
)
|
import rhinoscriptsyntax as rs
import Rhino.Geometry as rg
import math
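# NOTE: this is a Grasshopper (RhinoPython) component script. The names
# `first`, `second`, `angle` and `length` used below are assumed to be
# component inputs supplied by Grasshopper, not variables defined in this file.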
def fractal(pt1,pt2,angle):
# creating a curve to extract its division points
line=rs.AddLine(pt1,pt2)
points=rs.DivideCurve(line,4)
# 1/3 point
between_points_1=points[1]
between_points_2=points[3]
# mid vector
mid_vec= rg.Vector3d(points[2].X - points[1].X, points[2].Y - points[1].Y, points[2].Z - points[1].Z)
mid_vec.Rotate(math.radians(angle), rg.Vector3d.ZAxis)
mid_vec=rs.VectorUnitize(mid_vec)
mid_vec*=length
# mid point
mid_point=((pt2+pt1)/2)+mid_vec
# returning points
return pt1,between_points_1,mid_point,between_points_2,pt2
a=fractal(first,second,angle)
###########################
###########################
# then change it into this:
import rhinoscriptsyntax as rs
import Rhino.Geometry as rg
import math
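# As above, `first`, `second`, `angle`, `length` and `iteration` are assumed to
# be Grasshopper component inputs, and `a` (the list of polylines) is the
# component output.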
def create_points(first_point,second_point):
# creating a curve to extract its division points
line=rs.AddLine(first_point,second_point)
points=rs.DivideCurve(line,4)
# 1/3 point
between_points_1=points[1]
between_points_2=points[3]
# mid vector
mid_vec= rg.Vector3d(points[2].X - points[1].X, points[2].Y - points[1].Y, points[2].Z - points[1].Z)
mid_vec.Rotate(math.radians(angle), rg.Vector3d.ZAxis)
mid_vec=rs.VectorUnitize(mid_vec)
mid_vec*=length
# mid point
mid_point=((second_point+first_point)/2)+mid_vec
# returning points
return first_point,between_points_1,mid_point,between_points_2,second_point
def snowflake(first_point,second_point,iteration,poly_list):
if iteration>0:
temp_points=create_points(first_point,second_point)
poly=rs.AddPolyline([temp_points[0],temp_points[1],temp_points[2],temp_points[3],temp_points[4]])
if iteration==1:
poly_list.append(poly)
snowflake(temp_points[0],temp_points[1],iteration-1,poly_list)
snowflake(temp_points[1],temp_points[2],iteration-1,poly_list)
snowflake(temp_points[2],temp_points[3],iteration-1,poly_list)
snowflake(temp_points[3],temp_points[4],iteration-1,poly_list)
return poly_list
b=[]
a=snowflake(first,second,iteration,b)
|
# Copyright 2019 The PlaNet Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import numpy as np
from planet import control
from planet import networks
from planet import tools
Task = collections.namedtuple(
'Task', 'name, env_ctor, max_length, state_components')
def cartpole_balance(config, params):
action_repeat = params.get('action_repeat', 8)
max_length = 1000 // action_repeat
state_components = ['reward', 'position', 'velocity']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'cartpole', 'balance')
return Task('cartpole_balance', env_ctor, max_length, state_components)
def cartpole_swingup(config, params):
action_repeat = params.get('action_repeat', 8)
max_length = 1000 // action_repeat
state_components = ['reward', 'position', 'velocity']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'cartpole', 'swingup')
return Task('cartpole_swingup', env_ctor, max_length, state_components)
def finger_spin(config, params):
action_repeat = params.get('action_repeat', 2)
max_length = 1000 // action_repeat
state_components = ['reward', 'position', 'velocity', 'touch']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'finger', 'spin')
return Task('finger_spin', env_ctor, max_length, state_components)
def cheetah_run(config, params):
action_repeat = params.get('action_repeat', 4)
max_length = 1000 // action_repeat
state_components = ['reward', 'position', 'velocity']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'cheetah', 'run')
return Task('cheetah_run', env_ctor, max_length, state_components)
def cup_catch(config, params):
action_repeat = params.get('action_repeat', 6)
max_length = 1000 // action_repeat
state_components = ['reward', 'position', 'velocity']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'ball_in_cup', 'catch')
return Task('cup_catch', env_ctor, max_length, state_components)
def walker_walk(config, params):
action_repeat = params.get('action_repeat', 2)
max_length = 1000 // action_repeat
state_components = ['reward', 'height', 'orientations', 'velocity']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'walker', 'walk')
return Task('walker_walk', env_ctor, max_length, state_components)
def humanoid_walk(config, params):
action_repeat = params.get('action_repeat', 2)
max_length = 1000 // action_repeat
state_components = [
'reward', 'com_velocity', 'extremities', 'head_height', 'joint_angles',
'torso_vertical', 'velocity']
env_ctor = functools.partial(
_dm_control_env, action_repeat, max_length, 'humanoid', 'walk')
return Task('humanoid_walk', env_ctor, max_length, state_components)
def gym_cheetah(config, params):
action_repeat = params.get('action_repeat', 2)
max_length = 1000 // action_repeat
state_components = ['reward', 'state']
env_ctor = functools.partial(
_gym_env, action_repeat, config.batch_shape[1], max_length,
'HalfCheetah-v3')
return Task('gym_cheetah', env_ctor, max_length, state_components)
def gym_racecar(config, params):
action_repeat = params.get('action_repeat', 2)
max_length = 1000 // action_repeat
state_components = ['reward']
env_ctor = functools.partial(
_gym_env, action_repeat, config.batch_shape[1], max_length,
'CarRacing-v0', obs_is_image=True)
return Task('gym_racing', env_ctor, max_length, state_components)
def gym_sokoban(config, params):
action_repeat = params.get('action_repeat', 1)
max_length = 1000 // action_repeat
state_components = ['reward']
env_ctor = functools.partial(
_gym_env, action_repeat, config.batch_shape[1], max_length,
'Sokoban-small-v0', act_is_discrete=True, obs_is_image=True)
return Task('gym_sokoban', env_ctor, max_length, state_components)
def gym_atari(config, params):
game_name = params.get('game_name', 'Freeway-v0')
action_repeat = params.get('action_repeat', 4)
max_length = 2000 // action_repeat
state_components = ['reward']
env_ctor = functools.partial(
_gym_env, action_repeat, config.batch_shape[1], max_length,
game_name, act_is_discrete=True, obs_is_image=True)
return Task('gym_' + game_name, env_ctor, max_length, state_components)
def _dm_control_env(action_repeat, max_length, domain, task):
from dm_control import suite
env = control.wrappers.DeepMindWrapper(suite.load(domain, task), (64, 64))
env = control.wrappers.ActionRepeat(env, action_repeat)
env = control.wrappers.MaximumDuration(env, max_length)
env = control.wrappers.PixelObservations(env, (64, 64), np.uint8, 'image')
env = control.wrappers.ConvertTo32Bit(env)
return env
def _gym_env(action_repeat, min_length, max_length, name,
act_is_discrete=False, obs_is_image=False):
if "Sokoban" in name:
import gym_sokoban
import gym
env = gym.make(name)
env = env.env # Remove the TimeLimit wrapper
env.frameskip = 1 # Disable Gym frame skipping
env = control.wrappers.ActionRepeat(env, action_repeat)
if act_is_discrete:
env = control.wrappers.DiscreteWrapper(env)
else:
env = control.wrappers.NormalizeActions(env)
env = control.wrappers.MinimumDuration(env, min_length)
env = control.wrappers.MaximumDuration(env, max_length)
if obs_is_image:
env = control.wrappers.ObservationDict(env, 'image')
env = control.wrappers.ObservationToRender(env)
else:
env = control.wrappers.ObservationDict(env, 'state')
env = control.wrappers.PixelObservations(env, (64, 64), np.uint8, 'image')
env = control.wrappers.ConvertTo32Bit(env)
return env
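
# Illustrative sketch (not part of the original PlaNet code): every factory
# above returns a Task namedtuple whose env_ctor builds the wrapped environment
# lazily. Only the gym_* tasks read the config object (config.batch_shape), so
# a dm_control task can be constructed with config=None; actually calling
# env_ctor() requires dm_control to be installed.
if __name__ == '__main__':
    task = cartpole_balance(config=None, params={'action_repeat': 8})
    print(task.name, task.max_length, task.state_components)
    # env = task.env_ctor()  # would build the wrapped dm_control environment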
|
import pytest
from typing import Optional
from pygraphy.types import (
Object,
Input,
Schema,
field
)
from examples.starwars.schema import Schema as StarwarsSchema
from examples.simple_example import Schema as SimpleSchema
from examples.complex_example import Schema as ComplexSchema
pytestmark = pytest.mark.asyncio
async def test_starwars_query():
query = """
query FetchLukeQuery {
human(id: "1000") {
name
}
}
"""
assert await StarwarsSchema.execute(query, serialize=True) == \
r'{"errors": null, "data": {"human": {"name": "foo"}}}'
async def test_simple_query():
query = """
query something {
patron {
id
name
age
}
}
"""
assert await SimpleSchema.execute(query, serialize=True) == \
r'{"errors": null, "data": {"patron": {"id": "1", "name": "Syrus", "age": 27}}}'
query = """
query {
patrons(ids: [1, 2, 3]) {
id
name
age
}
}
"""
assert await SimpleSchema.execute(query, serialize=True) == \
r'{"errors": null, "data": {"patrons": [{"id": "1", "name": "Syrus", "age": 27}, {"id": "2", "name": "Syrus", "age": 27}, {"id": "3", "name": "Syrus", "age": 27}]}}'
async def test_alias_field():
query = """
query something {
user: patron {
id
firstName: name
age
}
}
"""
assert await SimpleSchema.execute(query) == {
'data': {
'user': {
'age': 27, 'firstName': 'Syrus', 'id': '1'
}
}, 'errors': None
}
async def test_complex_query():
query = """
query something{
address(geo: {lat:32.2, lng:12}) {
latlng
}
}
"""
assert await ComplexSchema.execute(query, serialize=True) == \
r'{"errors": null, "data": {"address": {"latlng": "(32.2,12)"}}}'
async def test_complex_mutation():
mutation = """
mutation addAddress{
createAddress(geo: {lat:32.2, lng:12}) {
latlng
foobar {
... on Bar {
b
}
}
}
}
"""
assert await ComplexSchema.execute(mutation, serialize=True) == \
r'{"errors": null, "data": {"createAddress": {"latlng": "(32.2,12)", "foobar": [{}, {}, {}, {}, {}]}}}'
mutation = """
mutation addAddress{
createAddress(geo: {lat:32.2, lng:12}) {
latlng
foobar {
... on Foo {
a
}
}
}
}
"""
assert await ComplexSchema.execute(mutation, serialize=True) == \
r'{"errors": null, "data": {"createAddress": {"latlng": "(32.2,12)", "foobar": [{"a": "test"}, {"a": "test"}, {"a": "test"}, {"a": "test"}, {"a": "test"}]}}}'
async def test_raise_error():
query = """
query test {
exception(content: "test")
}
"""
assert await SimpleSchema.execute(query, serialize=True) == \
'{"errors": [{"message": "test", "locations": [{"line": 3, "column": 13}], "path": ["exception"]}], "data": null}'
async def test_variables():
query = """
query something($geo: GeoInput) {
address(geo: $geo) {
latlng
}
}
"""
assert await ComplexSchema.execute(query, serialize=True, variables={"geo": {"lat":32.2, "lng":12}}) == \
r'{"errors": null, "data": {"address": {"latlng": "(32.2,12)"}}}'
query = """
query something($patron: [int]) {
patrons(ids: $patron) {
id
name
age
}
}
"""
assert await SimpleSchema.execute(query, serialize=True, variables={"patron": [1, 2, 3]}) == \
r'{"errors": null, "data": {"patrons": [{"id": "1", "name": "Syrus", "age": 27}, {"id": "2", "name": "Syrus", "age": 27}, {"id": "3", "name": "Syrus", "age": 27}]}}'
async def test_field_name_case():
class FooInput(Input):
snake_case: str
camelCase: str
class Foo(Object):
snake_case: str
camelCase: str
class Query(Object):
@field
def get_foo(self, foo: FooInput) -> Foo:
return Foo(snake_case=foo.snake_case,
camelCase=foo.camelCase)
class PySchema(Schema):
query: Optional[Query]
query = """
query something($foo: FooInput) {
get_foo (foo: {
snakeCase: "sth"
camelCase: "sth"
}) {
snakeCase
camelCase
}
}
"""
assert await PySchema.execute(query, serialize=True) == \
r'{"errors": null, "data": {"get_foo": {"snakeCase": "sth", "camelCase": "sth"}}}'
async def test_field_name_case_with_vars():
class FooInput(Input):
snake_case: str
camelCase: str
class Foo(Object):
snake_case: str
camelCase: str
class Query(Object):
@field
def get_foo(self, foo: FooInput) -> Foo:
return Foo(snake_case=foo.snake_case,
camelCase=foo.camelCase)
class PySchema(Schema):
query: Optional[Query]
query = """
query something($foo: FooInput) {
get_foo (foo: $foo) {
snake_case
camelCase
}
}
"""
assert await PySchema.execute(query, serialize=True, variables={"foo": {"snakeCase":"sth", "camelCase":"sth"}}) == \
r'{"errors": null, "data": {"get_foo": {"snake_case": "sth", "camelCase": "sth"}}}'
|
#!/usr/bin/python
DOCUMENTATION = '''
---
module: bmf_rest_bigchain_create_chain
short_description: BMF BigChain create chain
description:
- Makes HTTPS request to create chain.
version_added: "1.0.0"
author:
- Ted Elhourani (ted@bigswitch.com)
notes:
- This module can only be called in a playbook with the connection set to
'local'. The module will run locally on the Ansible host.
options:
  node:
    description: Controller address that the REST call is made against.
    required: true
  user:
    description: Controller username used for authentication.
    required: true
  password:
    description: Controller password used for authentication.
    required: true
  chain:
    description: Name of the BigChain chain to modify.
    required: true
  service:
    description: Name of the service to insert into the chain.
    required: true
  service_instance:
    description: Service instance to insert.
    required: true
  sequence:
    description: Sequence number at which the service instance is inserted.
    required: true
'''
EXAMPLES = '''
# Playbook task
- hosts: tme-bmf-controllers
connection: local
gather_facts: no
any_errors_fatal: true
tasks:
- name: Create chain
bmf_rest_bigchain_create_chain: 'node={{ inventory_hostname }} name=Chain1 switch=00:00:cc:37:ab:2c:97:ea endpoint1=ethernet11 endpoint2=ethernet17'
# Here's how you would run it.
% ansible-playbook <playbook.yml> -v
'''
RETURN = '''
PLAY [tme-bmf-controllers] *****************************************************
TASK [Create chain] ************************************************************
task path: /home/admin/playbooks/bmf_bigchain_config_tasks.yml:7
<10.2.19.102> ESTABLISH LOCAL CONNECTION FOR USER: admin
<10.2.19.102> EXEC /bin/sh -c '( umask 77 && mkdir -p "` echo $HOME/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019 `" && echo ansible-tmp-1477065852.28-135238786849019="` echo $HOME/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019 `" ) && sleep 0'
<10.2.19.102> PUT /tmp/tmpYVl24r TO /home/admin/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019/bmf_rest_bigchain_create_chain
<10.2.19.102> EXEC /bin/sh -c 'chmod u+x /home/admin/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019/ /home/admin/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019/bmf_rest_bigchain_create_chain && sleep 0'
<10.2.19.102> EXEC /bin/sh -c 'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/admin/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019/bmf_rest_bigchain_create_chain; rm -rf "/home/admin/.ansible/tmp/ansible-tmp-1477065852.28-135238786849019/" > /dev/null 2>&1 && sleep 0'
ok: [10.2.19.102] => {"chain": {}, "changed": false, "endpoints": {}, "invocation": {"module_args": {"endpoint1": "ethernet11", "endpoint2": "ethernet17", "name": "Chain1", "node": "10.2.19.102", "switch": "00:00:cc:37:ab:2c:97:ea"}, "module_name": "bmf_rest_bigchain_create_chain"}}
PLAY RECAP *********************************************************************
10.2.19.102 : ok=1 changed=0 unreachable=0 failed=0
'''
import sys
import json
sys.path.append('/home/admin/modules')
from bsn_rest import BmfRest
from ansible.module_utils.basic import *
def main():
fields = dict(node = dict(required=True),
user = dict(required=True),
password = dict(required=True),
chain = dict(required=True, type="str"),
service = dict(required=True, type="str"),
service_instance = dict(required=True, type="str"),
sequence = dict(required=True, type="str"))
module = AnsibleModule(
argument_spec = fields,
supports_check_mode = True)
node = module.params['node']
user = module.params['user']
password = module.params['password']
chain = module.params['chain']
service = module.params['service']
service_instance = module.params['service_instance']
sequence = module.params['sequence']
result = {}
with BmfRest(host=node, user=user, password=password, debug=True) as dev:
path = dev.bigchain_path()+'chain[name="%s"]/service[sequence=%s]' % (chain, sequence)
data = {"service-name": service, "instance": service_instance, "sequence": sequence}
insert_service_instance = dev.put(path, data=data)['content']
result = dict(insert_service_instance=insert_service_instance)
module.exit_json(**result)
if __name__ == '__main__':
main()
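
# Hypothetical playbook usage matching the argument_spec above (values are
# placeholders, and the module name is taken from the DOCUMENTATION block;
# run with connection: local as described in the notes):
#
#   - name: Insert service instance into chain
#     bmf_rest_bigchain_create_chain:
#       node: "{{ inventory_hostname }}"
#       user: admin
#       password: "{{ controller_password }}"
#       chain: Chain1
#       service: service1
#       service_instance: instance1
#       sequence: "1"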
|
from discord.ext import commands
import discord
import json
import logging
from .utils import data
discord_logger = logging.getLogger('discord')
class Administrator(commands.Cog):
@commands.command(brief="Reloads a module.")
    @commands.is_owner()
async def reload(self, ctx: commands.Context, module: str):
discord_logger.info("{:s} issued command to reload module {:s}".format(str(ctx.message.author), module))
try:
ctx.bot.reload_extension("package." + module)
await ctx.send("Reloaded extension {:s}.".format(module))
except commands.ExtensionNotFound:
await ctx.send("Failed to reload extension {:s}: Not found.".format(module))
except commands.ExtensionFailed:
await ctx.send("Failed to reload extension {:s}: Failed to set up.".format(module))
except commands.ExtensionNotLoaded:
await ctx.send("Failed to reload extension {:s}: Not loaded yet. Please use load command to load extension first.".format(module))
@commands.command(brief="Unloads a module.")
    @commands.is_owner()
async def unload(self, ctx: commands.Context, module: str):
discord_logger.info("{:s} issued command to unload module {:s}".format(str(ctx.message.author), module))
try:
ctx.bot.unload_extension("package." + module)
except commands.ExtensionNotLoaded:
await ctx.send("Failed to unload extension {:s}: Not loaded yet. Please use load command to load extension first.".format(module))
@commands.command(brief="Loads a module.")
    @commands.is_owner()
async def load(self, ctx: commands.Context, module: str):
discord_logger.info("{:s} issued command to load module {:s}".format(str(ctx.message.author), module))
try:
            ctx.bot.load_extension("package." + module)
except commands.ExtensionNotFound:
await ctx.send("Failed to load extension {:s}: Not found.".format(module))
except commands.ExtensionAlreadyLoaded:
await ctx.send("Failed to load extension {0:s}: {0:s} was already loaded.".format(module))
except commands.ExtensionFailed:
await ctx.send("Failed to load extension {0:s}: {0:s} errored in its entry function.".format(module))
def setup(bot: commands.Bot):
bot.add_cog(Administrator(data))
discord_logger.info("Loaded extension generic.admin")
def teardown(bot: commands.Bot):
bot.remove_cog("Administrator")
discord_logger.info("Unloaded extension generic.admin")
|